Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2009 Oracle. All rights reserved.
4 : */
5 :
6 : #include <linux/sched.h>
7 : #include <linux/pagemap.h>
8 : #include <linux/writeback.h>
9 : #include <linux/blkdev.h>
10 : #include <linux/rbtree.h>
11 : #include <linux/slab.h>
12 : #include <linux/error-injection.h>
13 : #include "ctree.h"
14 : #include "disk-io.h"
15 : #include "transaction.h"
16 : #include "volumes.h"
17 : #include "locking.h"
18 : #include "btrfs_inode.h"
19 : #include "async-thread.h"
20 : #include "free-space-cache.h"
21 : #include "qgroup.h"
22 : #include "print-tree.h"
23 : #include "delalloc-space.h"
24 : #include "block-group.h"
25 : #include "backref.h"
26 : #include "misc.h"
27 : #include "subpage.h"
28 : #include "zoned.h"
29 : #include "inode-item.h"
30 : #include "space-info.h"
31 : #include "fs.h"
32 : #include "accessors.h"
33 : #include "extent-tree.h"
34 : #include "root-tree.h"
35 : #include "file-item.h"
36 : #include "relocation.h"
37 : #include "super.h"
38 : #include "tree-checker.h"
39 :
40 : /*
41 : * Relocation overview
42 : *
43 : * [What does relocation do]
44 : *
45 : * The objective of relocation is to relocate all extents of the target block
46 : * group to other block groups.
47 : * This is utilized by resize (shrink only), profile converting, compacting
48 : * space, or balance routine to spread chunks over devices.
49 : *
50 : * Before | After
51 : * ------------------------------------------------------------------
52 : * BG A: 10 data extents | BG A: deleted
53 : * BG B: 2 data extents | BG B: 10 data extents (2 old + 8 relocated)
 * BG C: 1 data extent    | BG C: 3 data extents (1 old + 2 relocated)
55 : *
56 : * [How does relocation work]
57 : *
58 : * 1. Mark the target block group read-only
59 : * New extents won't be allocated from the target block group.
60 : *
61 : * 2.1 Record each extent in the target block group
62 : * To build a proper map of extents to be relocated.
63 : *
64 : * 2.2 Build data reloc tree and reloc trees
65 : * Data reloc tree will contain an inode, recording all newly relocated
66 : * data extents.
67 : * There will be only one data reloc tree for one data block group.
68 : *
69 : * Reloc tree will be a special snapshot of its source tree, containing
70 : * relocated tree blocks.
71 : * Each tree referring to a tree block in target block group will get its
72 : * reloc tree built.
73 : *
74 : * 2.3 Swap source tree with its corresponding reloc tree
75 : * Each involved tree only refers to new extents after swap.
76 : *
77 : * 3. Cleanup reloc trees and data reloc tree.
78 : * As old extents in the target block group are still referenced by reloc
79 : * trees, we need to clean them up before really freeing the target block
80 : * group.
81 : *
82 : * The main complexity is in steps 2.2 and 2.3.
83 : *
84 : * The entry point of relocation is relocate_block_group() function.
85 : */
86 :
#define RELOCATION_RESERVED_NODES	256
/*
 * map address of tree root to tree
 */
struct mapping_node {
	struct {
		struct rb_node rb_node;
		u64 bytenr;	/* key: start bytenr of the tree root block */
	}; /* Use rb_simple_node for search/insert */
	/* The mapped tree (a struct btrfs_root *, see find_reloc_root()) */
	void *data;
};
98 :
/* Rb-tree of mapping_nodes, all accesses serialized by @lock. */
struct mapping_tree {
	struct rb_root rb_root;
	spinlock_t lock;
};
103 :
104 : /*
105 : * present a tree block to process
106 : */
/*
 * present a tree block to process
 */
struct tree_block {
	struct {
		struct rb_node rb_node;
		u64 bytenr;	/* key: start bytenr of the tree block */
	}; /* Use rb_simple_node for search/insert */
	/* NOTE(review): presumably the objectid of the owning tree — confirm
	 * against the code filling tree_block entries. */
	u64 owner;
	struct btrfs_key key;
	unsigned int level:8;		/* tree level of this block */
	unsigned int key_ready:1;	/* @key has been populated */
};
117 :
#define MAX_EXTENTS 128

/*
 * A batch of data extents relocated together, covering [start, end].
 * NOTE(review): @boundary appears to record per-extent start offsets and
 * @nr the number of extents in the cluster — confirm with the cluster
 * handling code further down the file.
 */
struct file_extent_cluster {
	u64 start;
	u64 end;
	u64 boundary[MAX_EXTENTS];
	unsigned int nr;
};
126 :
struct reloc_control {
	/* block group to relocate */
	struct btrfs_block_group *block_group;
	/* extent tree */
	struct btrfs_root *extent_root;
	/* inode for moving data */
	struct inode *data_inode;

	/* reservation installed as trans->block_rsv when creating reloc
	 * roots (see btrfs_init_reloc_root()) */
	struct btrfs_block_rsv *block_rsv;

	struct btrfs_backref_cache backref_cache;

	/* current batch of data extents being relocated together */
	struct file_extent_cluster cluster;
	/* tree blocks have been processed */
	struct extent_io_tree processed_blocks;
	/* map start of tree root to corresponding reloc tree */
	struct mapping_tree reloc_root_tree;
	/* list of reloc trees */
	struct list_head reloc_roots;
	/* list of subvolume trees that get relocated */
	struct list_head dirty_subvol_roots;
	/* size of metadata reservation for merging reloc trees */
	u64 merging_rsv_size;
	/* size of relocated tree nodes */
	u64 nodes_relocated;
	/* reserved size for block group relocation */
	u64 reserved_bytes;

	/* NOTE(review): presumably the next extent-search offset and the
	 * running count of extents found — confirm in relocate_block_group */
	u64 search_start;
	u64 extents_found;

	/* current stage: MOVE_DATA_EXTENTS or UPDATE_DATA_PTRS */
	unsigned int stage:8;
	unsigned int create_reloc_tree:1;
	unsigned int merge_reloc_tree:1;
	unsigned int found_file_extent:1;
};
163 :
164 : /* stages of data relocation */
165 : #define MOVE_DATA_EXTENTS 0
166 : #define UPDATE_DATA_PTRS 1
167 :
168 0 : static void mark_block_processed(struct reloc_control *rc,
169 : struct btrfs_backref_node *node)
170 : {
171 0 : u32 blocksize;
172 :
173 0 : if (node->level == 0 ||
174 0 : in_range(node->bytenr, rc->block_group->start,
175 : rc->block_group->length)) {
176 0 : blocksize = rc->extent_root->fs_info->nodesize;
177 0 : set_extent_bit(&rc->processed_blocks, node->bytenr,
178 0 : node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL);
179 : }
180 0 : node->processed = 1;
181 0 : }
182 :
183 :
184 0 : static void mapping_tree_init(struct mapping_tree *tree)
185 : {
186 0 : tree->rb_root = RB_ROOT;
187 0 : spin_lock_init(&tree->lock);
188 0 : }
189 :
190 : /*
191 : * walk up backref nodes until reach node presents tree root
192 : */
193 0 : static struct btrfs_backref_node *walk_up_backref(
194 : struct btrfs_backref_node *node,
195 : struct btrfs_backref_edge *edges[], int *index)
196 : {
197 0 : struct btrfs_backref_edge *edge;
198 0 : int idx = *index;
199 :
200 0 : while (!list_empty(&node->upper)) {
201 0 : edge = list_entry(node->upper.next,
202 : struct btrfs_backref_edge, list[LOWER]);
203 0 : edges[idx++] = edge;
204 0 : node = edge->node[UPPER];
205 : }
206 0 : BUG_ON(node->detached);
207 0 : *index = idx;
208 0 : return node;
209 : }
210 :
211 : /*
212 : * walk down backref nodes to find start of next reference path
213 : */
214 0 : static struct btrfs_backref_node *walk_down_backref(
215 : struct btrfs_backref_edge *edges[], int *index)
216 : {
217 0 : struct btrfs_backref_edge *edge;
218 0 : struct btrfs_backref_node *lower;
219 0 : int idx = *index;
220 :
221 0 : while (idx > 0) {
222 0 : edge = edges[idx - 1];
223 0 : lower = edge->node[LOWER];
224 0 : if (list_is_last(&edge->list[LOWER], &lower->upper)) {
225 0 : idx--;
226 0 : continue;
227 : }
228 0 : edge = list_entry(edge->list[LOWER].next,
229 : struct btrfs_backref_edge, list[LOWER]);
230 0 : edges[idx - 1] = edge;
231 0 : *index = idx;
232 0 : return edge->node[UPPER];
233 : }
234 0 : *index = 0;
235 0 : return NULL;
236 : }
237 :
238 0 : static void update_backref_node(struct btrfs_backref_cache *cache,
239 : struct btrfs_backref_node *node, u64 bytenr)
240 : {
241 0 : struct rb_node *rb_node;
242 0 : rb_erase(&node->rb_node, &cache->rb_root);
243 0 : node->bytenr = bytenr;
244 0 : rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
245 0 : if (rb_node)
246 0 : btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
247 0 : }
248 :
249 : /*
250 : * update backref cache after a transaction commit
251 : */
static int update_backref_cache(struct btrfs_trans_handle *trans,
				struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int level = 0;

	/* First use of the cache: just remember the current transid. */
	if (cache->last_trans == 0) {
		cache->last_trans = trans->transid;
		return 0;
	}

	/* Still in the same transaction, the cache is up to date. */
	if (cache->last_trans == trans->transid)
		return 0;

	/*
	 * detached nodes are used to avoid unnecessary backref
	 * lookup. transaction commit changes the extent tree.
	 * so the detached nodes are no longer useful.
	 */
	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	/* Re-index nodes whose tree blocks were COWed to a new bytenr. */
	while (!list_empty(&cache->changed)) {
		node = list_entry(cache->changed.next,
				  struct btrfs_backref_node, list);
		list_del_init(&node->list);
		BUG_ON(node->pending);
		update_backref_node(cache, node, node->new_bytenr);
	}

	/*
	 * some nodes can be left in the pending list if there were
	 * errors during processing the pending nodes.
	 */
	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		list_for_each_entry(node, &cache->pending[level], list) {
			BUG_ON(!node->pending);
			if (node->bytenr == node->new_bytenr)
				continue;
			update_backref_node(cache, node, node->new_bytenr);
		}
	}

	/* Return 1 so the caller knows the cache was invalidated/updated. */
	cache->last_trans = 0;
	return 1;
}
301 :
302 0 : static bool reloc_root_is_dead(struct btrfs_root *root)
303 : {
304 : /*
305 : * Pair with set_bit/clear_bit in clean_dirty_subvols and
306 : * btrfs_update_reloc_root. We need to see the updated bit before
307 : * trying to access reloc_root
308 : */
309 0 : smp_rmb();
310 0 : if (test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state))
311 0 : return true;
312 : return false;
313 : }
314 :
315 : /*
316 : * Check if this subvolume tree has valid reloc tree.
317 : *
318 : * Reloc tree after swap is considered dead, thus not considered as valid.
319 : * This is enough for most callers, as they don't distinguish dead reloc root
320 : * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
321 : * special case.
322 : */
323 0 : static bool have_reloc_root(struct btrfs_root *root)
324 : {
325 0 : if (reloc_root_is_dead(root))
326 : return false;
327 0 : if (!root->reloc_root)
328 0 : return false;
329 : return true;
330 : }
331 :
/*
 * Decide whether relocation can skip building a backref node for this
 * root.  Returns 1 when the root's reloc tree makes the backref node for
 * the fs tree root unnecessary, 0 otherwise.
 */
int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
	struct btrfs_root *reloc_root;

	/* Only shareable (subvolume) trees get reloc trees at all. */
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		return 0;

	/* This root has been merged with its reloc tree, we can ignore it */
	if (reloc_root_is_dead(root))
		return 1;

	reloc_root = root->reloc_root;
	if (!reloc_root)
		return 0;

	/* Reloc tree created in the current transaction: can't ignore. */
	if (btrfs_header_generation(reloc_root->commit_root) ==
	    root->fs_info->running_transaction->transid)
		return 0;
	/*
	 * if there is reloc tree and it was created in previous
	 * transaction backref lookup can find the reloc tree,
	 * so backref node for the fs tree root is useless for
	 * relocation.
	 */
	return 1;
}
358 :
359 : /*
360 : * find reloc tree by address of tree root
361 : */
362 0 : struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
363 : {
364 0 : struct reloc_control *rc = fs_info->reloc_ctl;
365 0 : struct rb_node *rb_node;
366 0 : struct mapping_node *node;
367 0 : struct btrfs_root *root = NULL;
368 :
369 0 : ASSERT(rc);
370 0 : spin_lock(&rc->reloc_root_tree.lock);
371 0 : rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
372 0 : if (rb_node) {
373 0 : node = rb_entry(rb_node, struct mapping_node, rb_node);
374 0 : root = node->data;
375 : }
376 0 : spin_unlock(&rc->reloc_root_tree.lock);
377 0 : return btrfs_grab_root(root);
378 : }
379 :
380 : /*
381 : * For useless nodes, do two major clean ups:
382 : *
383 : * - Cleanup the children edges and nodes
384 : * If child node is also orphan (no parent) during cleanup, then the child
385 : * node will also be cleaned up.
386 : *
387 : * - Freeing up leaves (level 0), keeps nodes detached
388 : * For nodes, the node is still cached as "detached"
389 : *
390 : * Return false if @node is not in the @useless_nodes list.
391 : * Return true if @node is in the @useless_nodes list.
392 : */
static bool handle_useless_nodes(struct reloc_control *rc,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct list_head *useless_node = &cache->useless_node;
	bool ret = false;

	while (!list_empty(useless_node)) {
		struct btrfs_backref_node *cur;

		cur = list_first_entry(useless_node, struct btrfs_backref_node,
				       list);
		list_del_init(&cur->list);

		/* Only tree root nodes can be added to @useless_nodes */
		ASSERT(list_empty(&cur->upper));

		/* Remember if the caller's node itself was useless. */
		if (cur == node)
			ret = true;

		/* The node is the lowest node */
		if (cur->lowest) {
			/* Unhook from cache->leaves before freeing edges. */
			list_del_init(&cur->lower);
			cur->lowest = 0;
		}

		/* Cleanup the lower edges */
		while (!list_empty(&cur->lower)) {
			struct btrfs_backref_edge *edge;
			struct btrfs_backref_node *lower;

			edge = list_entry(cur->lower.next,
					struct btrfs_backref_edge, list[UPPER]);
			list_del(&edge->list[UPPER]);
			list_del(&edge->list[LOWER]);
			lower = edge->node[LOWER];
			btrfs_backref_free_edge(cache, edge);

			/* Child node is also orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
		}
		/* Mark this block processed for relocation */
		mark_block_processed(rc, cur);

		/*
		 * Backref nodes for tree leaves are deleted from the cache.
		 * Backref nodes for upper level tree blocks are left in the
		 * cache to avoid unnecessary backref lookup.
		 */
		if (cur->level > 0) {
			list_add(&cur->list, &cache->detached);
			cur->detached = 1;
		} else {
			rb_erase(&cur->rb_node, &cache->rb_root);
			btrfs_backref_free_node(cache, cur);
		}
	}
	return ret;
}
453 :
454 : /*
455 : * Build backref tree for a given tree block. Root of the backref tree
456 : * corresponds the tree block, leaves of the backref tree correspond roots of
457 : * b-trees that reference the tree block.
458 : *
459 : * The basic idea of this function is check backrefs of a given block to find
460 : * upper level blocks that reference the block, and then check backrefs of
461 : * these upper level blocks recursively. The recursion stops when tree root is
462 : * reached or backrefs for the block is cached.
463 : *
464 : * NOTE: if we find that backrefs for a block are cached, we know backrefs for
465 : * all upper level blocks that directly/indirectly reference the block are also
466 : * cached.
467 : */
static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
			struct reloc_control *rc, struct btrfs_key *node_key,
			int level, u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	/* For searching parent of TREE_BLOCK_REF */
	struct btrfs_path *path;
	struct btrfs_backref_node *cur;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_edge *edge;
	int ret;
	int err = 0;

	iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info);
	if (!iter)
		return ERR_PTR(-ENOMEM);
	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	/* The node for the requested block is the root of the backref tree. */
	node = btrfs_backref_alloc_node(cache, bytenr, level);
	if (!node) {
		err = -ENOMEM;
		goto out;
	}

	node->lowest = 1;
	cur = node;

	/* Breadth-first search to build backref cache */
	do {
		ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
						  cur);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		edge = list_first_entry_or_null(&cache->pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		/*
		 * The pending list isn't empty, take the first block to
		 * process
		 */
		if (edge) {
			list_del_init(&edge->list[UPPER]);
			cur = edge->node[UPPER];
		}
	} while (edge);

	/* Finish the upper linkage of newly added edges/nodes */
	ret = btrfs_backref_finish_upper_links(cache, node);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/* If @node itself was useless it has been freed or detached. */
	if (handle_useless_nodes(rc, node))
		node = NULL;
out:
	btrfs_backref_iter_free(iter);
	btrfs_free_path(path);
	if (err) {
		/* Tear down everything built so far, including @node. */
		btrfs_backref_error_cleanup(cache, node);
		return ERR_PTR(err);
	}
	ASSERT(!node || !node->detached);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
	return node;
}
541 :
542 : /*
543 : * helper to add backref node for the newly created snapshot.
544 : * the backref node is created by cloning backref node that
545 : * corresponds to root of source tree
546 : */
static int clone_backref_node(struct btrfs_trans_handle *trans,
			      struct reloc_control *rc,
			      struct btrfs_root *src,
			      struct btrfs_root *dest)
{
	struct btrfs_root *reloc_root = src->reloc_root;
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node = NULL;
	struct btrfs_backref_node *new_node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *new_edge;
	struct rb_node *rb_node;

	if (cache->last_trans > 0)
		update_backref_cache(trans, cache);

	/* Look for the cached node of the source tree's commit root first. */
	rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		if (node->detached)
			node = NULL;
		else
			BUG_ON(node->new_bytenr != reloc_root->node->start);
	}

	/* Fall back to the reloc root's commit root. */
	if (!node) {
		rb_node = rb_simple_search(&cache->rb_root,
					   reloc_root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct btrfs_backref_node,
					rb_node);
			BUG_ON(node->detached);
		}
	}

	/* Source root not cached: nothing to clone. */
	if (!node)
		return 0;

	new_node = btrfs_backref_alloc_node(cache, dest->node->start,
					    node->level);
	if (!new_node)
		return -ENOMEM;

	new_node->lowest = node->lowest;
	new_node->checked = 1;
	new_node->root = btrfs_grab_root(dest);
	ASSERT(new_node->root);

	if (!node->lowest) {
		/*
		 * Clone every lower edge, linking only the upper side for
		 * now so a mid-loop allocation failure is easy to unwind.
		 */
		list_for_each_entry(edge, &node->lower, list[UPPER]) {
			new_edge = btrfs_backref_alloc_edge(cache);
			if (!new_edge)
				goto fail;

			btrfs_backref_link_edge(new_edge, edge->node[LOWER],
						new_node, LINK_UPPER);
		}
	} else {
		list_add_tail(&new_node->lower, &cache->leaves);
	}

	rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
				   &new_node->rb_node);
	if (rb_node)
		btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);

	if (!new_node->lowest) {
		/* All allocations succeeded: hook up the lower side too. */
		list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
			list_add_tail(&new_edge->list[LOWER],
				      &new_edge->node[LOWER]->upper);
		}
	}
	return 0;
fail:
	/* Free partially cloned edges; lower side was never linked. */
	while (!list_empty(&new_node->lower)) {
		new_edge = list_entry(new_node->lower.next,
				      struct btrfs_backref_edge, list[UPPER]);
		list_del(&new_edge->list[UPPER]);
		btrfs_backref_free_edge(cache, new_edge);
	}
	btrfs_backref_free_node(cache, new_node);
	return -ENOMEM;
}
630 :
631 : /*
632 : * helper to add 'address of tree root -> reloc tree' mapping
633 : */
634 0 : static int __must_check __add_reloc_root(struct btrfs_root *root)
635 : {
636 0 : struct btrfs_fs_info *fs_info = root->fs_info;
637 0 : struct rb_node *rb_node;
638 0 : struct mapping_node *node;
639 0 : struct reloc_control *rc = fs_info->reloc_ctl;
640 :
641 0 : node = kmalloc(sizeof(*node), GFP_NOFS);
642 0 : if (!node)
643 : return -ENOMEM;
644 :
645 0 : node->bytenr = root->commit_root->start;
646 0 : node->data = root;
647 :
648 0 : spin_lock(&rc->reloc_root_tree.lock);
649 0 : rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
650 : node->bytenr, &node->rb_node);
651 0 : spin_unlock(&rc->reloc_root_tree.lock);
652 0 : if (rb_node) {
653 0 : btrfs_err(fs_info,
654 : "Duplicate root found for start=%llu while inserting into relocation tree",
655 : node->bytenr);
656 0 : return -EEXIST;
657 : }
658 :
659 0 : list_add_tail(&root->root_list, &rc->reloc_roots);
660 0 : return 0;
661 : }
662 :
663 : /*
664 : * helper to delete the 'address of tree root -> reloc tree'
665 : * mapping
666 : */
static void __del_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;
	bool put_ref = false;

	/* Remove the bytenr -> root mapping, if relocation is still active. */
	if (rc && root->node) {
		spin_lock(&rc->reloc_root_tree.lock);
		rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
					   root->commit_root->start);
		if (rb_node) {
			node = rb_entry(rb_node, struct mapping_node, rb_node);
			rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
			RB_CLEAR_NODE(&node->rb_node);
		}
		spin_unlock(&rc->reloc_root_tree.lock);
		ASSERT(!node || (struct btrfs_root *)node->data == root);
	}

	/*
	 * We only put the reloc root here if it's on the list. There's a lot
	 * of places where the pattern is to splice the rc->reloc_roots, process
	 * the reloc roots, and then add the reloc root back onto
	 * rc->reloc_roots. If we call __del_reloc_root while it's off of the
	 * list we don't want the reference being dropped, because the guy
	 * messing with the list is in charge of the reference.
	 */
	spin_lock(&fs_info->trans_lock);
	if (!list_empty(&root->root_list)) {
		put_ref = true;
		list_del_init(&root->root_list);
	}
	spin_unlock(&fs_info->trans_lock);
	if (put_ref)
		btrfs_put_root(root);
	/* kfree(NULL) is a no-op when no mapping was found above. */
	kfree(node);
}
706 :
707 : /*
708 : * helper to update the 'address of tree root -> reloc tree'
709 : * mapping
710 : */
static int __update_reloc_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *rb_node;
	struct mapping_node *node = NULL;
	struct reloc_control *rc = fs_info->reloc_ctl;

	/* Phase 1: detach the mapping node keyed by the old commit root. */
	spin_lock(&rc->reloc_root_tree.lock);
	rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
				   root->commit_root->start);
	if (rb_node) {
		node = rb_entry(rb_node, struct mapping_node, rb_node);
		rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
	}
	spin_unlock(&rc->reloc_root_tree.lock);

	/* No mapping to update: treated as success. */
	if (!node)
		return 0;
	BUG_ON((struct btrfs_root *)node->data != root);

	/* Phase 2: re-insert keyed by the new root node's bytenr. */
	spin_lock(&rc->reloc_root_tree.lock);
	node->bytenr = root->node->start;
	rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
				   node->bytenr, &node->rb_node);
	spin_unlock(&rc->reloc_root_tree.lock);
	if (rb_node)
		btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
	return 0;
}
740 :
/*
 * Create a reloc tree (a special snapshot) for @root, rooted at a new
 * root item with objectid BTRFS_TREE_RELOC_OBJECTID and offset @objectid.
 * Returns the newly read reloc root, or an ERR_PTR; once references have
 * been changed, any failure aborts the transaction.
 */
static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
					struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct extent_buffer *eb;
	struct btrfs_root_item *root_item;
	struct btrfs_key root_key;
	int ret = 0;
	bool must_abort = false;

	root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
	if (!root_item)
		return ERR_PTR(-ENOMEM);

	root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = objectid;

	if (root->root_key.objectid == objectid) {
		u64 commit_root_gen;

		/* called by btrfs_init_reloc_root */
		ret = btrfs_copy_root(trans, root, root->commit_root, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;

		/*
		 * Set the last_snapshot field to the generation of the commit
		 * root - like this ctree.c:btrfs_block_can_be_shared() behaves
		 * correctly (returns true) when the relocation root is created
		 * either inside the critical section of a transaction commit
		 * (through transaction.c:qgroup_account_snapshot()) and when
		 * it's created before the transaction commit is started.
		 */
		commit_root_gen = btrfs_header_generation(root->commit_root);
		btrfs_set_root_last_snapshot(&root->root_item, commit_root_gen);
	} else {
		/*
		 * called by btrfs_reloc_post_snapshot_hook.
		 * the source tree is a reloc tree, all tree blocks
		 * modified after it was created have RELOC flag
		 * set in their headers. so it's OK to not update
		 * the 'last_snapshot'.
		 */
		ret = btrfs_copy_root(trans, root, root->node, &eb,
				      BTRFS_TREE_RELOC_OBJECTID);
		if (ret)
			goto fail;
	}

	/*
	 * We have changed references at this point, we must abort the
	 * transaction if anything fails.
	 */
	must_abort = true;

	memcpy(root_item, &root->root_item, sizeof(*root_item));
	btrfs_set_root_bytenr(root_item, eb->start);
	btrfs_set_root_level(root_item, btrfs_header_level(eb));
	btrfs_set_root_generation(root_item, trans->transid);

	if (root->root_key.objectid == objectid) {
		/* refs == 0 marks the reloc root as an orphan snapshot. */
		btrfs_set_root_refs(root_item, 0);
		memset(&root_item->drop_progress, 0,
		       sizeof(struct btrfs_disk_key));
		btrfs_set_root_drop_level(root_item, 0);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_insert_root(trans, fs_info->tree_root,
				&root_key, root_item);
	if (ret)
		goto fail;

	kfree(root_item);

	reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
	if (IS_ERR(reloc_root)) {
		ret = PTR_ERR(reloc_root);
		goto abort;
	}
	set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
	reloc_root->last_trans = trans->transid;
	return reloc_root;
fail:
	kfree(root_item);
abort:
	if (must_abort)
		btrfs_abort_transaction(trans, ret);
	return ERR_PTR(ret);
}
836 :
837 : /*
838 : * create reloc tree for a given fs tree. reloc tree is just a
839 : * snapshot of the fs tree with special root objectid.
840 : *
841 : * The reloc_root comes out of here with two references, one for
842 : * root->reloc_root, and another for being on the rc->reloc_roots list.
843 : */
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct reloc_control *rc = fs_info->reloc_ctl;
	struct btrfs_block_rsv *rsv;
	int clear_rsv = 0;
	int ret;

	/* No relocation in progress: nothing to do. */
	if (!rc)
		return 0;

	/*
	 * The subvolume has reloc tree but the swap is finished, no need to
	 * create/update the dead reloc tree
	 */
	if (reloc_root_is_dead(root))
		return 0;

	/*
	 * This is subtle but important. We do not do
	 * record_root_in_transaction for reloc roots, instead we record their
	 * corresponding fs root, and then here we update the last trans for the
	 * reloc root. This means that we have to do this for the entire life
	 * of the reloc root, regardless of which stage of the relocation we are
	 * in.
	 */
	if (root->reloc_root) {
		reloc_root = root->reloc_root;
		reloc_root->last_trans = trans->transid;
		return 0;
	}

	/*
	 * We are merging reloc roots, we do not need new reloc trees. Also
	 * reloc trees never need their own reloc tree.
	 */
	if (!rc->create_reloc_tree ||
	    root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		return 0;

	/* Use the relocation-dedicated reservation unless already set up. */
	if (!trans->reloc_reserved) {
		rsv = trans->block_rsv;
		trans->block_rsv = rc->block_rsv;
		clear_rsv = 1;
	}
	reloc_root = create_reloc_root(trans, root, root->root_key.objectid);
	if (clear_rsv)
		trans->block_rsv = rsv;
	if (IS_ERR(reloc_root))
		return PTR_ERR(reloc_root);

	ret = __add_reloc_root(reloc_root);
	ASSERT(ret != -EEXIST);
	if (ret) {
		/* Pairs with create_reloc_root */
		btrfs_put_root(reloc_root);
		return ret;
	}
	root->reloc_root = btrfs_grab_root(reloc_root);
	return 0;
}
907 :
908 : /*
909 : * update root item of reloc tree
910 : */
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	int ret;

	/* Nothing to update if there's no live reloc root. */
	if (!have_reloc_root(root))
		return 0;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	/*
	 * We are probably ok here, but __del_reloc_root() will drop its ref of
	 * the root. We have the ref for root->reloc_root, but just in case
	 * hold it while we update the reloc root.
	 */
	btrfs_grab_root(reloc_root);

	/* root->reloc_root will stay until current relocation finished */
	if (fs_info->reloc_ctl->merge_reloc_tree &&
	    btrfs_root_refs(root_item) == 0) {
		set_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
		/*
		 * Mark the tree as dead before we change reloc_root so
		 * have_reloc_root will not touch it from now on.
		 */
		smp_wmb();
		__del_reloc_root(reloc_root);
	}

	/* The reloc root was COWed this transaction: re-key and re-point it. */
	if (reloc_root->commit_root != reloc_root->node) {
		__update_reloc_root(reloc_root);
		btrfs_set_root_node(root_item, reloc_root->node);
		free_extent_buffer(reloc_root->commit_root);
		reloc_root->commit_root = btrfs_root_node(reloc_root);
	}

	ret = btrfs_update_root(trans, fs_info->tree_root,
				&reloc_root->root_key, root_item);
	btrfs_put_root(reloc_root);
	return ret;
}
956 :
957 : /*
958 : * helper to find first cached inode with inode number >= objectid
959 : * in a subvolume
960 : */
static struct inode *find_next_inode(struct btrfs_root *root, u64 objectid)
{
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;

	spin_lock(&root->inode_lock);
again:
	/* Standard rb-tree descent keyed by inode number. */
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(entry))
			node = node->rb_left;
		else if (objectid > btrfs_ino(entry))
			node = node->rb_right;
		else
			break;
	}
	/*
	 * No exact match: walk forward from the last visited node to the
	 * first entry with ino >= objectid.
	 */
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(entry)) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		/* igrab() returns NULL for inodes being freed; skip those. */
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			return inode;
		}

		objectid = btrfs_ino(entry) + 1;
		/*
		 * cond_resched_lock() may drop and retake inode_lock; the
		 * tree can change in between, so restart the search.
		 */
		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
	return NULL;
}
1010 :
/*
 * get new location of data
 *
 * Look up where the data extent that used to live at @bytenr (of disk
 * length @num_bytes) has been copied to, by searching the relocation
 * data inode @reloc_inode, and store the new disk bytenr in
 * *@new_bytenr.
 *
 * Returns 0 on success, -ENOENT if no matching file extent item was
 * found, -EINVAL if the found extent's disk size doesn't match
 * @num_bytes, or another negative errno from the tree search.
 */
static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
			    u64 bytenr, u64 num_bytes)
{
	struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Map the old bytenr to its file offset inside the reloc inode;
	 * index_cnt is the base of that mapping. */
	bytenr -= BTRFS_I(reloc_inode)->index_cnt;
	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(reloc_inode)), bytenr, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* No exact file extent item at this offset */
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);

	/* Reloc inode extents are expected to be plain: no inline offset,
	 * compression, encryption or other encoding. */
	BUG_ON(btrfs_file_extent_offset(leaf, fi) ||
	       btrfs_file_extent_compression(leaf, fi) ||
	       btrfs_file_extent_encryption(leaf, fi) ||
	       btrfs_file_extent_other_encoding(leaf, fi));

	if (num_bytes != btrfs_file_extent_disk_num_bytes(leaf, fi)) {
		ret = -EINVAL;
		goto out;
	}

	*new_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
1057 :
/*
 * update file extent items in the tree leaf to point to
 * the new locations.
 *
 * Only active during the UPDATE_DATA_PTRS stage: for every regular
 * file extent item in @leaf that points into the block group being
 * relocated, rewrite its disk_bytenr to the extent's new location
 * (looked up via rc->data_inode) and queue the matching add/drop of
 * extent references.
 *
 * Returns 0 on success or a negative errno.  The transaction is
 * aborted if a reference update fails after the leaf has already
 * been modified.
 */
static noinline_for_stack
int replace_file_extents(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_root *root,
			 struct extent_buffer *leaf)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	struct inode *inode = NULL;
	u64 parent;
	u64 bytenr;
	u64 new_bytenr = 0;
	u64 num_bytes;
	u64 end;
	u32 nritems;
	u32 i;
	int ret = 0;
	int first = 1;	/* lazily look up the first inode on demand */
	int dirty = 0;	/* set once any extent item in @leaf was rewritten */

	if (rc->stage != UPDATE_DATA_PTRS)
		return 0;

	/* reloc trees always use full backref */
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		parent = leaf->start;
	else
		parent = 0;

	nritems = btrfs_header_nritems(leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_ref ref = { 0 };

		cond_resched();
		btrfs_item_key_to_cpu(leaf, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
		/* A hole, nothing to relocate */
		if (bytenr == 0)
			continue;
		/* Skip extents outside the block group being relocated */
		if (!in_range(bytenr, rc->block_group->start,
			      rc->block_group->length))
			continue;

		/*
		 * if we are modifying block in fs tree, wait for read_folio
		 * to complete and drop the extent cache
		 */
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			if (first) {
				inode = find_next_inode(root, key.objectid);
				first = 0;
			} else if (inode && btrfs_ino(BTRFS_I(inode)) < key.objectid) {
				/* Moved past the cached inode; get the next one */
				btrfs_add_delayed_iput(BTRFS_I(inode));
				inode = find_next_inode(root, key.objectid);
			}
			if (inode && btrfs_ino(BTRFS_I(inode)) == key.objectid) {
				struct extent_state *cached_state = NULL;

				end = key.offset +
				      btrfs_file_extent_num_bytes(leaf, fi);
				WARN_ON(!IS_ALIGNED(key.offset,
						    fs_info->sectorsize));
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
				/* Best effort: if the range is busy, leave
				 * this extent item untouched. */
				ret = try_lock_extent(&BTRFS_I(inode)->io_tree,
						      key.offset, end,
						      &cached_state);
				if (!ret)
					continue;

				btrfs_drop_extent_map_range(BTRFS_I(inode),
							    key.offset, end, true);
				unlock_extent(&BTRFS_I(inode)->io_tree,
					      key.offset, end, &cached_state);
			}
		}

		ret = get_new_location(rc->data_inode, &new_bytenr,
				       bytenr, num_bytes);
		if (ret) {
			/*
			 * Don't have to abort since we've not changed anything
			 * in the file extent yet.
			 */
			break;
		}

		btrfs_set_file_extent_disk_bytenr(leaf, fi, new_bytenr);
		dirty = 1;

		/* Data backrefs use the file offset of the extent start,
		 * i.e. the item key offset minus the in-extent offset. */
		key.offset -= btrfs_file_extent_offset(leaf, fi);
		/* Add a reference for the new location ... */
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		/* ... and drop the reference for the old location */
		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
				       num_bytes, parent);
		btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
				    key.objectid, key.offset,
				    root->root_key.objectid, false);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
	}
	if (dirty)
		btrfs_mark_buffer_dirty(leaf);
	if (inode)
		btrfs_add_delayed_iput(BTRFS_I(inode));
	return ret;
}
1188 :
1189 : static noinline_for_stack
1190 0 : int memcmp_node_keys(struct extent_buffer *eb, int slot,
1191 : struct btrfs_path *path, int level)
1192 : {
1193 0 : struct btrfs_disk_key key1;
1194 0 : struct btrfs_disk_key key2;
1195 0 : btrfs_node_key(eb, &key1, slot);
1196 0 : btrfs_node_key(path->nodes[level], &key2, path->slots[level]);
1197 0 : return memcmp(&key1, &key2, sizeof(key1));
1198 : }
1199 :
/*
 * try to replace tree blocks in fs tree with the new blocks
 * in reloc tree. tree blocks haven't been modified since the
 * reloc tree was create can be replaced.
 *
 * if a block was replaced, level of the block + 1 is returned.
 * if no block got replaced, 0 is returned. if there are other
 * errors, a negative error number is returned.
 *
 * @dest is the subvolume tree and @src the reloc tree; @path holds
 * the reloc tree position of the candidate block.  The function makes
 * two passes: a read-only pass to find a swappable block, then (via
 * the "goto again" retry with cow == 1) a COWing pass that actually
 * swaps the block pointers and fixes up the extent references.
 */
static noinline_for_stack
int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
		 struct btrfs_root *dest, struct btrfs_root *src,
		 struct btrfs_path *path, struct btrfs_key *next_key,
		 int lowest_level, int max_level)
{
	struct btrfs_fs_info *fs_info = dest->fs_info;
	struct extent_buffer *eb;
	struct extent_buffer *parent;
	struct btrfs_ref ref = { 0 };
	struct btrfs_key key;
	u64 old_bytenr;
	u64 new_bytenr;
	u64 old_ptr_gen;
	u64 new_ptr_gen;
	u64 last_snapshot;
	u32 blocksize;
	int cow = 0;	/* first pass read-only; set to 1 before the retry */
	int level;
	int ret;
	int slot;

	ASSERT(src->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(dest->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);

	last_snapshot = btrfs_root_last_snapshot(&src->root_item);
again:
	slot = path->slots[lowest_level];
	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);

	eb = btrfs_lock_root_node(dest);
	level = btrfs_header_level(eb);

	if (level < lowest_level) {
		/* fs tree is shallower than the reloc path; nothing to swap */
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		return 0;
	}

	if (cow) {
		ret = btrfs_cow_block(trans, dest, eb, NULL, 0, &eb,
				      BTRFS_NESTING_COW);
		if (ret) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			return ret;
		}
	}

	if (next_key) {
		/* Default: no further key; overwritten while descending */
		next_key->objectid = (u64)-1;
		next_key->type = (u8)-1;
		next_key->offset = (u64)-1;
	}

	parent = eb;
	while (1) {
		level = btrfs_header_level(parent);
		ASSERT(level >= lowest_level);

		ret = btrfs_bin_search(parent, 0, &key, &slot);
		if (ret < 0)
			break;
		if (ret && slot > 0)
			slot--;

		if (next_key && slot + 1 < btrfs_header_nritems(parent))
			btrfs_node_key_to_cpu(parent, next_key, slot + 1);

		old_bytenr = btrfs_node_blockptr(parent, slot);
		blocksize = fs_info->nodesize;
		old_ptr_gen = btrfs_node_ptr_generation(parent, slot);

		if (level <= max_level) {
			eb = path->nodes[level];
			new_bytenr = btrfs_node_blockptr(eb,
						path->slots[level]);
			new_ptr_gen = btrfs_node_ptr_generation(eb,
						path->slots[level]);
		} else {
			new_bytenr = 0;
			new_ptr_gen = 0;
		}

		if (WARN_ON(new_bytenr > 0 && new_bytenr == old_bytenr)) {
			/* both trees already point at the same block */
			ret = level;
			break;
		}

		/*
		 * Block can't be swapped at this level (no reloc tree
		 * counterpart, fs tree block is newer than the last
		 * snapshot, or the keys differ): descend one level.
		 */
		if (new_bytenr == 0 || old_ptr_gen > last_snapshot ||
		    memcmp_node_keys(parent, slot, path, level)) {
			if (level <= lowest_level) {
				ret = 0;
				break;
			}

			eb = btrfs_read_node_slot(parent, slot);
			if (IS_ERR(eb)) {
				ret = PTR_ERR(eb);
				break;
			}
			btrfs_tree_lock(eb);
			if (cow) {
				ret = btrfs_cow_block(trans, dest, eb, parent,
						      slot, &eb,
						      BTRFS_NESTING_COW);
				if (ret) {
					btrfs_tree_unlock(eb);
					free_extent_buffer(eb);
					break;
				}
			}

			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);

			parent = eb;
			continue;
		}

		if (!cow) {
			/* Found a swappable block: restart with COW enabled */
			btrfs_tree_unlock(parent);
			free_extent_buffer(parent);
			cow = 1;
			goto again;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &key,
				      path->slots[level]);
		btrfs_release_path(path);

		/* Re-search the reloc tree with write locks for the swap */
		path->lowest_level = level;
		set_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
		clear_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &src->state);
		path->lowest_level = 0;
		if (ret) {
			if (ret > 0)
				ret = -ENOENT;
			break;
		}

		/*
		 * Info qgroup to trace both subtrees.
		 *
		 * We must trace both trees.
		 * 1) Tree reloc subtree
		 *    If not traced, we will leak data numbers
		 * 2) Fs subtree
		 *    If not traced, we will double count old data
		 *
		 * We don't scan the subtree right now, but only record
		 * the swapped tree blocks.
		 * The real subtree rescan is delayed until we have new
		 * CoW on the subtree root node before transaction commit.
		 */
		ret = btrfs_qgroup_add_swapped_blocks(trans, dest,
				rc->block_group, parent, slot,
				path->nodes[level], path->slots[level],
				last_snapshot);
		if (ret < 0)
			break;
		/*
		 * swap blocks in fs tree and reloc tree.
		 */
		btrfs_set_node_blockptr(parent, slot, new_bytenr);
		btrfs_set_node_ptr_generation(parent, slot, new_ptr_gen);
		btrfs_mark_buffer_dirty(parent);

		btrfs_set_node_blockptr(path->nodes[level],
					path->slots[level], old_bytenr);
		btrfs_set_node_ptr_generation(path->nodes[level],
					      path->slots[level], old_ptr_gen);
		btrfs_mark_buffer_dirty(path->nodes[level]);

		/* Fix up backrefs: each tree gains a ref on the block it
		 * now points to and drops the ref on the one it gave up. */
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}
		btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
				    true);
		ret = btrfs_inc_extent_ref(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
				       blocksize, path->nodes[level]->start);
		btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
				       blocksize, 0);
		btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
				    0, true);
		ret = btrfs_free_extent(trans, &ref);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			break;
		}

		btrfs_unlock_up_safe(path, 0);

		ret = level;
		break;
	}
	btrfs_tree_unlock(parent);
	free_extent_buffer(parent);
	return ret;
}
1432 :
/*
 * helper to find next relocated block in reloc tree
 *
 * Advance @path to the next node pointer whose generation is newer
 * than the root's last snapshot (i.e. a block written by relocation),
 * walking upwards from *@level.
 *
 * Returns 0 with *@level updated when such a slot is found, or 1 when
 * the whole tree has been exhausted.
 */
static noinline_for_stack
int walk_up_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
		       int *level)
{
	struct extent_buffer *eb;
	int i;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	/* Drop everything below the current level; it's been processed */
	for (i = 0; i < *level; i++) {
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}

	for (i = *level; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		while (path->slots[i] + 1 < nritems) {
			path->slots[i]++;
			/* Pointers at or before the last snapshot were not
			 * touched by relocation; skip them. */
			if (btrfs_node_ptr_generation(eb, path->slots[i]) <=
			    last_snapshot)
				continue;

			*level = i;
			return 0;
		}
		/* This node is done; release it and go up one level */
		free_extent_buffer(path->nodes[i]);
		path->nodes[i] = NULL;
	}
	return 1;
}
1469 :
/*
 * walk down reloc tree to find relocated block of lowest level
 *
 * Starting at *@level, descend @path towards the lowest node pointer
 * whose generation is newer than the root's last snapshot.
 *
 * Returns 0 with *@level updated when a candidate level was reached,
 * 1 when the current subtree holds nothing newer than the snapshot,
 * or a negative errno if reading a child block failed.
 */
static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
			 int *level)
{
	struct extent_buffer *eb = NULL;
	int i;
	u64 ptr_gen = 0;
	u64 last_snapshot;
	u32 nritems;

	last_snapshot = btrfs_root_last_snapshot(&root->root_item);

	for (i = *level; i > 0; i--) {
		eb = path->nodes[i];
		nritems = btrfs_header_nritems(eb);
		/* Skip pointers not newer than the last snapshot */
		while (path->slots[i] < nritems) {
			ptr_gen = btrfs_node_ptr_generation(eb, path->slots[i]);
			if (ptr_gen > last_snapshot)
				break;
			path->slots[i]++;
		}
		if (path->slots[i] >= nritems) {
			if (i == *level)
				break;
			/* Exhausted this node; resume one level up */
			*level = i + 1;
			return 0;
		}
		if (i == 1) {
			/* Reached the level above the leaves; stop here */
			*level = i;
			return 0;
		}

		eb = btrfs_read_node_slot(eb, path->slots[i]);
		if (IS_ERR(eb))
			return PTR_ERR(eb);
		BUG_ON(btrfs_header_level(eb) != i - 1);
		path->nodes[i - 1] = eb;
		path->slots[i - 1] = 0;
	}
	return 1;
}
1514 :
/*
 * invalidate extent cache for file extents whose key in range of
 * [min_key, max_key)
 *
 * Walks all cached regular-file inodes of @root in that key range and
 * drops their extent maps, so stale mappings are re-read after the
 * underlying file extent items were rewritten.  Always returns 0.
 */
static int invalidate_extent_cache(struct btrfs_root *root,
				   struct btrfs_key *min_key,
				   struct btrfs_key *max_key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = NULL;
	u64 objectid;
	u64 start, end;
	u64 ino;

	objectid = min_key->objectid;
	while (1) {
		struct extent_state *cached_state = NULL;

		cond_resched();
		/* Release the previous iteration's inode (iput(NULL) is a no-op) */
		iput(inode);

		if (objectid > max_key->objectid)
			break;

		inode = find_next_inode(root, objectid);
		if (!inode)
			break;
		ino = btrfs_ino(BTRFS_I(inode));

		if (ino > max_key->objectid) {
			iput(inode);
			break;
		}

		objectid = ino + 1;
		if (!S_ISREG(inode->i_mode))
			continue;

		/* Clamp the invalidation start when this is the first inode
		 * in range; otherwise invalidate from offset 0. */
		if (unlikely(min_key->objectid == ino)) {
			if (min_key->type > BTRFS_EXTENT_DATA_KEY)
				continue;
			if (min_key->type < BTRFS_EXTENT_DATA_KEY)
				start = 0;
			else {
				start = min_key->offset;
				WARN_ON(!IS_ALIGNED(start, fs_info->sectorsize));
			}
		} else {
			start = 0;
		}

		/* Clamp the invalidation end when this is the last inode
		 * in range; otherwise invalidate to the end of file. */
		if (unlikely(max_key->objectid == ino)) {
			if (max_key->type < BTRFS_EXTENT_DATA_KEY)
				continue;
			if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
				end = (u64)-1;
			} else {
				if (max_key->offset == 0)
					continue;
				end = max_key->offset;
				WARN_ON(!IS_ALIGNED(end, fs_info->sectorsize));
				end--;
			}
		} else {
			end = (u64)-1;
		}

		/* the lock_extent waits for read_folio to complete */
		lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
		btrfs_drop_extent_map_range(BTRFS_I(inode), start, end, true);
		unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
	}
	return 0;
}
1589 :
1590 0 : static int find_next_key(struct btrfs_path *path, int level,
1591 : struct btrfs_key *key)
1592 :
1593 : {
1594 0 : while (level < BTRFS_MAX_LEVEL) {
1595 0 : if (!path->nodes[level])
1596 : break;
1597 0 : if (path->slots[level] + 1 <
1598 : btrfs_header_nritems(path->nodes[level])) {
1599 0 : btrfs_node_key_to_cpu(path->nodes[level], key,
1600 : path->slots[level] + 1);
1601 0 : return 0;
1602 : }
1603 0 : level++;
1604 : }
1605 : return 1;
1606 : }
1607 :
/*
 * Insert current subvolume into reloc_control::dirty_subvol_roots
 *
 * Marks @root's reloc tree as fully merged: reset its drop progress,
 * set its refs to 0 (pairs with prepare_to_merge() setting refs to 1
 * so recovery knows merging is no longer needed — see that function's
 * comment), write the updated root item, and queue @root on
 * rc->dirty_subvol_roots for later cleanup.
 *
 * Returns 0 on success or the error from btrfs_update_reloc_root().
 */
static int insert_dirty_subvol(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_root *root)
{
	struct btrfs_root *reloc_root = root->reloc_root;
	struct btrfs_root_item *reloc_root_item;
	int ret;

	/* @root must be a subvolume tree root with a valid reloc tree */
	ASSERT(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
	ASSERT(reloc_root);

	reloc_root_item = &reloc_root->root_item;
	memset(&reloc_root_item->drop_progress, 0,
	       sizeof(reloc_root_item->drop_progress));
	btrfs_set_root_drop_level(reloc_root_item, 0);
	btrfs_set_root_refs(reloc_root_item, 0);
	ret = btrfs_update_reloc_root(trans, root);
	if (ret)
		return ret;

	if (list_empty(&root->reloc_dirty_list)) {
		/* Hold a ref for the dirty list; dropped during cleanup */
		btrfs_grab_root(root);
		list_add_tail(&root->reloc_dirty_list, &rc->dirty_subvol_roots);
	}

	return 0;
}
1639 :
/*
 * Clean up every root queued on rc->dirty_subvol_roots: for a merged
 * subvolume, detach and drop its reloc tree; for an orphan reloc tree,
 * drop it directly.
 *
 * Processes the whole list even on failure; returns 0 or the first
 * error encountered.
 */
static int clean_dirty_subvols(struct reloc_control *rc)
{
	struct btrfs_root *root;
	struct btrfs_root *next;
	int ret = 0;
	int ret2;

	list_for_each_entry_safe(root, next, &rc->dirty_subvol_roots,
				 reloc_dirty_list) {
		if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
			/* Merged subvolume, cleanup its reloc root */
			struct btrfs_root *reloc_root = root->reloc_root;

			list_del_init(&root->reloc_dirty_list);
			root->reloc_root = NULL;
			/*
			 * Need barrier to ensure clear_bit() only happens after
			 * root->reloc_root = NULL. Pairs with have_reloc_root.
			 */
			smp_wmb();
			clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state);
			if (reloc_root) {
				/*
				 * btrfs_drop_snapshot drops our ref we hold for
				 * ->reloc_root.  If it fails however we must
				 * drop the ref ourselves.
				 */
				ret2 = btrfs_drop_snapshot(reloc_root, 0, 1);
				if (ret2 < 0) {
					btrfs_put_root(reloc_root);
					if (!ret)
						ret = ret2;
				}
			}
			/* Drop the ref taken by insert_dirty_subvol() */
			btrfs_put_root(root);
		} else {
			/* Orphan reloc tree, just clean it up */
			ret2 = btrfs_drop_snapshot(root, 0, 1);
			if (ret2 < 0) {
				btrfs_put_root(root);
				if (!ret)
					ret = ret2;
			}
		}
	}
	return ret;
}
1687 :
/*
 * merge the relocated tree blocks in reloc tree with corresponding
 * fs tree.
 *
 * Walks @root's reloc tree, swapping relocated blocks into the fs
 * tree via replace_path(), in small transactions.  Progress is saved
 * in the reloc root item's drop_progress/drop_level so an interrupted
 * merge can resume.  On full success the subvolume is queued on
 * rc->dirty_subvol_roots via insert_dirty_subvol().
 *
 * Returns 0 on success or a negative errno.
 */
static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
					       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct btrfs_key next_key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *reloc_root;
	struct btrfs_root_item *root_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int reserve_level;
	int level;
	int max_level;
	int replaced = 0;
	int ret = 0;
	u32 min_reserved;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	reloc_root = root->reloc_root;
	root_item = &reloc_root->root_item;

	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		/* Fresh merge: start from the reloc tree root */
		level = btrfs_root_level(root_item);
		atomic_inc(&reloc_root->node->refs);
		path->nodes[level] = reloc_root->node;
		path->slots[level] = 0;
	} else {
		/* Resume from the key saved in drop_progress */
		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);

		level = btrfs_root_drop_level(root_item);
		BUG_ON(level == 0);
		path->lowest_level = level;
		ret = btrfs_search_slot(NULL, reloc_root, &key, path, 0, 0);
		path->lowest_level = 0;
		if (ret < 0) {
			btrfs_free_path(path);
			return ret;
		}

		btrfs_node_key_to_cpu(path->nodes[level], &next_key,
				      path->slots[level]);
		WARN_ON(memcmp(&key, &next_key, sizeof(key)));

		btrfs_unlock_up_safe(path, 0);
	}

	/*
	 * In merge_reloc_root(), we modify the upper level pointer to swap the
	 * tree blocks between reloc tree and subvolume tree.  Thus for tree
	 * block COW, we COW at most from level 1 to root level for each tree.
	 *
	 * Thus the needed metadata size is at most root_level * nodesize,
	 * and * 2 since we have two trees to COW.
	 */
	reserve_level = max_t(int, 1, btrfs_root_level(root_item));
	min_reserved = fs_info->nodesize * reserve_level * 2;
	memset(&next_key, 0, sizeof(next_key));

	while (1) {
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     min_reserved,
					     BTRFS_RESERVE_FLUSH_LIMIT);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}

		/*
		 * At this point we no longer have a reloc_control, so we can't
		 * depend on btrfs_init_reloc_root to update our last_trans.
		 *
		 * But that's ok, we started the trans handle on our
		 * corresponding fs_root, which means it's been added to the
		 * dirty list.  At commit time we'll still call
		 * btrfs_update_reloc_root() and update our root item
		 * appropriately.
		 */
		reloc_root->last_trans = trans->transid;
		trans->block_rsv = rc->block_rsv;

		replaced = 0;
		max_level = level;

		ret = walk_down_reloc_tree(reloc_root, path, &level);
		if (ret < 0)
			goto out;
		if (ret > 0)
			break;

		if (!find_next_key(path, level, &key) &&
		    btrfs_comp_cpu_keys(&next_key, &key) >= 0) {
			/* Already swapped past this key on a previous pass */
			ret = 0;
		} else {
			ret = replace_path(trans, rc, root, reloc_root, path,
					   &next_key, level, max_level);
		}
		if (ret < 0)
			goto out;
		if (ret > 0) {
			/* A block was swapped at level ret - 1 */
			level = ret;
			btrfs_node_key_to_cpu(path->nodes[level], &key,
					      path->slots[level]);
			replaced = 1;
		}

		ret = walk_up_reloc_tree(reloc_root, path, &level);
		if (ret > 0)
			break;

		BUG_ON(level == 0);
		/*
		 * save the merging progress in the drop_progress.
		 * this is OK since root refs == 1 in this case.
		 */
		btrfs_node_key(path->nodes[level], &root_item->drop_progress,
			       path->slots[level]);
		btrfs_set_root_drop_level(root_item, level);

		btrfs_end_transaction_throttle(trans);
		trans = NULL;

		btrfs_btree_balance_dirty(fs_info);

		if (replaced && rc->stage == UPDATE_DATA_PTRS)
			invalidate_extent_cache(root, &key, &next_key);
	}

	/*
	 * handle the case only one block in the fs tree need to be
	 * relocated and the block is tree root.
	 */
	leaf = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, leaf, NULL, 0, &leaf,
			      BTRFS_NESTING_COW);
	btrfs_tree_unlock(leaf);
	free_extent_buffer(leaf);
out:
	btrfs_free_path(path);

	if (ret == 0) {
		/* ret == 0 is only reachable with an open transaction, the
		 * error gotos above all carry ret != 0. */
		ret = insert_dirty_subvol(trans, rc, root);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}

	if (trans)
		btrfs_end_transaction_throttle(trans);

	btrfs_btree_balance_dirty(fs_info);

	if (replaced && rc->stage == UPDATE_DATA_PTRS)
		invalidate_extent_cache(root, &key, &next_key);

	return ret;
}
1856 :
/*
 * Prepare all reloc trees for merging: reserve the metadata space the
 * merge will need, set each reloc root's refs to 1 (so recovery knows
 * to resume merging), write the updated root items and commit the
 * transaction.
 *
 * @err carries an already-pending error; when non-zero, reservation
 * and the refs update are skipped and the transaction is only ended.
 * Returns @err, or the first new error encountered.
 */
static noinline_for_stack
int prepare_to_merge(struct reloc_control *rc, int err)
{
	struct btrfs_root *root = rc->extent_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *reloc_root;
	struct btrfs_trans_handle *trans;
	LIST_HEAD(reloc_roots);
	u64 num_bytes = 0;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	rc->merging_rsv_size += fs_info->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
	rc->merging_rsv_size += rc->nodes_relocated * 2;
	mutex_unlock(&fs_info->reloc_mutex);

again:
	if (!err) {
		num_bytes = rc->merging_rsv_size;
		ret = btrfs_block_rsv_add(fs_info, rc->block_rsv, num_bytes,
					  BTRFS_RESERVE_FLUSH_ALL);
		if (ret)
			err = ret;
	}

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		if (!err)
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
		return PTR_ERR(trans);
	}

	if (!err) {
		/* merging_rsv_size grew while we reserved; redo the
		 * reservation with the larger size. */
		if (num_bytes != rc->merging_rsv_size) {
			btrfs_end_transaction(trans);
			btrfs_block_rsv_release(fs_info, rc->block_rsv,
						num_bytes, NULL);
			goto again;
		}
	}

	rc->merge_reloc_tree = 1;

	while (!list_empty(&rc->reloc_roots)) {
		reloc_root = list_entry(rc->reloc_roots.next,
					struct btrfs_root, root_list);
		list_del_init(&reloc_root->root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (IS_ERR(root)) {
			/*
			 * Even if we have an error we need this reloc root
			 * back on our list so we can clean up properly.
			 */
			list_add(&reloc_root->root_list, &reloc_roots);
			btrfs_abort_transaction(trans, (int)PTR_ERR(root));
			if (!err)
				err = PTR_ERR(root);
			break;
		}
		ASSERT(root->reloc_root == reloc_root);

		/*
		 * set reference count to 1, so btrfs_recover_relocation
		 * knows it should resumes merging
		 */
		if (!err)
			btrfs_set_root_refs(&reloc_root->root_item, 1);
		ret = btrfs_update_reloc_root(trans, root);

		/*
		 * Even if we have an error we need this reloc root back on our
		 * list so we can clean up properly.
		 */
		list_add(&reloc_root->root_list, &reloc_roots);
		btrfs_put_root(root);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (!err)
				err = ret;
			break;
		}
	}

	/* Put every processed reloc root back on rc->reloc_roots */
	list_splice(&reloc_roots, &rc->reloc_roots);

	if (!err)
		err = btrfs_commit_transaction(trans);
	else
		btrfs_end_transaction(trans);
	return err;
}
1952 :
1953 : static noinline_for_stack
1954 0 : void free_reloc_roots(struct list_head *list)
1955 : {
1956 0 : struct btrfs_root *reloc_root, *tmp;
1957 :
1958 0 : list_for_each_entry_safe(reloc_root, tmp, list, root_list)
1959 0 : __del_reloc_root(reloc_root);
1960 0 : }
1961 :
/*
 * Merge every reloc root on rc->reloc_roots into its subvolume tree.
 *
 * Reloc roots with refs > 0 are merged via merge_reloc_root(); roots
 * whose refs dropped to 0 are detached from their subvolume and queued
 * on rc->dirty_subvol_roots for cleanup.  Repeats until the list stays
 * empty, since new reloc roots may be added concurrently.
 */
static noinline_for_stack
void merge_reloc_roots(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_root *root;
	struct btrfs_root *reloc_root;
	LIST_HEAD(reloc_roots);
	int found = 0;
	int ret = 0;
again:
	root = rc->extent_root;

	/*
	 * this serializes us with btrfs_record_root_in_transaction,
	 * we have to make sure nobody is in the middle of
	 * adding their roots to the list while we are
	 * doing this splice
	 */
	mutex_lock(&fs_info->reloc_mutex);
	list_splice_init(&rc->reloc_roots, &reloc_roots);
	mutex_unlock(&fs_info->reloc_mutex);

	while (!list_empty(&reloc_roots)) {
		found = 1;
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);

		root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					 false);
		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			if (IS_ERR(root)) {
				/*
				 * For recovery we read the fs roots on mount,
				 * and if we didn't find the root then we marked
				 * the reloc root as a garbage root.  For normal
				 * relocation obviously the root should exist in
				 * memory.  However there's no reason we can't
				 * handle the error properly here just in case.
				 */
				ASSERT(0);
				ret = PTR_ERR(root);
				goto out;
			}
			if (root->reloc_root != reloc_root) {
				/*
				 * This is actually impossible without something
				 * going really wrong (like weird race condition
				 * or cosmic rays).
				 */
				ASSERT(0);
				ret = -EINVAL;
				goto out;
			}
			ret = merge_reloc_root(rc, root);
			btrfs_put_root(root);
			if (ret) {
				/* Requeue for cleanup before bailing out */
				if (list_empty(&reloc_root->root_list))
					list_add_tail(&reloc_root->root_list,
						      &reloc_roots);
				goto out;
			}
		} else {
			if (!IS_ERR(root)) {
				/* Detach the reloc root from its subvolume */
				if (root->reloc_root == reloc_root) {
					root->reloc_root = NULL;
					btrfs_put_root(reloc_root);
				}
				clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
					  &root->state);
				btrfs_put_root(root);
			}

			list_del_init(&reloc_root->root_list);
			/* Don't forget to queue this reloc root for cleanup */
			list_add_tail(&reloc_root->reloc_dirty_list,
				      &rc->dirty_subvol_roots);
		}
	}

	if (found) {
		/* We made progress; re-splice in case new roots arrived */
		found = 0;
		goto again;
	}
out:
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret, NULL);
		free_reloc_roots(&reloc_roots);

		/* new reloc root may be added */
		mutex_lock(&fs_info->reloc_mutex);
		list_splice_init(&rc->reloc_roots, &reloc_roots);
		mutex_unlock(&fs_info->reloc_mutex);
		free_reloc_roots(&reloc_roots);
	}

	/*
	 * We used to have
	 *
	 * BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
	 *
	 * here, but it's wrong.  If we fail to start the transaction in
	 * prepare_to_merge() we will have only 0 ref reloc roots, none of which
	 * have actually been removed from the reloc_root_tree rb tree.  This is
	 * fine because we're bailing here, and we hold a reference on the root
	 * for the list that holds it, so these roots will be cleaned up when we
	 * do the reloc_dirty_list afterwards.  Meanwhile the root->reloc_root
	 * will be cleaned up on unmount.
	 *
	 * The remaining nodes will be cleaned up by free_reloc_control.
	 */
}
2073 :
2074 0 : static void free_block_list(struct rb_root *blocks)
2075 : {
2076 0 : struct tree_block *block;
2077 0 : struct rb_node *rb_node;
2078 0 : while ((rb_node = rb_first(blocks))) {
2079 0 : block = rb_entry(rb_node, struct tree_block, rb_node);
2080 0 : rb_erase(rb_node, blocks);
2081 0 : kfree(block);
2082 : }
2083 0 : }
2084 :
/*
 * Make sure the subvolume root owning @reloc_root is recorded in the current
 * transaction.
 *
 * The owning root is looked up by @reloc_root's root_key.offset, which for a
 * reloc root holds the objectid of the subvolume it shadows.
 *
 * Returns 0 on success, -EUCLEAN if the owning root does not point back at
 * @reloc_root, a negative errno if the owning root cannot be found (both
 * indicate corruption or a bug), or the error from
 * btrfs_record_root_in_trans().
 */
static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
				      struct btrfs_root *reloc_root)
{
	struct btrfs_fs_info *fs_info = reloc_root->fs_info;
	struct btrfs_root *root;
	int ret;

	/* Already recorded in this transaction, nothing to do. */
	if (reloc_root->last_trans == trans->transid)
		return 0;

	root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);

	/*
	 * This should succeed, since we can't have a reloc root without having
	 * already looked up the actual root and created the reloc root for this
	 * root.
	 *
	 * However if there's some sort of corruption where we have a ref to a
	 * reloc root without a corresponding root this could return ENOENT.
	 */
	if (IS_ERR(root)) {
		ASSERT(0);
		return PTR_ERR(root);
	}
	if (root->reloc_root != reloc_root) {
		ASSERT(0);
		btrfs_err(fs_info,
			  "root %llu has two reloc roots associated with it",
			  reloc_root->root_key.offset);
		btrfs_put_root(root);
		return -EUCLEAN;
	}
	/* Drop our root reference in both the success and failure case. */
	ret = btrfs_record_root_in_trans(trans, root);
	btrfs_put_root(root);

	return ret;
}
2122 :
/*
 * Walk up the backref tree from @node until a root is found, record that
 * root (and its reloc root) in the current transaction, and return the
 * reloc root that should be used for COWing the path down to @node.
 *
 * On success, backref_cache.path[] is populated from @node's level upward
 * (using the edges walked) so btrfs_reloc_cow_block() can find the backref
 * nodes later.
 *
 * Returns an ERR_PTR() on failure; -EUCLEAN indicates corrupted backrefs or
 * a bug in the backref walking code, -ENOENT a race with a failing thread.
 */
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
				     struct reloc_control *rc,
				     struct btrfs_backref_node *node,
				     struct btrfs_backref_edge *edges[])
{
	struct btrfs_backref_node *next;
	struct btrfs_root *root;
	int index = 0;
	int ret;

	next = node;
	while (1) {
		cond_resched();
		next = walk_up_backref(next, edges, &index);
		root = next->root;

		/*
		 * If there is no root, then our references for this block are
		 * incomplete, as we should be able to walk all the way up to a
		 * block that is owned by a root.
		 *
		 * This path is only for SHAREABLE roots, so if we come upon a
		 * non-SHAREABLE root then we have backrefs that resolve
		 * improperly.
		 *
		 * Both of these cases indicate file system corruption, or a bug
		 * in the backref walking code.
		 */
		if (!root) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
		"bytenr %llu doesn't have a backref path ending in a root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}
		if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
			ASSERT(0);
			btrfs_err(trans->fs_info,
	"bytenr %llu has multiple refs with one ending in a non-shareable root",
				  node->bytenr);
			return ERR_PTR(-EUCLEAN);
		}

		/* Reached a reloc root directly, just record it and stop. */
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
			ret = record_reloc_root_in_trans(trans, root);
			if (ret)
				return ERR_PTR(ret);
			break;
		}

		ret = btrfs_record_root_in_trans(trans, root);
		if (ret)
			return ERR_PTR(ret);
		root = root->reloc_root;

		/*
		 * We could have raced with another thread which failed, so
		 * root->reloc_root may not be set, return ENOENT in this case.
		 */
		if (!root)
			return ERR_PTR(-ENOENT);

		if (next->new_bytenr != root->node->start) {
			/*
			 * We just created the reloc root, so we shouldn't have
			 * ->new_bytenr set and this shouldn't be in the changed
			 * list. If it is then we have multiple roots pointing
			 * at the same bytenr which indicates corruption, or
			 * we've made a mistake in the backref walking code.
			 */
			ASSERT(next->new_bytenr == 0);
			ASSERT(list_empty(&next->list));
			if (next->new_bytenr || !list_empty(&next->list)) {
				btrfs_err(trans->fs_info,
	"bytenr %llu possibly has multiple roots pointing at the same bytenr %llu",
					  node->bytenr, next->bytenr);
				return ERR_PTR(-EUCLEAN);
			}

			/* Remember the new location of the tree root. */
			next->new_bytenr = root->node->start;
			btrfs_put_root(next->root);
			next->root = btrfs_grab_root(root);
			ASSERT(next->root);
			list_add_tail(&next->list,
				      &rc->backref_cache.changed);
			mark_block_processed(rc, next);
			break;
		}

		WARN_ON(1);
		root = NULL;
		next = walk_down_backref(edges, &index);
		if (!next || next->level <= node->level)
			break;
	}
	if (!root) {
		/*
		 * This can happen if there's fs corruption or if there's a bug
		 * in the backref lookup code.
		 */
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	next = node;
	/* setup backref node path for btrfs_reloc_cow_block */
	while (1) {
		rc->backref_cache.path[next->level] = next;
		if (--index < 0)
			break;
		next = edges[index]->node[UPPER];
	}
	return root;
}
2238 :
2239 : /*
2240 : * Select a tree root for relocation.
2241 : *
2242 : * Return NULL if the block is not shareable. We should use do_relocation() in
2243 : * this case.
2244 : *
2245 : * Return a tree root pointer if the block is shareable.
2246 : * Return -ENOENT if the block is root of reloc tree.
2247 : */
2248 : static noinline_for_stack
2249 0 : struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
2250 : {
2251 0 : struct btrfs_backref_node *next;
2252 0 : struct btrfs_root *root;
2253 0 : struct btrfs_root *fs_root = NULL;
2254 0 : struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2255 0 : int index = 0;
2256 :
2257 0 : next = node;
2258 0 : while (1) {
2259 0 : cond_resched();
2260 0 : next = walk_up_backref(next, edges, &index);
2261 0 : root = next->root;
2262 :
2263 : /*
2264 : * This can occur if we have incomplete extent refs leading all
2265 : * the way up a particular path, in this case return -EUCLEAN.
2266 : */
2267 0 : if (!root)
2268 : return ERR_PTR(-EUCLEAN);
2269 :
2270 : /* No other choice for non-shareable tree */
2271 0 : if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2272 0 : return root;
2273 :
2274 0 : if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
2275 0 : fs_root = root;
2276 :
2277 0 : if (next != node)
2278 : return NULL;
2279 :
2280 0 : next = walk_down_backref(edges, &index);
2281 0 : if (!next || next->level <= node->level)
2282 : break;
2283 : }
2284 :
2285 0 : if (!fs_root)
2286 0 : return ERR_PTR(-ENOENT);
2287 : return fs_root;
2288 : }
2289 :
/*
 * Compute how many bytes of metadata are involved in relocating @node:
 * one nodesize for @node plus one for every not-yet-processed block that
 * directly or indirectly references it, found by walking the backref cache
 * (depth-first via the edges[] stack).
 *
 * @reserve: non-zero when sizing a reservation before @node is relocated
 *           (then @node must not be processed yet, enforced by the BUG_ON);
 *           when zero, @node itself is counted even if already processed,
 *           but processed ancestors still terminate the walk.
 */
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
			struct btrfs_backref_node *node, int reserve)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_backref_node *next = node;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	u64 num_bytes = 0;
	int index = 0;

	BUG_ON(reserve && node->processed);

	while (next) {
		cond_resched();
		while (1) {
			/* Processed blocks (other than @node when counting) stop the walk. */
			if (next->processed && (reserve || next != node))
				break;

			num_bytes += fs_info->nodesize;

			if (list_empty(&next->upper))
				break;

			/* Descend-up along the first upper edge; siblings via walk_down. */
			edge = list_entry(next->upper.next,
					  struct btrfs_backref_edge, list[LOWER]);
			edges[index++] = edge;
			next = edge->node[UPPER];
		}
		next = walk_down_backref(edges, &index);
	}
	return num_bytes;
}
2323 :
2324 0 : static int reserve_metadata_space(struct btrfs_trans_handle *trans,
2325 : struct reloc_control *rc,
2326 : struct btrfs_backref_node *node)
2327 : {
2328 0 : struct btrfs_root *root = rc->extent_root;
2329 0 : struct btrfs_fs_info *fs_info = root->fs_info;
2330 0 : u64 num_bytes;
2331 0 : int ret;
2332 0 : u64 tmp;
2333 :
2334 0 : num_bytes = calcu_metadata_size(rc, node, 1) * 2;
2335 :
2336 0 : trans->block_rsv = rc->block_rsv;
2337 0 : rc->reserved_bytes += num_bytes;
2338 :
2339 : /*
2340 : * We are under a transaction here so we can only do limited flushing.
2341 : * If we get an enospc just kick back -EAGAIN so we know to drop the
2342 : * transaction and try to refill when we can flush all the things.
2343 : */
2344 0 : ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv, num_bytes,
2345 : BTRFS_RESERVE_FLUSH_LIMIT);
2346 0 : if (ret) {
2347 0 : tmp = fs_info->nodesize * RELOCATION_RESERVED_NODES;
2348 0 : while (tmp <= rc->reserved_bytes)
2349 0 : tmp <<= 1;
2350 : /*
2351 : * only one thread can access block_rsv at this point,
2352 : * so we don't need hold lock to protect block_rsv.
2353 : * we expand more reservation size here to allow enough
2354 : * space for relocation and we will return earlier in
2355 : * enospc case.
2356 : */
2357 0 : rc->block_rsv->size = tmp + fs_info->nodesize *
2358 : RELOCATION_RESERVED_NODES;
2359 0 : return -EAGAIN;
2360 : }
2361 :
2362 : return 0;
2363 : }
2364 :
/*
 * Relocate a tree block, and then update pointers in upper level blocks that
 * reference the block so they point to the new location.
 *
 * @lowest: non-zero when called for the block being relocated itself (first
 *          visit; node->eb must not be set yet).  If called by
 *          link_to_upper() (@lowest == 0), the block has already been
 *          relocated and this function only updates the parent pointers.
 *
 * For each parent edge, the matching reloc root is selected and the parent
 * block looked up; the child is then either COWed in place (first visit) or
 * the parent's block pointer is swapped to the new location and the old
 * subtree's references dropped.
 */
static int do_relocation(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc,
			 struct btrfs_backref_node *node,
			 struct btrfs_key *key,
			 struct btrfs_path *path, int lowest)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
	struct btrfs_root *root;
	struct extent_buffer *eb;
	u32 blocksize;
	u64 bytenr;
	int slot;
	int ret = 0;

	/*
	 * If we are lowest then this is the first time we're processing this
	 * block, and thus shouldn't have an eb associated with it yet.
	 */
	ASSERT(!lowest || !node->eb);

	path->lowest_level = node->level + 1;
	rc->backref_cache.path[node->level] = node;
	list_for_each_entry(edge, &node->upper, list[LOWER]) {
		struct btrfs_ref ref = { 0 };

		cond_resched();

		upper = edge->node[UPPER];
		root = select_reloc_root(trans, rc, upper, edges);
		if (IS_ERR(root)) {
			ret = PTR_ERR(root);
			goto next;
		}

		/*
		 * If we have a cached but unlocked parent buffer, check it is
		 * still current before reusing it; otherwise drop it and
		 * re-search below.
		 */
		if (upper->eb && !upper->locked) {
			if (!lowest) {
				ret = btrfs_bin_search(upper->eb, 0, key, &slot);
				if (ret < 0)
					goto next;
				BUG_ON(ret);
				bytenr = btrfs_node_blockptr(upper->eb, slot);
				/* Parent already points at the new location. */
				if (node->eb->start == bytenr)
					goto next;
			}
			btrfs_backref_drop_node_buffer(upper);
		}

		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			if (ret) {
				if (ret > 0)
					ret = -ENOENT;

				btrfs_release_path(path);
				break;
			}

			if (!upper->eb) {
				/* Steal the locked buffer from the path. */
				upper->eb = path->nodes[upper->level];
				path->nodes[upper->level] = NULL;
			} else {
				BUG_ON(upper->eb != path->nodes[upper->level]);
			}

			upper->locked = 1;
			path->locks[upper->level] = 0;

			slot = path->slots[upper->level];
			btrfs_release_path(path);
		} else {
			ret = btrfs_bin_search(upper->eb, 0, key, &slot);
			if (ret < 0)
				goto next;
			BUG_ON(ret);
		}

		bytenr = btrfs_node_blockptr(upper->eb, slot);
		if (lowest) {
			/* First visit: parent must still point at the old location. */
			if (bytenr != node->bytenr) {
				btrfs_err(root->fs_info,
	"lowest leaf/node mismatch: bytenr %llu node->bytenr %llu slot %d upper %llu",
					  bytenr, node->bytenr, slot,
					  upper->eb->start);
				ret = -EIO;
				goto next;
			}
		} else {
			if (node->eb->start == bytenr)
				goto next;
		}

		blocksize = root->fs_info->nodesize;
		eb = btrfs_read_node_slot(upper->eb, slot);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			goto next;
		}
		btrfs_tree_lock(eb);

		if (!node->eb) {
			/* First visit: COW moves the block to its new home. */
			ret = btrfs_cow_block(trans, root, eb, upper->eb,
					      slot, &eb, BTRFS_NESTING_COW);
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
			if (ret < 0)
				goto next;
			/*
			 * We've just COWed this block, it should have updated
			 * the correct backref node entry.
			 */
			ASSERT(node->eb == eb);
		} else {
			/*
			 * Already relocated: swap the parent's pointer to the
			 * new location, add a ref for the new pointer, and
			 * drop the old subtree.
			 */
			btrfs_set_node_blockptr(upper->eb, slot,
						node->eb->start);
			btrfs_set_node_ptr_generation(upper->eb, slot,
						      trans->transid);
			btrfs_mark_buffer_dirty(upper->eb);

			btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
					       node->eb->start, blocksize,
					       upper->eb->start);
			btrfs_init_tree_ref(&ref, node->level,
					    btrfs_header_owner(upper->eb),
					    root->root_key.objectid, false);
			ret = btrfs_inc_extent_ref(trans, &ref);
			if (!ret)
				ret = btrfs_drop_subtree(trans, root, eb,
							 upper->eb);
			if (ret)
				btrfs_abort_transaction(trans, ret);
		}
next:
		if (!upper->pending)
			btrfs_backref_drop_node_buffer(upper);
		else
			btrfs_backref_unlock_node_buffer(upper);
		if (ret)
			break;
	}

	if (!ret && node->pending) {
		btrfs_backref_drop_node_buffer(node);
		list_move_tail(&node->list, &rc->backref_cache.changed);
		node->pending = 0;
	}

	path->lowest_level = 0;

	/*
	 * We should have allocated all of our space in the block rsv and thus
	 * shouldn't ENOSPC.
	 */
	ASSERT(ret != -ENOSPC);
	return ret;
}
2529 :
2530 0 : static int link_to_upper(struct btrfs_trans_handle *trans,
2531 : struct reloc_control *rc,
2532 : struct btrfs_backref_node *node,
2533 : struct btrfs_path *path)
2534 : {
2535 0 : struct btrfs_key key;
2536 :
2537 0 : btrfs_node_key_to_cpu(node->eb, &key, 0);
2538 0 : return do_relocation(trans, rc, node, &key, path, 0);
2539 : }
2540 :
/*
 * Drain the backref cache's per-level pending lists, linking every pending
 * node to its upper blocks via link_to_upper().
 *
 * If @err is already set, or a link fails, the remaining nodes are still
 * cycled through the local list (and spliced back) so the pending lists end
 * up consistent, but no further linking is attempted.  Returns @err or the
 * first link error encountered.
 */
static int finish_pending_nodes(struct btrfs_trans_handle *trans,
				struct reloc_control *rc,
				struct btrfs_path *path, int err)
{
	LIST_HEAD(list);
	struct btrfs_backref_cache *cache = &rc->backref_cache;
	struct btrfs_backref_node *node;
	int level;
	int ret;

	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
		while (!list_empty(&cache->pending[level])) {
			node = list_entry(cache->pending[level].next,
					  struct btrfs_backref_node, list);
			/* Park on the local list so the loop terminates. */
			list_move_tail(&node->list, &list);
			BUG_ON(!node->pending);

			if (!err) {
				ret = link_to_upper(trans, rc, node, path);
				if (ret < 0)
					err = ret;
			}
		}
		/* Put the processed nodes back on the pending list. */
		list_splice_init(&list, &cache->pending[level]);
	}
	return err;
}
2568 :
2569 : /*
2570 : * mark a block and all blocks directly/indirectly reference the block
2571 : * as processed.
2572 : */
2573 0 : static void update_processed_blocks(struct reloc_control *rc,
2574 : struct btrfs_backref_node *node)
2575 : {
2576 0 : struct btrfs_backref_node *next = node;
2577 0 : struct btrfs_backref_edge *edge;
2578 0 : struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
2579 0 : int index = 0;
2580 :
2581 0 : while (next) {
2582 0 : cond_resched();
2583 0 : while (1) {
2584 0 : if (next->processed)
2585 : break;
2586 :
2587 0 : mark_block_processed(rc, next);
2588 :
2589 0 : if (list_empty(&next->upper))
2590 : break;
2591 :
2592 0 : edge = list_entry(next->upper.next,
2593 : struct btrfs_backref_edge, list[LOWER]);
2594 0 : edges[index++] = edge;
2595 0 : next = edge->node[UPPER];
2596 : }
2597 0 : next = walk_down_backref(edges, &index);
2598 : }
2599 0 : }
2600 :
2601 0 : static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
2602 : {
2603 0 : u32 blocksize = rc->extent_root->fs_info->nodesize;
2604 :
2605 0 : if (test_range_bit(&rc->processed_blocks, bytenr,
2606 0 : bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
2607 0 : return 1;
2608 : return 0;
2609 : }
2610 :
2611 0 : static int get_tree_block_key(struct btrfs_fs_info *fs_info,
2612 : struct tree_block *block)
2613 : {
2614 0 : struct btrfs_tree_parent_check check = {
2615 0 : .level = block->level,
2616 0 : .owner_root = block->owner,
2617 0 : .transid = block->key.offset
2618 : };
2619 0 : struct extent_buffer *eb;
2620 :
2621 0 : eb = read_tree_block(fs_info, block->bytenr, &check);
2622 0 : if (IS_ERR(eb))
2623 0 : return PTR_ERR(eb);
2624 0 : if (!extent_buffer_uptodate(eb)) {
2625 0 : free_extent_buffer(eb);
2626 0 : return -EIO;
2627 : }
2628 0 : if (block->level == 0)
2629 0 : btrfs_item_key_to_cpu(eb, &block->key, 0);
2630 : else
2631 0 : btrfs_node_key_to_cpu(eb, &block->key, 0);
2632 0 : free_extent_buffer(eb);
2633 0 : block->key_ready = 1;
2634 0 : return 0;
2635 : }
2636 :
/*
 * Relocate a single tree block described by the backref @node.
 *
 * After reserving metadata space, a root is selected via select_one_root():
 *  - NULL: the block is shared inside a tree, relocate via do_relocation();
 *  - shareable root: the block is that root's own tree root, record its new
 *    location in the backref cache;
 *  - non-shareable root (e.g. chunk tree): COW the block in place with a
 *    btrfs_search_slot() down to its level.
 *
 * The backref node is cleaned up on error, and for leaves and COW-only
 * blocks once they're done.
 */
static int relocate_tree_block(struct btrfs_trans_handle *trans,
			       struct reloc_control *rc,
			       struct btrfs_backref_node *node,
			       struct btrfs_key *key,
			       struct btrfs_path *path)
{
	struct btrfs_root *root;
	int ret = 0;

	if (!node)
		return 0;

	/*
	 * If we fail here we want to drop our backref_node because we are going
	 * to start over and regenerate the tree for it.
	 */
	ret = reserve_metadata_space(trans, rc, node);
	if (ret)
		goto out;

	BUG_ON(node->processed);
	root = select_one_root(node);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);

		/* See explanation in select_one_root for the -EUCLEAN case. */
		ASSERT(ret == -ENOENT);
		if (ret == -ENOENT) {
			/* Root of a reloc tree: nothing to move, just mark done. */
			ret = 0;
			update_processed_blocks(rc, node);
		}
		goto out;
	}

	if (root) {
		if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
			/*
			 * This block was the root block of a root, and this is
			 * the first time we're processing the block and thus it
			 * should not have had the ->new_bytenr modified and
			 * should have not been included on the changed list.
			 *
			 * However in the case of corruption we could have
			 * multiple refs pointing to the same block improperly,
			 * and thus we would trip over these checks. ASSERT()
			 * for the developer case, because it could indicate a
			 * bug in the backref code, however error out for a
			 * normal user in the case of corruption.
			 */
			ASSERT(node->new_bytenr == 0);
			ASSERT(list_empty(&node->list));
			if (node->new_bytenr || !list_empty(&node->list)) {
				btrfs_err(root->fs_info,
				  "bytenr %llu has improper references to it",
					  node->bytenr);
				ret = -EUCLEAN;
				goto out;
			}
			ret = btrfs_record_root_in_trans(trans, root);
			if (ret)
				goto out;
			/*
			 * Another thread could have failed, need to check if we
			 * have reloc_root actually set.
			 */
			if (!root->reloc_root) {
				ret = -ENOENT;
				goto out;
			}
			/* Record the reloc root's node as the new location. */
			root = root->reloc_root;
			node->new_bytenr = root->node->start;
			btrfs_put_root(node->root);
			node->root = btrfs_grab_root(root);
			ASSERT(node->root);
			list_add_tail(&node->list, &rc->backref_cache.changed);
		} else {
			/* Non-shareable tree: COW in place via a search. */
			path->lowest_level = node->level;
			if (root == root->fs_info->chunk_root)
				btrfs_reserve_chunk_metadata(trans, false);
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
			btrfs_release_path(path);
			if (root == root->fs_info->chunk_root)
				btrfs_trans_release_chunk_metadata(trans);
			if (ret > 0)
				ret = 0;
		}
		if (!ret)
			update_processed_blocks(rc, node);
	} else {
		ret = do_relocation(trans, rc, node, key, path, 1);
	}
out:
	if (ret || node->level == 0 || node->cowonly)
		btrfs_backref_cleanup_node(&rc->backref_cache, node);
	return ret;
}
2736 :
/*
 * Relocate a list of tree blocks.
 *
 * Three passes over the blocks rb tree: readahead for blocks whose keys are
 * not cached yet, then key lookup via get_tree_block_key(), then backref
 * tree construction plus relocation for each block.  Pending upper-level
 * updates are flushed through finish_pending_nodes(), and the block list is
 * always freed, even on error.
 */
static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
			 struct reloc_control *rc, struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_backref_node *node;
	struct btrfs_path *path;
	struct tree_block *block;
	struct tree_block *next;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_free_blocks;
	}

	/* Kick in readahead for tree blocks with missing keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready)
			btrfs_readahead_tree_block(fs_info, block->bytenr,
						   block->owner, 0,
						   block->level);
	}

	/* Get first keys */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		if (!block->key_ready) {
			err = get_tree_block_key(fs_info, block);
			if (err)
				goto out_free_path;
		}
	}

	/* Do tree relocation */
	rbtree_postorder_for_each_entry_safe(block, next, blocks, rb_node) {
		node = build_backref_tree(rc, &block->key,
					  block->level, block->bytenr);
		if (IS_ERR(node)) {
			err = PTR_ERR(node);
			goto out;
		}

		ret = relocate_tree_block(trans, rc, node, &block->key,
					  path);
		if (ret < 0) {
			err = ret;
			break;
		}
	}
out:
	/* Even on error the pending lists must be drained. */
	err = finish_pending_nodes(trans, rc, path, err);

out_free_path:
	btrfs_free_path(path);
out_free_blocks:
	free_block_list(blocks);
	return err;
}
2800 :
/*
 * Preallocate the file ranges in the data relocation inode that back
 * @cluster, one preallocated extent per cluster boundary.
 *
 * inode->index_cnt holds the offset between the cluster's logical addresses
 * and file offsets in the relocation inode.  Space reserved beyond the last
 * successfully preallocated range is released at the end.
 */
static noinline_for_stack int prealloc_file_extent_cluster(
				struct btrfs_inode *inode,
				struct file_extent_cluster *cluster)
{
	u64 alloc_hint = 0;
	u64 start;
	u64 end;
	u64 offset = inode->index_cnt;
	u64 num_bytes;
	int nr;
	int ret = 0;
	u64 i_size = i_size_read(&inode->vfs_inode);
	u64 prealloc_start = cluster->start - offset;
	u64 prealloc_end = cluster->end - offset;
	u64 cur_offset = prealloc_start;

	/*
	 * For subpage case, previous i_size may not be aligned to PAGE_SIZE.
	 * This means the range [i_size, PAGE_END + 1) is filled with zeros by
	 * btrfs_do_readpage() call of previously relocated file cluster.
	 *
	 * If the current cluster starts in the above range, btrfs_do_readpage()
	 * will skip the read, and relocate_one_page() will later writeback
	 * the padding zeros as new data, causing data corruption.
	 *
	 * Here we have to manually invalidate the range (i_size, PAGE_END + 1).
	 */
	if (!PAGE_ALIGNED(i_size)) {
		struct address_space *mapping = inode->vfs_inode.i_mapping;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		const u32 sectorsize = fs_info->sectorsize;
		struct page *page;

		/* Only subpage (sectorsize < PAGE_SIZE) can hit this branch. */
		ASSERT(sectorsize < PAGE_SIZE);
		ASSERT(IS_ALIGNED(i_size, sectorsize));

		/*
		 * Subpage can't handle page with DIRTY but without UPTODATE
		 * bit as it can lead to the following deadlock:
		 *
		 * btrfs_read_folio()
		 * | Page already *locked*
		 * |- btrfs_lock_and_flush_ordered_range()
		 *    |- btrfs_start_ordered_extent()
		 *       |- extent_write_cache_pages()
		 *          |- lock_page()
		 *             We try to lock the page we already hold.
		 *
		 * Here we just writeback the whole data reloc inode, so that
		 * we will be ensured to have no dirty range in the page, and
		 * are safe to clear the uptodate bits.
		 *
		 * This shouldn't cause too much overhead, as we need to write
		 * the data back anyway.
		 */
		ret = filemap_write_and_wait(mapping);
		if (ret < 0)
			return ret;

		clear_extent_bits(&inode->io_tree, i_size,
				  round_up(i_size, PAGE_SIZE) - 1,
				  EXTENT_UPTODATE);
		page = find_lock_page(mapping, i_size >> PAGE_SHIFT);
		/*
		 * If page is freed we don't need to do anything then, as we
		 * will re-read the whole page anyway.
		 */
		if (page) {
			btrfs_subpage_clear_uptodate(fs_info, page, i_size,
					round_up(i_size, PAGE_SIZE) - i_size);
			unlock_page(page);
			put_page(page);
		}
	}

	/* The first boundary always starts the cluster. */
	BUG_ON(cluster->start != cluster->boundary[0]);
	ret = btrfs_alloc_data_chunk_ondemand(inode,
					      prealloc_end + 1 - prealloc_start);
	if (ret)
		return ret;

	btrfs_inode_lock(inode, 0);
	for (nr = 0; nr < cluster->nr; nr++) {
		struct extent_state *cached_state = NULL;

		/* Each range runs from its boundary to just before the next. */
		start = cluster->boundary[nr] - offset;
		if (nr + 1 < cluster->nr)
			end = cluster->boundary[nr + 1] - 1 - offset;
		else
			end = cluster->end - offset;

		lock_extent(&inode->io_tree, start, end, &cached_state);
		num_bytes = end + 1 - start;
		ret = btrfs_prealloc_file_range(&inode->vfs_inode, 0, start,
						num_bytes, num_bytes,
						end + 1, &alloc_hint);
		cur_offset = end + 1;
		unlock_extent(&inode->io_tree, start, end, &cached_state);
		if (ret)
			break;
	}
	btrfs_inode_unlock(inode, 0);

	/* Give back the data space we reserved but didn't preallocate. */
	if (cur_offset < prealloc_end)
		btrfs_free_reserved_data_space_noquota(inode->root->fs_info,
					prealloc_end + 1 - cur_offset);
	return ret;
}
2909 :
2910 0 : static noinline_for_stack int setup_relocation_extent_mapping(struct inode *inode,
2911 : u64 start, u64 end, u64 block_start)
2912 : {
2913 0 : struct extent_map *em;
2914 0 : struct extent_state *cached_state = NULL;
2915 0 : int ret = 0;
2916 :
2917 0 : em = alloc_extent_map();
2918 0 : if (!em)
2919 : return -ENOMEM;
2920 :
2921 0 : em->start = start;
2922 0 : em->len = end + 1 - start;
2923 0 : em->block_len = em->len;
2924 0 : em->block_start = block_start;
2925 0 : set_bit(EXTENT_FLAG_PINNED, &em->flags);
2926 :
2927 0 : lock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2928 0 : ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, false);
2929 0 : unlock_extent(&BTRFS_I(inode)->io_tree, start, end, &cached_state);
2930 0 : free_extent_map(em);
2931 :
2932 0 : return ret;
2933 : }
2934 :
2935 : /*
2936 : * Allow error injection to test balance/relocation cancellation
2937 : */
2938 0 : noinline int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info)
2939 : {
2940 0 : return atomic_read(&fs_info->balance_cancel_req) ||
2941 0 : atomic_read(&fs_info->reloc_cancel_req) ||
2942 0 : fatal_signal_pending(current);
2943 : }
2944 : ALLOW_ERROR_INJECTION(btrfs_should_cancel_balance, TRUE);
2945 :
/*
 * Return the (inclusive) end of the extent at index @cluster_nr inside
 * @cluster, in the cluster's logical address space.
 */
static u64 get_cluster_boundary_end(struct file_extent_cluster *cluster,
				    int cluster_nr)
{
	/* Last extent, use cluster end directly */
	if (cluster_nr >= cluster->nr - 1)
		return cluster->end;

	/* Otherwise it ends one byte before the next extent's boundary. */
	return cluster->boundary[cluster_nr + 1] - 1;
}
2956 :
/*
 * Relocate the data covered by one page of the data relocation inode.
 *
 * The page is read in (or created), then every extent of @cluster that
 * overlaps the page is marked delalloc and dirty for later writeback, with
 * EXTENT_BOUNDARY set at each extent's start so the destination extents
 * keep the source extents' sizes.  *cluster_nr advances as extent
 * boundaries are crossed.
 *
 * Returns 0 on success, -ECANCELED if balance was cancelled, or a negative
 * errno.
 */
static int relocate_one_page(struct inode *inode, struct file_ra_state *ra,
			     struct file_extent_cluster *cluster,
			     int *cluster_nr, unsigned long page_index)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	/* Offset between cluster logical addresses and file offsets. */
	u64 offset = BTRFS_I(inode)->index_cnt;
	const unsigned long last_index = (cluster->end - offset) >> PAGE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	struct page *page;
	u64 page_start;
	u64 page_end;
	u64 cur;
	int ret;

	ASSERT(page_index <= last_index);
	page = find_lock_page(inode->i_mapping, page_index);
	if (!page) {
		/* Page not cached yet: read ahead up to the end of the cluster. */
		page_cache_sync_readahead(inode->i_mapping, ra, NULL,
				page_index, last_index + 1 - page_index);
		page = find_or_create_page(inode->i_mapping, page_index, mask);
		if (!page)
			return -ENOMEM;
	}
	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto release_page;

	if (PageReadahead(page))
		page_cache_async_readahead(inode->i_mapping, ra, NULL,
				page_folio(page), page_index,
				last_index + 1 - page_index);

	if (!PageUptodate(page)) {
		btrfs_read_folio(NULL, page_folio(page));
		lock_page(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto release_page;
		}
	}

	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;

	/*
	 * Start from the cluster, as for subpage case, the cluster can start
	 * inside the page.
	 */
	cur = max(page_start, cluster->boundary[*cluster_nr] - offset);
	while (cur <= page_end) {
		struct extent_state *cached_state = NULL;
		u64 extent_start = cluster->boundary[*cluster_nr] - offset;
		u64 extent_end = get_cluster_boundary_end(cluster,
							  *cluster_nr) - offset;
		/* Clamp the current extent to this page. */
		u64 clamped_start = max(page_start, extent_start);
		u64 clamped_end = min(page_end, extent_end);
		u32 clamped_len = clamped_end + 1 - clamped_start;

		/* Reserve metadata for this range */
		ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode),
						      clamped_len, clamped_len,
						      false);
		if (ret)
			goto release_page;

		/* Mark the range delalloc and dirty for later writeback */
		lock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			    &cached_state);
		ret = btrfs_set_extent_delalloc(BTRFS_I(inode), clamped_start,
						clamped_end, 0, &cached_state);
		if (ret) {
			/* Roll back the reservation made just above. */
			clear_extent_bit(&BTRFS_I(inode)->io_tree,
					 clamped_start, clamped_end,
					 EXTENT_LOCKED | EXTENT_BOUNDARY,
					 &cached_state);
			btrfs_delalloc_release_metadata(BTRFS_I(inode),
							clamped_len, true);
			btrfs_delalloc_release_extents(BTRFS_I(inode),
						       clamped_len);
			goto release_page;
		}
		btrfs_page_set_dirty(fs_info, page, clamped_start, clamped_len);

		/*
		 * Set the boundary if it's inside the page.
		 * Data relocation requires the destination extents to have the
		 * same size as the source.
		 * EXTENT_BOUNDARY bit prevents current extent from being merged
		 * with previous extent.
		 */
		if (in_range(cluster->boundary[*cluster_nr] - offset,
			     page_start, PAGE_SIZE)) {
			u64 boundary_start = cluster->boundary[*cluster_nr] -
					     offset;
			u64 boundary_end = boundary_start +
					   fs_info->sectorsize - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree,
				       boundary_start, boundary_end,
				       EXTENT_BOUNDARY, NULL);
		}
		unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end,
			      &cached_state);
		btrfs_delalloc_release_extents(BTRFS_I(inode), clamped_len);
		cur += clamped_len;

		/* Crossed extent end, go to next extent */
		if (cur >= extent_end) {
			(*cluster_nr)++;
			/* Just finished the last extent of the cluster, exit. */
			if (*cluster_nr >= cluster->nr)
				break;
		}
	}
	unlock_page(page);
	put_page(page);

	balance_dirty_pages_ratelimited(inode->i_mapping);
	btrfs_throttle(fs_info);
	if (btrfs_should_cancel_balance(fs_info))
		ret = -ECANCELED;
	return ret;

release_page:
	unlock_page(page);
	put_page(page);
	return ret;
}
3085 :
3086 0 : static int relocate_file_extent_cluster(struct inode *inode,
3087 : struct file_extent_cluster *cluster)
3088 : {
3089 0 : u64 offset = BTRFS_I(inode)->index_cnt;
3090 0 : unsigned long index;
3091 0 : unsigned long last_index;
3092 0 : struct file_ra_state *ra;
3093 0 : int cluster_nr = 0;
3094 0 : int ret = 0;
3095 :
3096 0 : if (!cluster->nr)
3097 : return 0;
3098 :
3099 0 : ra = kzalloc(sizeof(*ra), GFP_NOFS);
3100 0 : if (!ra)
3101 : return -ENOMEM;
3102 :
3103 0 : ret = prealloc_file_extent_cluster(BTRFS_I(inode), cluster);
3104 0 : if (ret)
3105 0 : goto out;
3106 :
3107 0 : file_ra_state_init(ra, inode->i_mapping);
3108 :
3109 0 : ret = setup_relocation_extent_mapping(inode, cluster->start - offset,
3110 0 : cluster->end - offset, cluster->start);
3111 0 : if (ret)
3112 0 : goto out;
3113 :
3114 0 : last_index = (cluster->end - offset) >> PAGE_SHIFT;
3115 0 : for (index = (cluster->start - offset) >> PAGE_SHIFT;
3116 0 : index <= last_index && !ret; index++)
3117 0 : ret = relocate_one_page(inode, ra, cluster, &cluster_nr, index);
3118 0 : if (ret == 0)
3119 0 : WARN_ON(cluster_nr != cluster->nr);
3120 0 : out:
3121 0 : kfree(ra);
3122 0 : return ret;
3123 : }
3124 :
3125 : static noinline_for_stack
3126 0 : int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key,
3127 : struct file_extent_cluster *cluster)
3128 : {
3129 0 : int ret;
3130 :
3131 0 : if (cluster->nr > 0 && extent_key->objectid != cluster->end + 1) {
3132 0 : ret = relocate_file_extent_cluster(inode, cluster);
3133 0 : if (ret)
3134 : return ret;
3135 0 : cluster->nr = 0;
3136 : }
3137 :
3138 0 : if (!cluster->nr)
3139 0 : cluster->start = extent_key->objectid;
3140 : else
3141 0 : BUG_ON(cluster->nr >= MAX_EXTENTS);
3142 0 : cluster->end = extent_key->objectid + extent_key->offset - 1;
3143 0 : cluster->boundary[cluster->nr] = extent_key->objectid;
3144 0 : cluster->nr++;
3145 :
3146 0 : if (cluster->nr >= MAX_EXTENTS) {
3147 0 : ret = relocate_file_extent_cluster(inode, cluster);
3148 0 : if (ret)
3149 : return ret;
3150 0 : cluster->nr = 0;
3151 : }
3152 : return 0;
3153 : }
3154 :
3155 : /*
3156 : * helper to add a tree block to the list.
3157 : * the major work is getting the generation and level of the block
3158 : */
3159 0 : static int add_tree_block(struct reloc_control *rc,
3160 : struct btrfs_key *extent_key,
3161 : struct btrfs_path *path,
3162 : struct rb_root *blocks)
3163 : {
3164 0 : struct extent_buffer *eb;
3165 0 : struct btrfs_extent_item *ei;
3166 0 : struct btrfs_tree_block_info *bi;
3167 0 : struct tree_block *block;
3168 0 : struct rb_node *rb_node;
3169 0 : u32 item_size;
3170 0 : int level = -1;
3171 0 : u64 generation;
3172 0 : u64 owner = 0;
3173 :
3174 0 : eb = path->nodes[0];
3175 0 : item_size = btrfs_item_size(eb, path->slots[0]);
3176 :
3177 0 : if (extent_key->type == BTRFS_METADATA_ITEM_KEY ||
3178 : item_size >= sizeof(*ei) + sizeof(*bi)) {
3179 0 : unsigned long ptr = 0, end;
3180 :
3181 0 : ei = btrfs_item_ptr(eb, path->slots[0],
3182 : struct btrfs_extent_item);
3183 0 : end = (unsigned long)ei + item_size;
3184 0 : if (extent_key->type == BTRFS_EXTENT_ITEM_KEY) {
3185 0 : bi = (struct btrfs_tree_block_info *)(ei + 1);
3186 0 : level = btrfs_tree_block_level(eb, bi);
3187 0 : ptr = (unsigned long)(bi + 1);
3188 : } else {
3189 0 : level = (int)extent_key->offset;
3190 0 : ptr = (unsigned long)(ei + 1);
3191 : }
3192 0 : generation = btrfs_extent_generation(eb, ei);
3193 :
3194 : /*
3195 : * We're reading random blocks without knowing their owner ahead
3196 : * of time. This is ok most of the time, as all reloc roots and
3197 : * fs roots have the same lock type. However normal trees do
3198 : * not, and the only way to know ahead of time is to read the
3199 : * inline ref offset. We know it's an fs root if
3200 : *
3201 : * 1. There's more than one ref.
3202 : * 2. There's a SHARED_DATA_REF_KEY set.
3203 : * 3. FULL_BACKREF is set on the flags.
3204 : *
3205 : * Otherwise it's safe to assume that the ref offset == the
3206 : * owner of this block, so we can use that when calling
3207 : * read_tree_block.
3208 : */
3209 0 : if (btrfs_extent_refs(eb, ei) == 1 &&
3210 0 : !(btrfs_extent_flags(eb, ei) &
3211 0 : BTRFS_BLOCK_FLAG_FULL_BACKREF) &&
3212 : ptr < end) {
3213 0 : struct btrfs_extent_inline_ref *iref;
3214 0 : int type;
3215 :
3216 0 : iref = (struct btrfs_extent_inline_ref *)ptr;
3217 0 : type = btrfs_get_extent_inline_ref_type(eb, iref,
3218 : BTRFS_REF_TYPE_BLOCK);
3219 0 : if (type == BTRFS_REF_TYPE_INVALID)
3220 : return -EINVAL;
3221 0 : if (type == BTRFS_TREE_BLOCK_REF_KEY)
3222 0 : owner = btrfs_extent_inline_ref_offset(eb, iref);
3223 : }
3224 0 : } else if (unlikely(item_size == sizeof(struct btrfs_extent_item_v0))) {
3225 0 : btrfs_print_v0_err(eb->fs_info);
3226 0 : btrfs_handle_fs_error(eb->fs_info, -EINVAL, NULL);
3227 0 : return -EINVAL;
3228 : } else {
3229 0 : BUG();
3230 : }
3231 :
3232 0 : btrfs_release_path(path);
3233 :
3234 0 : BUG_ON(level == -1);
3235 :
3236 0 : block = kmalloc(sizeof(*block), GFP_NOFS);
3237 0 : if (!block)
3238 : return -ENOMEM;
3239 :
3240 0 : block->bytenr = extent_key->objectid;
3241 0 : block->key.objectid = rc->extent_root->fs_info->nodesize;
3242 0 : block->key.offset = generation;
3243 0 : block->level = level;
3244 0 : block->key_ready = 0;
3245 0 : block->owner = owner;
3246 :
3247 0 : rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
3248 0 : if (rb_node)
3249 0 : btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
3250 : -EEXIST);
3251 :
3252 : return 0;
3253 : }
3254 :
3255 : /*
3256 : * helper to add tree blocks for backref of type BTRFS_SHARED_DATA_REF_KEY
3257 : */
static int __add_tree_block(struct reloc_control *rc,
			    u64 bytenr, u32 blocksize,
			    struct rb_root *blocks)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;
	bool skinny = btrfs_fs_incompat(fs_info, SKINNY_METADATA);

	/* Already relocated, nothing to do. */
	if (tree_block_processed(bytenr, rc))
		return 0;

	/* Already queued in @blocks, nothing to do. */
	if (rb_simple_search(blocks, bytenr))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	/*
	 * With skinny metadata the item is keyed (bytenr, METADATA_ITEM,
	 * level); search with the largest possible offset and back up one
	 * slot below.  Without it, look up the exact
	 * (bytenr, EXTENT_ITEM, blocksize) key.
	 */
	key.objectid = bytenr;
	if (skinny) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = (u64)-1;
	} else {
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = blocksize;
	}

	/* Read-only lookup against the commit root, no tree locks needed. */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	ret = btrfs_search_slot(NULL, rc->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (ret > 0 && skinny) {
		/* Check the slot before the insertion point for a match. */
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    (key.type == BTRFS_METADATA_ITEM_KEY ||
			     (key.type == BTRFS_EXTENT_ITEM_KEY &&
			      key.offset == blocksize)))
				ret = 0;
		}

		/*
		 * No skinny item found; retry with the full EXTENT_ITEM key
		 * in case this block still has a non-skinny item.
		 */
		if (ret) {
			skinny = false;
			btrfs_release_path(path);
			goto again;
		}
	}
	/* Neither key form matched: the extent item is missing entirely. */
	if (ret) {
		ASSERT(ret == 1);
		btrfs_print_leaf(path->nodes[0]);
		btrfs_err(fs_info,
	     "tree block extent item (%llu) is not found in extent tree",
		     bytenr);
		WARN_ON(1);
		ret = -EINVAL;
		goto out;
	}

	ret = add_tree_block(rc, &key, path, blocks);
out:
	btrfs_free_path(path);
	return ret;
}
3327 :
3328 0 : static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3329 : struct btrfs_block_group *block_group,
3330 : struct inode *inode,
3331 : u64 ino)
3332 : {
3333 0 : struct btrfs_root *root = fs_info->tree_root;
3334 0 : struct btrfs_trans_handle *trans;
3335 0 : int ret = 0;
3336 :
3337 0 : if (inode)
3338 0 : goto truncate;
3339 :
3340 0 : inode = btrfs_iget(fs_info->sb, ino, root);
3341 0 : if (IS_ERR(inode))
3342 : return -ENOENT;
3343 :
3344 0 : truncate:
3345 0 : ret = btrfs_check_trunc_cache_free_space(fs_info,
3346 : &fs_info->global_block_rsv);
3347 0 : if (ret)
3348 0 : goto out;
3349 :
3350 0 : trans = btrfs_join_transaction(root);
3351 0 : if (IS_ERR(trans)) {
3352 0 : ret = PTR_ERR(trans);
3353 0 : goto out;
3354 : }
3355 :
3356 0 : ret = btrfs_truncate_free_space_cache(trans, block_group, inode);
3357 :
3358 0 : btrfs_end_transaction(trans);
3359 0 : btrfs_btree_balance_dirty(fs_info);
3360 0 : out:
3361 0 : iput(inode);
3362 0 : return ret;
3363 : }
3364 :
3365 : /*
3366 : * Locate the free space cache EXTENT_DATA in root tree leaf and delete the
3367 : * cache inode, to avoid free space cache data extent blocking data relocation.
3368 : */
3369 0 : static int delete_v1_space_cache(struct extent_buffer *leaf,
3370 : struct btrfs_block_group *block_group,
3371 : u64 data_bytenr)
3372 : {
3373 0 : u64 space_cache_ino;
3374 0 : struct btrfs_file_extent_item *ei;
3375 0 : struct btrfs_key key;
3376 0 : bool found = false;
3377 0 : int i;
3378 0 : int ret;
3379 :
3380 0 : if (btrfs_header_owner(leaf) != BTRFS_ROOT_TREE_OBJECTID)
3381 : return 0;
3382 :
3383 0 : for (i = 0; i < btrfs_header_nritems(leaf); i++) {
3384 0 : u8 type;
3385 :
3386 0 : btrfs_item_key_to_cpu(leaf, &key, i);
3387 0 : if (key.type != BTRFS_EXTENT_DATA_KEY)
3388 0 : continue;
3389 0 : ei = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3390 0 : type = btrfs_file_extent_type(leaf, ei);
3391 :
3392 0 : if ((type == BTRFS_FILE_EXTENT_REG ||
3393 0 : type == BTRFS_FILE_EXTENT_PREALLOC) &&
3394 : btrfs_file_extent_disk_bytenr(leaf, ei) == data_bytenr) {
3395 0 : found = true;
3396 0 : space_cache_ino = key.objectid;
3397 0 : break;
3398 : }
3399 : }
3400 0 : if (!found)
3401 : return -ENOENT;
3402 0 : ret = delete_block_group_cache(leaf->fs_info, block_group, NULL,
3403 : space_cache_ino);
3404 0 : return ret;
3405 : }
3406 :
3407 : /*
3408 : * helper to find all tree blocks that reference a given data extent
3409 : */
static noinline_for_stack
int add_data_references(struct reloc_control *rc,
			struct btrfs_key *extent_key,
			struct btrfs_path *path,
			struct rb_root *blocks)
{
	struct btrfs_backref_walk_ctx ctx = { 0 };
	struct ulist_iterator leaf_uiter;
	struct ulist_node *ref_node = NULL;
	const u32 blocksize = rc->extent_root->fs_info->nodesize;
	int ret = 0;

	/* The search path is no longer needed; backref walking takes over. */
	btrfs_release_path(path);

	ctx.bytenr = extent_key->objectid;
	ctx.skip_inode_ref_list = true;
	ctx.fs_info = rc->extent_root->fs_info;

	/* Collect every leaf referencing the data extent at @bytenr. */
	ret = btrfs_find_all_leafs(&ctx);
	if (ret < 0)
		return ret;

	ULIST_ITER_INIT(&leaf_uiter);
	while ((ref_node = ulist_next(ctx.refs, &leaf_uiter))) {
		struct btrfs_tree_parent_check check = { 0 };
		struct extent_buffer *eb;

		eb = read_tree_block(ctx.fs_info, ref_node->val, &check);
		if (IS_ERR(eb)) {
			ret = PTR_ERR(eb);
			break;
		}
		/*
		 * If the leaf is in the root tree it may reference a v1 free
		 * space cache file; delete that cache so its data extent
		 * does not block the relocation.
		 */
		ret = delete_v1_space_cache(eb, rc->block_group,
					    extent_key->objectid);
		free_extent_buffer(eb);
		if (ret < 0)
			break;
		/* Queue the referencing leaf itself for relocation. */
		ret = __add_tree_block(rc, ref_node->val, blocksize, blocks);
		if (ret < 0)
			break;
	}
	/* On error drop everything queued so far; nothing is half-added. */
	if (ret < 0)
		free_block_list(blocks);
	ulist_free(ctx.refs);
	return ret;
}
3456 :
3457 : /*
3458 : * helper to find next unprocessed extent
3459 : */
static noinline_for_stack
int find_next_extent(struct reloc_control *rc, struct btrfs_path *path,
		     struct btrfs_key *extent_key)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u64 start, end, last;
	int ret;

	/* End of the target block group; the cursor must stay below this. */
	last = rc->block_group->start + rc->block_group->length;
	while (1) {
		cond_resched();
		if (rc->search_start >= last) {
			ret = 1;
			break;
		}

		key.objectid = rc->search_start;
		key.type = BTRFS_EXTENT_ITEM_KEY;
		key.offset = 0;

		/* Read-only search against the commit root, no locking. */
		path->search_commit_root = 1;
		path->skip_locking = 1;
		ret = btrfs_search_slot(NULL, rc->extent_root, &key, path,
					0, 0);
		if (ret < 0)
			break;
next:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(rc->extent_root, path);
			if (ret != 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid >= last) {
			ret = 1;
			break;
		}

		/* Only extent and metadata items are of interest. */
		if (key.type != BTRFS_EXTENT_ITEM_KEY &&
		    key.type != BTRFS_METADATA_ITEM_KEY) {
			path->slots[0]++;
			goto next;
		}

		/* Skip data/tree extents ending at or before the cursor. */
		if (key.type == BTRFS_EXTENT_ITEM_KEY &&
		    key.objectid + key.offset <= rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		/* Same check for skinny items, whose length is nodesize. */
		if (key.type == BTRFS_METADATA_ITEM_KEY &&
		    key.objectid + fs_info->nodesize <=
		    rc->search_start) {
			path->slots[0]++;
			goto next;
		}

		/*
		 * Already-processed extents are marked EXTENT_DIRTY in
		 * processed_blocks; if this one is marked, jump the cursor
		 * past the processed range and search again.
		 */
		ret = find_first_extent_bit(&rc->processed_blocks,
					    key.objectid, &start, &end,
					    EXTENT_DIRTY, NULL);

		if (ret == 0 && start <= key.objectid) {
			btrfs_release_path(path);
			rc->search_start = end + 1;
		} else {
			/* Unprocessed extent found: advance cursor, return it. */
			if (key.type == BTRFS_EXTENT_ITEM_KEY)
				rc->search_start = key.objectid + key.offset;
			else
				rc->search_start = key.objectid +
					fs_info->nodesize;
			memcpy(extent_key, &key, sizeof(key));
			return 0;
		}
	}
	btrfs_release_path(path);
	return ret;
}
3542 :
3543 0 : static void set_reloc_control(struct reloc_control *rc)
3544 : {
3545 0 : struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3546 :
3547 0 : mutex_lock(&fs_info->reloc_mutex);
3548 0 : fs_info->reloc_ctl = rc;
3549 0 : mutex_unlock(&fs_info->reloc_mutex);
3550 0 : }
3551 :
3552 0 : static void unset_reloc_control(struct reloc_control *rc)
3553 : {
3554 0 : struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3555 :
3556 0 : mutex_lock(&fs_info->reloc_mutex);
3557 0 : fs_info->reloc_ctl = NULL;
3558 0 : mutex_unlock(&fs_info->reloc_mutex);
3559 0 : }
3560 :
3561 : static noinline_for_stack
3562 0 : int prepare_to_relocate(struct reloc_control *rc)
3563 : {
3564 0 : struct btrfs_trans_handle *trans;
3565 0 : int ret;
3566 :
3567 0 : rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
3568 : BTRFS_BLOCK_RSV_TEMP);
3569 0 : if (!rc->block_rsv)
3570 : return -ENOMEM;
3571 :
3572 0 : memset(&rc->cluster, 0, sizeof(rc->cluster));
3573 0 : rc->search_start = rc->block_group->start;
3574 0 : rc->extents_found = 0;
3575 0 : rc->nodes_relocated = 0;
3576 0 : rc->merging_rsv_size = 0;
3577 0 : rc->reserved_bytes = 0;
3578 0 : rc->block_rsv->size = rc->extent_root->fs_info->nodesize *
3579 : RELOCATION_RESERVED_NODES;
3580 0 : ret = btrfs_block_rsv_refill(rc->extent_root->fs_info,
3581 : rc->block_rsv, rc->block_rsv->size,
3582 : BTRFS_RESERVE_FLUSH_ALL);
3583 0 : if (ret)
3584 : return ret;
3585 :
3586 0 : rc->create_reloc_tree = 1;
3587 0 : set_reloc_control(rc);
3588 :
3589 0 : trans = btrfs_join_transaction(rc->extent_root);
3590 0 : if (IS_ERR(trans)) {
3591 0 : unset_reloc_control(rc);
3592 : /*
3593 : * extent tree is not a ref_cow tree and has no reloc_root to
3594 : * cleanup. And callers are responsible to free the above
3595 : * block rsv.
3596 : */
3597 0 : return PTR_ERR(trans);
3598 : }
3599 :
3600 0 : ret = btrfs_commit_transaction(trans);
3601 0 : if (ret)
3602 0 : unset_reloc_control(rc);
3603 :
3604 : return ret;
3605 : }
3606 :
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
	struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
	struct rb_root blocks = RB_ROOT;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	u64 flags;
	int ret;
	int err = 0;
	int progress = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_FORWARD;

	ret = prepare_to_relocate(rc);
	if (ret) {
		err = ret;
		goto out_free;
	}

	while (1) {
		rc->reserved_bytes = 0;
		/* Top up the metadata reservation for this iteration. */
		ret = btrfs_block_rsv_refill(fs_info, rc->block_rsv,
					     rc->block_rsv->size,
					     BTRFS_RESERVE_FLUSH_ALL);
		if (ret) {
			err = ret;
			break;
		}
		progress++;
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans)) {
			err = PTR_ERR(trans);
			trans = NULL;
			break;
		}
restart:
		/* Stale backref cache: drop the transaction and retry. */
		if (update_backref_cache(trans, &rc->backref_cache)) {
			btrfs_end_transaction(trans);
			trans = NULL;
			continue;
		}

		/* ret > 0 means no more extents: leave the loop cleanly. */
		ret = find_next_extent(rc, path, &key);
		if (ret < 0)
			err = ret;
		if (ret != 0)
			break;

		rc->extents_found++;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_extent_item);
		flags = btrfs_extent_flags(path->nodes[0], ei);

		/*
		 * Tree blocks are queued directly; data extents only have
		 * their referencing tree blocks queued during the
		 * UPDATE_DATA_PTRS stage.
		 */
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			ret = add_tree_block(rc, &key, path, &blocks);
		} else if (rc->stage == UPDATE_DATA_PTRS &&
			   (flags & BTRFS_EXTENT_FLAG_DATA)) {
			ret = add_data_references(rc, &key, path, &blocks);
		} else {
			btrfs_release_path(path);
			ret = 0;
		}
		if (ret < 0) {
			err = ret;
			break;
		}

		if (!RB_EMPTY_ROOT(&blocks)) {
			ret = relocate_tree_blocks(trans, rc, &blocks);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					err = ret;
					break;
				}
				/* -EAGAIN: redo this extent on the next pass. */
				rc->extents_found--;
				rc->search_start = key.objectid;
			}
		}

		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
		trans = NULL;

		/* During the data stage, batch data extents into clusters. */
		if (rc->stage == MOVE_DATA_EXTENTS &&
		    (flags & BTRFS_EXTENT_FLAG_DATA)) {
			rc->found_file_extent = 1;
			ret = relocate_data_extent(rc->data_inode,
						   &key, &rc->cluster);
			if (ret < 0) {
				err = ret;
				break;
			}
		}
		if (btrfs_should_cancel_balance(fs_info)) {
			err = -ECANCELED;
			break;
		}
	}
	/*
	 * On ENOSPC after making some progress, try forcing a chunk
	 * allocation and restart inside the still-open transaction.
	 */
	if (trans && progress && err == -ENOSPC) {
		ret = btrfs_force_chunk_alloc(trans, rc->block_group->flags);
		if (ret == 1) {
			err = 0;
			progress = 0;
			goto restart;
		}
	}

	btrfs_release_path(path);
	clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);

	if (trans) {
		btrfs_end_transaction_throttle(trans);
		btrfs_btree_balance_dirty(fs_info);
	}

	/* Flush the last (possibly partial) data cluster. */
	if (!err) {
		ret = relocate_file_extent_cluster(rc->data_inode,
						   &rc->cluster);
		if (ret < 0)
			err = ret;
	}

	rc->create_reloc_tree = 0;
	set_reloc_control(rc);

	btrfs_backref_release_cache(&rc->backref_cache);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/*
	 * Even in the case when the relocation is cancelled, we should all go
	 * through prepare_to_merge() and merge_reloc_roots().
	 *
	 * For error (including cancelled balance), prepare_to_merge() will
	 * mark all reloc trees orphan, then queue them for cleanup in
	 * merge_reloc_roots()
	 */
	err = prepare_to_merge(rc, err);

	merge_reloc_roots(rc);

	rc->merge_reloc_tree = 0;
	unset_reloc_control(rc);
	btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);

	/* get rid of pinned extents */
	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}
	ret = btrfs_commit_transaction(trans);
	if (ret && !err)
		err = ret;
out_free:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
	btrfs_free_block_rsv(fs_info, rc->block_rsv);
	btrfs_free_path(path);
	return err;
}
3774 :
3775 0 : static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
3776 : struct btrfs_root *root, u64 objectid)
3777 : {
3778 0 : struct btrfs_path *path;
3779 0 : struct btrfs_inode_item *item;
3780 0 : struct extent_buffer *leaf;
3781 0 : int ret;
3782 :
3783 0 : path = btrfs_alloc_path();
3784 0 : if (!path)
3785 : return -ENOMEM;
3786 :
3787 0 : ret = btrfs_insert_empty_inode(trans, root, path, objectid);
3788 0 : if (ret)
3789 0 : goto out;
3790 :
3791 0 : leaf = path->nodes[0];
3792 0 : item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
3793 0 : memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3794 0 : btrfs_set_inode_generation(leaf, item, 1);
3795 0 : btrfs_set_inode_size(leaf, item, 0);
3796 0 : btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
3797 0 : btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
3798 : BTRFS_INODE_PREALLOC);
3799 0 : btrfs_mark_buffer_dirty(leaf);
3800 0 : out:
3801 0 : btrfs_free_path(path);
3802 0 : return ret;
3803 : }
3804 :
3805 0 : static void delete_orphan_inode(struct btrfs_trans_handle *trans,
3806 : struct btrfs_root *root, u64 objectid)
3807 : {
3808 0 : struct btrfs_path *path;
3809 0 : struct btrfs_key key;
3810 0 : int ret = 0;
3811 :
3812 0 : path = btrfs_alloc_path();
3813 0 : if (!path) {
3814 0 : ret = -ENOMEM;
3815 0 : goto out;
3816 : }
3817 :
3818 0 : key.objectid = objectid;
3819 0 : key.type = BTRFS_INODE_ITEM_KEY;
3820 0 : key.offset = 0;
3821 0 : ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3822 0 : if (ret) {
3823 0 : if (ret > 0)
3824 0 : ret = -ENOENT;
3825 0 : goto out;
3826 : }
3827 0 : ret = btrfs_del_item(trans, root, path);
3828 0 : out:
3829 0 : if (ret)
3830 0 : btrfs_abort_transaction(trans, ret);
3831 0 : btrfs_free_path(path);
3832 0 : }
3833 :
3834 : /*
3835 : * helper to create inode for data relocation.
3836 : * the inode is in data relocation tree and its link count is 0
3837 : */
static noinline_for_stack
struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group *group)
{
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	u64 objectid;
	int err = 0;

	root = btrfs_grab_root(fs_info->data_reloc_root);
	trans = btrfs_start_transaction(root, 6);
	if (IS_ERR(trans)) {
		btrfs_put_root(root);
		return ERR_CAST(trans);
	}

	err = btrfs_get_free_objectid(root, &objectid);
	if (err)
		goto out;

	/* Insert the on-disk inode item first, then instantiate it. */
	err = __insert_orphan_inode(trans, root, objectid);
	if (err)
		goto out;

	inode = btrfs_iget(fs_info->sb, objectid, root);
	if (IS_ERR(inode)) {
		/* Undo the on-disk item we just inserted. */
		delete_orphan_inode(trans, root, objectid);
		err = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}
	/* Record the block group start; used as the file offset base. */
	BTRFS_I(inode)->index_cnt = group->start;

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
out:
	btrfs_put_root(root);
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
	if (err) {
		/* iput(NULL) is a no-op, so this is safe on early errors. */
		iput(inode);
		inode = ERR_PTR(err);
	}
	return inode;
}
3883 :
3884 : /*
3885 : * Mark start of chunk relocation that is cancellable. Check if the cancellation
3886 : * has been requested meanwhile and don't start in that case.
3887 : *
3888 : * Return:
3889 : * 0 success
3890 : * -EINPROGRESS operation is already in progress, that's probably a bug
3891 : * -ECANCELED cancellation request was set before the operation started
3892 : */
3893 0 : static int reloc_chunk_start(struct btrfs_fs_info *fs_info)
3894 : {
3895 0 : if (test_and_set_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
3896 : /* This should not happen */
3897 0 : btrfs_err(fs_info, "reloc already running, cannot start");
3898 0 : return -EINPROGRESS;
3899 : }
3900 :
3901 0 : if (atomic_read(&fs_info->reloc_cancel_req) > 0) {
3902 0 : btrfs_info(fs_info, "chunk relocation canceled on start");
3903 : /*
3904 : * On cancel, clear all requests but let the caller mark
3905 : * the end after cleanup operations.
3906 : */
3907 0 : atomic_set(&fs_info->reloc_cancel_req, 0);
3908 0 : return -ECANCELED;
3909 : }
3910 : return 0;
3911 : }
3912 :
3913 : /*
3914 : * Mark end of chunk relocation that is cancellable and wake any waiters.
3915 : */
3916 0 : static void reloc_chunk_end(struct btrfs_fs_info *fs_info)
3917 : {
3918 : /* Requested after start, clear bit first so any waiters can continue */
3919 0 : if (atomic_read(&fs_info->reloc_cancel_req) > 0)
3920 0 : btrfs_info(fs_info, "chunk relocation canceled during operation");
3921 0 : clear_and_wake_up_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags);
3922 0 : atomic_set(&fs_info->reloc_cancel_req, 0);
3923 0 : }
3924 :
3925 0 : static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
3926 : {
3927 0 : struct reloc_control *rc;
3928 :
3929 0 : rc = kzalloc(sizeof(*rc), GFP_NOFS);
3930 0 : if (!rc)
3931 : return NULL;
3932 :
3933 0 : INIT_LIST_HEAD(&rc->reloc_roots);
3934 0 : INIT_LIST_HEAD(&rc->dirty_subvol_roots);
3935 0 : btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
3936 0 : mapping_tree_init(&rc->reloc_root_tree);
3937 0 : extent_io_tree_init(fs_info, &rc->processed_blocks, IO_TREE_RELOC_BLOCKS);
3938 0 : return rc;
3939 : }
3940 :
3941 0 : static void free_reloc_control(struct reloc_control *rc)
3942 : {
3943 0 : struct mapping_node *node, *tmp;
3944 :
3945 0 : free_reloc_roots(&rc->reloc_roots);
3946 0 : rbtree_postorder_for_each_entry_safe(node, tmp,
3947 : &rc->reloc_root_tree.rb_root, rb_node)
3948 0 : kfree(node);
3949 :
3950 0 : kfree(rc);
3951 0 : }
3952 :
3953 : /*
3954 : * Print the block group being relocated
3955 : */
3956 0 : static void describe_relocation(struct btrfs_fs_info *fs_info,
3957 : struct btrfs_block_group *block_group)
3958 : {
3959 0 : char buf[128] = {'\0'};
3960 :
3961 0 : btrfs_describe_block_groups(block_group->flags, buf, sizeof(buf));
3962 :
3963 0 : btrfs_info(fs_info,
3964 : "relocating block group %llu flags %s",
3965 : block_group->start, buf);
3966 0 : }
3967 :
3968 : static const char *stage_to_string(int stage)
3969 : {
3970 0 : if (stage == MOVE_DATA_EXTENTS)
3971 : return "move data extents";
3972 0 : if (stage == UPDATE_DATA_PTRS)
3973 0 : return "update data pointers";
3974 : return "unknown";
3975 : }
3976 :
3977 : /*
3978 : * function to relocate all extents in a block group.
3979 : */
3980 0 : int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
3981 : {
3982 0 : struct btrfs_block_group *bg;
3983 0 : struct btrfs_root *extent_root = btrfs_extent_root(fs_info, group_start);
3984 0 : struct reloc_control *rc;
3985 0 : struct inode *inode;
3986 0 : struct btrfs_path *path;
3987 0 : int ret;
3988 0 : int rw = 0;
3989 0 : int err = 0;
3990 :
3991 : /*
3992 : * This only gets set if we had a half-deleted snapshot on mount. We
3993 : * cannot allow relocation to start while we're still trying to clean up
3994 : * these pending deletions.
3995 : */
3996 0 : ret = wait_on_bit(&fs_info->flags, BTRFS_FS_UNFINISHED_DROPS, TASK_INTERRUPTIBLE);
3997 0 : if (ret)
3998 : return ret;
3999 :
4000 : /* We may have been woken up by close_ctree, so bail if we're closing. */
4001 0 : if (btrfs_fs_closing(fs_info))
4002 : return -EINTR;
4003 :
4004 0 : bg = btrfs_lookup_block_group(fs_info, group_start);
4005 0 : if (!bg)
4006 : return -ENOENT;
4007 :
4008 : /*
4009 : * Relocation of a data block group creates ordered extents. Without
4010 : * sb_start_write(), we can freeze the filesystem while unfinished
4011 : * ordered extents are left. Such ordered extents can cause a deadlock
4012 : * e.g. when syncfs() is waiting for their completion but they can't
4013 : * finish because they block when joining a transaction, due to the
4014 : * fact that the freeze locks are being held in write mode.
4015 : */
4016 0 : if (bg->flags & BTRFS_BLOCK_GROUP_DATA)
4017 : ASSERT(sb_write_started(fs_info->sb));
4018 :
4019 0 : if (btrfs_pinned_by_swapfile(fs_info, bg)) {
4020 0 : btrfs_put_block_group(bg);
4021 0 : return -ETXTBSY;
4022 : }
4023 :
4024 0 : rc = alloc_reloc_control(fs_info);
4025 0 : if (!rc) {
4026 0 : btrfs_put_block_group(bg);
4027 0 : return -ENOMEM;
4028 : }
4029 :
4030 0 : ret = reloc_chunk_start(fs_info);
4031 0 : if (ret < 0) {
4032 0 : err = ret;
4033 0 : goto out_put_bg;
4034 : }
4035 :
4036 0 : rc->extent_root = extent_root;
4037 0 : rc->block_group = bg;
4038 :
4039 0 : ret = btrfs_inc_block_group_ro(rc->block_group, true);
4040 0 : if (ret) {
4041 0 : err = ret;
4042 0 : goto out;
4043 : }
4044 0 : rw = 1;
4045 :
4046 0 : path = btrfs_alloc_path();
4047 0 : if (!path) {
4048 0 : err = -ENOMEM;
4049 0 : goto out;
4050 : }
4051 :
4052 0 : inode = lookup_free_space_inode(rc->block_group, path);
4053 0 : btrfs_free_path(path);
4054 :
4055 0 : if (!IS_ERR(inode))
4056 0 : ret = delete_block_group_cache(fs_info, rc->block_group, inode, 0);
4057 : else
4058 0 : ret = PTR_ERR(inode);
4059 :
4060 0 : if (ret && ret != -ENOENT) {
4061 0 : err = ret;
4062 0 : goto out;
4063 : }
4064 :
4065 0 : rc->data_inode = create_reloc_inode(fs_info, rc->block_group);
4066 0 : if (IS_ERR(rc->data_inode)) {
4067 0 : err = PTR_ERR(rc->data_inode);
4068 0 : rc->data_inode = NULL;
4069 0 : goto out;
4070 : }
4071 :
4072 0 : describe_relocation(fs_info, rc->block_group);
4073 :
4074 0 : btrfs_wait_block_group_reservations(rc->block_group);
4075 0 : btrfs_wait_nocow_writers(rc->block_group);
4076 0 : btrfs_wait_ordered_roots(fs_info, U64_MAX,
4077 0 : rc->block_group->start,
4078 : rc->block_group->length);
4079 :
4080 0 : ret = btrfs_zone_finish(rc->block_group);
4081 0 : WARN_ON(ret && ret != -EAGAIN);
4082 :
4083 0 : while (1) {
4084 0 : int finishes_stage;
4085 :
4086 0 : mutex_lock(&fs_info->cleaner_mutex);
4087 0 : ret = relocate_block_group(rc);
4088 0 : mutex_unlock(&fs_info->cleaner_mutex);
4089 0 : if (ret < 0)
4090 0 : err = ret;
4091 :
4092 0 : finishes_stage = rc->stage;
4093 : /*
4094 : * We may have gotten ENOSPC after we already dirtied some
4095 : * extents. If writeout happens while we're relocating a
4096 : * different block group we could end up hitting the
4097 : * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
4098 : * btrfs_reloc_cow_block. Make sure we write everything out
4099 : * properly so we don't trip over this problem, and then break
4100 : * out of the loop if we hit an error.
4101 : */
4102 0 : if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
4103 0 : ret = btrfs_wait_ordered_range(rc->data_inode, 0,
4104 : (u64)-1);
4105 0 : if (ret)
4106 0 : err = ret;
4107 0 : invalidate_mapping_pages(rc->data_inode->i_mapping,
4108 : 0, -1);
4109 0 : rc->stage = UPDATE_DATA_PTRS;
4110 : }
4111 :
4112 0 : if (err < 0)
4113 0 : goto out;
4114 :
4115 0 : if (rc->extents_found == 0)
4116 : break;
4117 :
4118 0 : btrfs_info(fs_info, "found %llu extents, stage: %s",
4119 : rc->extents_found, stage_to_string(finishes_stage));
4120 : }
4121 :
4122 0 : WARN_ON(rc->block_group->pinned > 0);
4123 0 : WARN_ON(rc->block_group->reserved > 0);
4124 0 : WARN_ON(rc->block_group->used > 0);
4125 0 : out:
4126 0 : if (err && rw)
4127 0 : btrfs_dec_block_group_ro(rc->block_group);
4128 0 : iput(rc->data_inode);
4129 0 : out_put_bg:
4130 0 : btrfs_put_block_group(bg);
4131 0 : reloc_chunk_end(fs_info);
4132 0 : free_reloc_control(rc);
4133 0 : return err;
4134 : }
4135 :
4136 0 : static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
4137 : {
4138 0 : struct btrfs_fs_info *fs_info = root->fs_info;
4139 0 : struct btrfs_trans_handle *trans;
4140 0 : int ret, err;
4141 :
4142 0 : trans = btrfs_start_transaction(fs_info->tree_root, 0);
4143 0 : if (IS_ERR(trans))
4144 0 : return PTR_ERR(trans);
4145 :
4146 0 : memset(&root->root_item.drop_progress, 0,
4147 : sizeof(root->root_item.drop_progress));
4148 0 : btrfs_set_root_drop_level(&root->root_item, 0);
4149 0 : btrfs_set_root_refs(&root->root_item, 0);
4150 0 : ret = btrfs_update_root(trans, fs_info->tree_root,
4151 : &root->root_key, &root->root_item);
4152 :
4153 0 : err = btrfs_end_transaction(trans);
4154 0 : if (err)
4155 0 : return err;
4156 : return ret;
4157 : }
4158 :
/*
 * Recover relocation interrupted by system crash.
 *
 * This function resumes merging reloc trees with corresponding fs trees,
 * which is important for keeping the sharing of tree blocks consistent.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_recover_relocation(struct btrfs_fs_info *fs_info)
{
	LIST_HEAD(reloc_roots);
	struct btrfs_key key;
	struct btrfs_root *fs_root;
	struct btrfs_root *reloc_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct reloc_control *rc = NULL;
	struct btrfs_trans_handle *trans;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	/*
	 * Phase 1: walk the root tree backwards from the highest possible
	 * offset, collecting every leftover reloc tree root item.
	 */
	key.objectid = BTRFS_TREE_RELOC_OBJECTID;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, fs_info->tree_root, &key,
					path, 0, 0);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		if (ret > 0) {
			/* Exact key not found: step back to the previous item. */
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);

		/* Stepped past the last reloc root item: collection done. */
		if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
		    key.type != BTRFS_ROOT_ITEM_KEY)
			break;

		reloc_root = btrfs_read_tree_root(fs_info->tree_root, &key);
		if (IS_ERR(reloc_root)) {
			err = PTR_ERR(reloc_root);
			goto out;
		}

		set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
		list_add(&reloc_root->root_list, &reloc_roots);

		if (btrfs_root_refs(&reloc_root->root_item) > 0) {
			/*
			 * The reloc root still has references; verify its fs
			 * tree exists.  If the fs tree is gone (-ENOENT),
			 * mark the reloc root as garbage so it gets dropped.
			 */
			fs_root = btrfs_get_fs_root(fs_info,
					reloc_root->root_key.offset, false);
			if (IS_ERR(fs_root)) {
				ret = PTR_ERR(fs_root);
				if (ret != -ENOENT) {
					err = ret;
					goto out;
				}
				ret = mark_garbage_root(reloc_root);
				if (ret < 0) {
					err = ret;
					goto out;
				}
			} else {
				btrfs_put_root(fs_root);
			}
		}

		if (key.offset == 0)
			break;

		key.offset--;
	}
	btrfs_release_path(path);

	if (list_empty(&reloc_roots))
		goto out;

	/* Phase 2: set up relocation control to resume the merge. */
	rc = alloc_reloc_control(fs_info);
	if (!rc) {
		err = -ENOMEM;
		goto out;
	}

	ret = reloc_chunk_start(fs_info);
	if (ret < 0) {
		err = ret;
		goto out_end;
	}

	rc->extent_root = btrfs_extent_root(fs_info, 0);

	set_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_unset;
	}

	rc->merge_reloc_tree = 1;

	/*
	 * Phase 3: re-attach each collected reloc root to its fs tree.
	 * Reloc roots with zero refs are just queued on rc->reloc_roots;
	 * on error, put the root back on reloc_roots so the common cleanup
	 * in free_reloc_roots() releases it.
	 */
	while (!list_empty(&reloc_roots)) {
		reloc_root = list_entry(reloc_roots.next,
					struct btrfs_root, root_list);
		list_del(&reloc_root->root_list);

		if (btrfs_root_refs(&reloc_root->root_item) == 0) {
			list_add_tail(&reloc_root->root_list,
				      &rc->reloc_roots);
			continue;
		}

		fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
					    false);
		if (IS_ERR(fs_root)) {
			err = PTR_ERR(fs_root);
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_end_transaction(trans);
			goto out_unset;
		}

		err = __add_reloc_root(reloc_root);
		ASSERT(err != -EEXIST);
		if (err) {
			list_add_tail(&reloc_root->root_list, &reloc_roots);
			btrfs_put_root(fs_root);
			btrfs_end_transaction(trans);
			goto out_unset;
		}
		fs_root->reloc_root = btrfs_grab_root(reloc_root);
		btrfs_put_root(fs_root);
	}

	err = btrfs_commit_transaction(trans);
	if (err)
		goto out_unset;

	/* Phase 4: do the actual merge, then commit the result. */
	merge_reloc_roots(rc);

	unset_reloc_control(rc);

	trans = btrfs_join_transaction(rc->extent_root);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_clean;
	}
	err = btrfs_commit_transaction(trans);
out_clean:
	ret = clean_dirty_subvols(rc);
	if (ret < 0 && !err)
		err = ret;
out_unset:
	unset_reloc_control(rc);
out_end:
	reloc_chunk_end(fs_info);
	free_reloc_control(rc);
out:
	free_reloc_roots(&reloc_roots);

	btrfs_free_path(path);

	if (err == 0) {
		/* cleanup orphan inode in data relocation tree */
		fs_root = btrfs_grab_root(fs_info->data_reloc_root);
		ASSERT(fs_root);
		err = btrfs_orphan_cleanup(fs_root);
		btrfs_put_root(fs_root);
	}
	return err;
}
4338 :
4339 : /*
4340 : * helper to add ordered checksum for data relocation.
4341 : *
4342 : * cloning checksum properly handles the nodatasum extents.
4343 : * it also saves CPU time to re-calculate the checksum.
4344 : */
4345 0 : int btrfs_reloc_clone_csums(struct btrfs_ordered_extent *ordered)
4346 : {
4347 0 : struct btrfs_inode *inode = BTRFS_I(ordered->inode);
4348 0 : struct btrfs_fs_info *fs_info = inode->root->fs_info;
4349 0 : u64 disk_bytenr = ordered->file_offset + inode->index_cnt;
4350 0 : struct btrfs_root *csum_root = btrfs_csum_root(fs_info, disk_bytenr);
4351 0 : LIST_HEAD(list);
4352 0 : int ret;
4353 :
4354 0 : ret = btrfs_lookup_csums_list(csum_root, disk_bytenr,
4355 0 : disk_bytenr + ordered->num_bytes - 1,
4356 : &list, 0, false);
4357 0 : if (ret)
4358 : return ret;
4359 :
4360 0 : while (!list_empty(&list)) {
4361 0 : struct btrfs_ordered_sum *sums =
4362 0 : list_entry(list.next, struct btrfs_ordered_sum, list);
4363 :
4364 0 : list_del_init(&sums->list);
4365 :
4366 : /*
4367 : * We need to offset the new_bytenr based on where the csum is.
4368 : * We need to do this because we will read in entire prealloc
4369 : * extents but we may have written to say the middle of the
4370 : * prealloc extent, so we need to make sure the csum goes with
4371 : * the right disk offset.
4372 : *
4373 : * We can do this because the data reloc inode refers strictly
4374 : * to the on disk bytes, so we don't have to worry about
4375 : * disk_len vs real len like with real inodes since it's all
4376 : * disk length.
4377 : */
4378 0 : sums->logical = ordered->disk_bytenr + sums->logical - disk_bytenr;
4379 0 : btrfs_add_ordered_sum(ordered, sums);
4380 : }
4381 :
4382 : return 0;
4383 : }
4384 :
/*
 * Hook called whenever a tree block is COWed while relocation is running.
 *
 * For blocks of a reloc tree being built, update the backref cache node to
 * point at the new copy and queue it on the per-level pending list.  For
 * leaves COWed during the UPDATE_DATA_PTRS stage, rewrite their file extent
 * pointers via replace_file_extents().
 *
 * Returns 0 on success or a negative errno from replace_file_extents().
 */
int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *buf,
			  struct extent_buffer *cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct reloc_control *rc;
	struct btrfs_backref_node *node;
	int first_cow = 0;
	int level;
	int ret = 0;

	/* No relocation in progress: nothing to do. */
	rc = fs_info->reloc_ctl;
	if (!rc)
		return 0;

	BUG_ON(rc->stage == UPDATE_DATA_PTRS && btrfs_is_data_reloc_root(root));

	/*
	 * A generation at or below the last snapshot means this is the first
	 * COW of the block in the current transaction window.
	 */
	level = btrfs_header_level(buf);
	if (btrfs_header_generation(buf) <=
	    btrfs_root_last_snapshot(&root->root_item))
		first_cow = 1;

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID &&
	    rc->create_reloc_tree) {
		WARN_ON(!first_cow && level == 0);

		/* The cached path node must match the block being COWed. */
		node = rc->backref_cache.path[level];
		BUG_ON(node->bytenr != buf->start &&
		       node->new_bytenr != buf->start);

		/* Point the backref node at the new copy of the block. */
		btrfs_backref_drop_node_buffer(node);
		atomic_inc(&cow->refs);
		node->eb = cow;
		node->new_bytenr = cow->start;

		if (!node->pending) {
			list_move_tail(&node->list,
				       &rc->backref_cache.pending[level]);
			node->pending = 1;
		}

		if (first_cow)
			mark_block_processed(rc, node);

		/* Track relocated metadata for snapshot space reservation. */
		if (first_cow && level > 0)
			rc->nodes_relocated += buf->len;
	}

	if (level == 0 && first_cow && rc->stage == UPDATE_DATA_PTRS)
		ret = replace_file_extents(trans, rc, root, cow);
	return ret;
}
4437 :
4438 : /*
4439 : * called before creating snapshot. it calculates metadata reservation
4440 : * required for relocating tree blocks in the snapshot
4441 : */
4442 0 : void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
4443 : u64 *bytes_to_reserve)
4444 : {
4445 0 : struct btrfs_root *root = pending->root;
4446 0 : struct reloc_control *rc = root->fs_info->reloc_ctl;
4447 :
4448 0 : if (!rc || !have_reloc_root(root))
4449 0 : return;
4450 :
4451 0 : if (!rc->merge_reloc_tree)
4452 : return;
4453 :
4454 0 : root = root->reloc_root;
4455 0 : BUG_ON(btrfs_root_refs(&root->root_item) == 0);
4456 : /*
4457 : * relocation is in the stage of merging trees. the space
4458 : * used by merging a reloc tree is twice the size of
4459 : * relocated tree nodes in the worst case. half for cowing
4460 : * the reloc tree, half for cowing the fs tree. the space
4461 : * used by cowing the reloc tree will be freed after the
4462 : * tree is dropped. if we create snapshot, cowing the fs
4463 : * tree may use more space than it frees. so we need
4464 : * reserve extra space.
4465 : */
4466 0 : *bytes_to_reserve += rc->nodes_relocated;
4467 : }
4468 :
4469 : /*
4470 : * called after snapshot is created. migrate block reservation
4471 : * and create reloc root for the newly created snapshot
4472 : *
4473 : * This is similar to btrfs_init_reloc_root(), we come out of here with two
4474 : * references held on the reloc_root, one for root->reloc_root and one for
4475 : * rc->reloc_roots.
4476 : */
4477 0 : int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
4478 : struct btrfs_pending_snapshot *pending)
4479 : {
4480 0 : struct btrfs_root *root = pending->root;
4481 0 : struct btrfs_root *reloc_root;
4482 0 : struct btrfs_root *new_root;
4483 0 : struct reloc_control *rc = root->fs_info->reloc_ctl;
4484 0 : int ret;
4485 :
4486 0 : if (!rc || !have_reloc_root(root))
4487 0 : return 0;
4488 :
4489 0 : rc = root->fs_info->reloc_ctl;
4490 0 : rc->merging_rsv_size += rc->nodes_relocated;
4491 :
4492 0 : if (rc->merge_reloc_tree) {
4493 0 : ret = btrfs_block_rsv_migrate(&pending->block_rsv,
4494 : rc->block_rsv,
4495 : rc->nodes_relocated, true);
4496 0 : if (ret)
4497 : return ret;
4498 : }
4499 :
4500 0 : new_root = pending->snap;
4501 0 : reloc_root = create_reloc_root(trans, root->reloc_root,
4502 : new_root->root_key.objectid);
4503 0 : if (IS_ERR(reloc_root))
4504 0 : return PTR_ERR(reloc_root);
4505 :
4506 0 : ret = __add_reloc_root(reloc_root);
4507 0 : ASSERT(ret != -EEXIST);
4508 0 : if (ret) {
4509 : /* Pairs with create_reloc_root */
4510 0 : btrfs_put_root(reloc_root);
4511 0 : return ret;
4512 : }
4513 0 : new_root->reloc_root = btrfs_grab_root(reloc_root);
4514 :
4515 0 : if (rc->create_reloc_tree)
4516 0 : ret = clone_backref_node(trans, rc, root, reloc_root);
4517 : return ret;
4518 : }
4519 :
4520 : /*
4521 : * Get the current bytenr for the block group which is being relocated.
4522 : *
4523 : * Return U64_MAX if no running relocation.
4524 : */
4525 0 : u64 btrfs_get_reloc_bg_bytenr(struct btrfs_fs_info *fs_info)
4526 : {
4527 0 : u64 logical = U64_MAX;
4528 :
4529 0 : lockdep_assert_held(&fs_info->reloc_mutex);
4530 :
4531 0 : if (fs_info->reloc_ctl && fs_info->reloc_ctl->block_group)
4532 0 : logical = fs_info->reloc_ctl->block_group->start;
4533 0 : return logical;
4534 : }
|