// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_btree_mem.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_bmap.h"
#include "xfs_bmap_btree.h"
#include "xfs_refcount.h"
#include "xfs_refcount_btree.h"
#include "xfs_ag.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/iscan.h"
#include "scrub/newbt.h"
#include "scrub/reap.h"
#include "scrub/xfbtree.h"

/*
 * Reverse Mapping Btree Repair
 * ============================
 *
 * This is the most involved of all the AG space btree rebuilds.  Everywhere
 * else in XFS we lock inodes and then AG data structures, but generating the
 * list of rmap records requires that we be able to scan both block mapping
 * btrees of every inode in the filesystem to see if it owns any extents in
 * this AG.  We can't tolerate any inode updates while we do this, so we
 * freeze the filesystem to lock everyone else out, and grant ourselves
 * special privileges to run transactions with regular background reclamation
 * turned off.
 *
 * We also have to be very careful not to allow inode reclaim to start a
 * transaction because all transactions (other than our own) will block.
 * Deferred inode inactivation helps us out there.
 *
 * I) Reverse mappings for all non-space metadata and file data are collected
 * according to the following algorithm:
 *
 * 1. For each fork of each inode:
 * 1.1. Create a bitmap BMBIT to track bmbt blocks if necessary.
 * 1.2. If the incore extent map isn't loaded, walk the bmbt to accumulate
 *      bmaps into rmap records (see 1.4).  Set bits in BMBIT for each btree
 *      block.
 * 1.3. If the incore extent map is loaded but the fork is in btree format,
 *      just visit the bmbt blocks to set the corresponding BMBIT areas.
 * 1.4. From the incore extent map, accumulate each bmap that falls into our
 *      target AG.  Remember, multiple bmap records can map to a single rmap
 *      record, so we cannot simply emit rmap records 1:1.
 * 1.5. Emit rmap records for each extent in BMBIT and free it.
 * 2. Create bitmaps INOBIT and ICHUNKBIT.
 * 3. For each record in the inobt, set the corresponding areas in ICHUNKBIT,
 *    and set bits in INOBIT for each btree block.  If the inobt has no
 *    records at all, we must be careful to record its root in INOBIT.
 * 4. For each block in the finobt, set the corresponding INOBIT area.
 * 5. Emit rmap records for each extent in INOBIT and ICHUNKBIT and free them.
 * 6. Create bitmaps REFCBIT and COWBIT.
 * 7. For each CoW staging extent in the refcountbt, set the corresponding
 *    areas in COWBIT.
 * 8. For each block in the refcountbt, set the corresponding REFCBIT area.
 * 9. Emit rmap records for each extent in REFCBIT and COWBIT and free them.
 * A. Emit rmap for the AG headers.
 * B. Emit rmap for the log, if there is one.
 *
 * II) The rmapbt shape and space metadata rmaps are computed as follows:
 *
 * 1. Count the rmaps collected in the previous step. (= NR)
 * 2. Estimate the number of rmapbt blocks needed to store NR records. (= RMB)
 * 3. Reserve RMB blocks through the newbt using the allocator in NORMAP mode.
 * 4. Create bitmap AGBIT.
 * 5. For each reservation in the newbt, set the corresponding areas in AGBIT.
 * 6. For each block in the AGFL, bnobt, and cntbt, set the bits in AGBIT.
 * 7. Count the extents in AGBIT. (= AGNR)
 * 8. Estimate the number of rmapbt blocks needed for NR + AGNR rmaps. (= RMB')
 * 9. If RMB' > RMB, reserve RMB' - RMB more newbt blocks, set RMB = RMB',
 *    and clear AGBIT.  Go to step 5.
 * A. Emit rmaps for each extent in AGBIT.
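 *
 * (A hypothetical round of this loop, with invented numbers: suppose the
 * records collected so far need RMB = 400 btree blocks.  Reserving those 400
 * blocks reshapes the free space btrees and the AGFL, and the reservations
 * themselves become OWN_AG extents, so steps 5-8 might count AGNR = 30 more
 * rmaps and estimate RMB' = 401.  Since RMB' > RMB, we reserve one more
 * block and repeat; the loop converges once an extra reservation no longer
 * changes the estimated btree size.)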
 *
 * III) The rmapbt is constructed and set in place as follows:
 *
 * 1. Sort the rmap records.
 * 2. Bulk load the rmaps.
 *
 * IV) Reap the old btree blocks.
 *
 * 1. Create a bitmap OLDRMBIT.
 * 2. For each gap in the new rmapbt, set the corresponding areas of OLDRMBIT.
 * 3. For each extent in the bnobt, clear the corresponding parts of OLDRMBIT.
 * 4. Reap the extents corresponding to the set areas in OLDRMBIT.  These are
 *    the parts of the AG that the rmap didn't find during its scan of the
 *    primary metadata and aren't known to be in the free space, which implies
 *    that they were the old rmapbt blocks.
 * 5. Commit.
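 *
 * (A small worked example with invented numbers: if the new rmapbt records
 * cover agbnos [0, 100) and [120, 200), then OLDRMBIT gains the gap
 * [100, 120).  If the bnobt says [100, 110) is free space, those bits are
 * cleared, leaving [110, 120): space that is neither mapped nor free, which
 * must be the old rmapbt blocks, and is therefore safe to reap.)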
 *
 * We use the 'xrep_rmap' prefix for all the rmap functions.
 */

/* Context for collecting rmaps */
struct xrep_rmap {
	/* new rmapbt information */
	struct xrep_newbt	new_btree;

	/* lock for the xfbtree and xfile */
	struct mutex		lock;

	/* rmap records generated from primary metadata */
	struct xfbtree		*rmap_btree;

	struct xfs_scrub	*sc;

	/* in-memory btree cursor for the xfs_btree_bload iteration */
	struct xfs_btree_cur	*mcur;

	/* Hooks into rmap update code. */
	struct xfs_rmap_hook	hooks;

	/* inode scan cursor */
	struct xchk_iscan	iscan;

	/* Number of non-freespace records found. */
	unsigned long long	nr_records;

	/* bnobt/cntbt contribution to btreeblks */
	xfs_agblock_t		freesp_btblocks;

	/* old agf_rmap_blocks counter */
	unsigned int		old_rmapbt_fsbcount;
};

/* Set us up to repair reverse mapping btrees. */
int
xrep_setup_ag_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rmap	*rr;
	char			*descr;
	int			error;

	xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);

	descr = xchk_xfile_ag_descr(sc, "reverse mapping records");
	error = xrep_setup_buftarg(sc, descr);
	kfree(descr);
	if (error)
		return error;

	rr = kzalloc(sizeof(struct xrep_rmap), XCHK_GFP_FLAGS);
	if (!rr)
		return -ENOMEM;

	rr->sc = sc;
	sc->buf = rr;
	return 0;
}

/* Make sure there's nothing funny about this mapping. */
STATIC int
xrep_rmap_check_mapping(
	struct xfs_scrub	*sc,
	const struct xfs_rmap_irec *rec)
{
	enum xbtree_recpacking	outcome;
	int			error;

	if (xfs_rmap_check_perag_irec(sc->sa.pag, rec) != NULL)
		return -EFSCORRUPTED;

	/* Make sure this isn't free space. */
	error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock,
			rec->rm_blockcount, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	return 0;
}

/* Store a reverse-mapping record. */
static inline int
xrep_rmap_stash(
	struct xrep_rmap	*rr,
	xfs_agblock_t		startblock,
	xfs_extlen_t		blockcount,
	uint64_t		owner,
	uint64_t		offset,
	unsigned int		flags)
{
	struct xfs_rmap_irec	rmap = {
		.rm_startblock	= startblock,
		.rm_blockcount	= blockcount,
		.rm_owner	= owner,
		.rm_offset	= offset,
		.rm_flags	= flags,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_btree_cur	*mcur;
	struct xfs_buf		*mhead_bp;
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	trace_xrep_rmap_found(sc->mp, sc->sa.pag->pag_agno, &rmap);

	mutex_lock(&rr->lock);
	error = xfbtree_head_read_buf(rr->rmap_btree, sc->tp, &mhead_bp);
	if (error)
		goto out_abort;

	mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, mhead_bp,
			rr->rmap_btree);
	error = xfs_rmap_map_raw(mcur, &rmap);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	error = xfbtree_trans_commit(rr->rmap_btree, sc->tp);
	if (error)
		goto out_abort;

	mutex_unlock(&rr->lock);
	return 0;

out_cancel:
	xfbtree_trans_cancel(rr->rmap_btree, sc->tp);
out_abort:
	xchk_iscan_abort(&rr->iscan);
	mutex_unlock(&rr->lock);
	return error;
}

struct xrep_rmap_stash_run {
	struct xrep_rmap	*rr;
	uint64_t		owner;
	unsigned int		rmap_flags;
};

static int
xrep_rmap_stash_run(
	uint64_t		start,
	uint64_t		len,
	void			*priv)
{
	struct xrep_rmap_stash_run *rsr = priv;
	struct xrep_rmap	*rr = rsr->rr;

	return xrep_rmap_stash(rr, start, len, rsr->owner, 0, rsr->rmap_flags);
}

/*
 * Emit rmaps for every extent of bits set in the bitmap.  Caller must ensure
 * that the ranges are in units of FS blocks.
 */
STATIC int
xrep_rmap_stash_bitmap(
	struct xrep_rmap	*rr,
	struct xagb_bitmap	*bitmap,
	const struct xfs_owner_info *oinfo)
{
	struct xrep_rmap_stash_run rsr = {
		.rr		= rr,
		.owner		= oinfo->oi_owner,
		.rmap_flags	= 0,
	};

	if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
		rsr.rmap_flags |= XFS_RMAP_ATTR_FORK;
	if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
		rsr.rmap_flags |= XFS_RMAP_BMBT_BLOCK;

	return xagb_bitmap_walk(bitmap, xrep_rmap_stash_run, &rsr);
}

/* Section (I): Finding all file and bmbt extents. */

/* Context for accumulating rmaps for an inode fork. */
struct xrep_rmap_ifork {
	/*
	 * Accumulate rmap data here to turn multiple adjacent bmaps into a
	 * single rmap.
	 */
	struct xfs_rmap_irec	accum;

	/* Bitmap of bmbt blocks in this AG. */
	struct xagb_bitmap	bmbt_blocks;

	struct xrep_rmap	*rr;

	/* Which inode fork? */
	int			whichfork;
};

/* Stash an rmap that we accumulated while walking an inode fork. */
STATIC int
xrep_rmap_stash_accumulated(
	struct xrep_rmap_ifork	*rf)
{
	if (rf->accum.rm_blockcount == 0)
		return 0;

	return xrep_rmap_stash(rf->rr, rf->accum.rm_startblock,
			rf->accum.rm_blockcount, rf->accum.rm_owner,
			rf->accum.rm_offset, rf->accum.rm_flags);
}

/* Accumulate a bmbt record. */
STATIC int
xrep_rmap_visit_bmbt(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*rec,
	void			*priv)
{
	struct xrep_rmap_ifork	*rf = priv;
	struct xfs_mount	*mp = rf->rr->sc->mp;
	struct xfs_rmap_irec	*accum = &rf->accum;
	xfs_agblock_t		agbno;
	unsigned int		rmap_flags = 0;
	int			error;

	if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) !=
	    rf->rr->sc->sa.pag->pag_agno)
		return 0;

	agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock);
	if (rf->whichfork == XFS_ATTR_FORK)
		rmap_flags |= XFS_RMAP_ATTR_FORK;
	if (rec->br_state == XFS_EXT_UNWRITTEN)
		rmap_flags |= XFS_RMAP_UNWRITTEN;

	/* If this bmap is adjacent to the previous one, just add it. */
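	/*
	 * (For example, a mapping at startoff 0/agbno 100/len 4 followed by
	 * one at startoff 4/agbno 104/len 8 with identical flags passes all
	 * three checks below and merges into a single 12-block rmap.)
	 */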
	if (accum->rm_blockcount > 0 &&
	    rec->br_startoff == accum->rm_offset + accum->rm_blockcount &&
	    agbno == accum->rm_startblock + accum->rm_blockcount &&
	    rmap_flags == accum->rm_flags) {
		accum->rm_blockcount += rec->br_blockcount;
		return 0;
	}

	/* Otherwise stash the old rmap and start accumulating a new one. */
	error = xrep_rmap_stash_accumulated(rf);
	if (error)
		return error;

	accum->rm_startblock = agbno;
	accum->rm_blockcount = rec->br_blockcount;
	accum->rm_offset = rec->br_startoff;
	accum->rm_flags = rmap_flags;
	return 0;
}

/* Add a btree block to the bitmap. */
STATIC int
xrep_rmap_visit_iroot_btree_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*priv)
{
	struct xrep_rmap_ifork	*rf = priv;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;
	xfs_agblock_t		agbno;

	xfs_btree_get_block(cur, level, &bp);
	if (!bp)
		return 0;

	fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
	if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != rf->rr->sc->sa.pag->pag_agno)
		return 0;

	agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
	return xagb_bitmap_set(&rf->bmbt_blocks, agbno, 1);
}

/*
 * Iterate a metadata btree rooted in an inode to collect rmap records for
 * anything in this fork that matches the AG.
 */
STATIC int
xrep_rmap_scan_iroot_btree(
	struct xrep_rmap_ifork	*rf,
	struct xfs_btree_cur	*cur)
{
	struct xfs_owner_info	oinfo;
	struct xrep_rmap	*rr = rf->rr;
	int			error;

	xagb_bitmap_init(&rf->bmbt_blocks);

	/* Record all the blocks in the btree itself. */
	error = xfs_btree_visit_blocks(cur, xrep_rmap_visit_iroot_btree_block,
			XFS_BTREE_VISIT_ALL, rf);
	if (error)
		goto out;

	/* Emit rmaps for the btree blocks. */
	xfs_rmap_ino_bmbt_owner(&oinfo, rf->accum.rm_owner, rf->whichfork);
	error = xrep_rmap_stash_bitmap(rr, &rf->bmbt_blocks, &oinfo);
	if (error)
		goto out;

	/* Stash any remaining accumulated rmaps. */
	error = xrep_rmap_stash_accumulated(rf);
out:
	xagb_bitmap_destroy(&rf->bmbt_blocks);
	return error;
}

static inline bool
is_rt_data_fork(
	struct xfs_inode	*ip,
	int			whichfork)
{
	return XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK;
}

/*
 * Iterate the block mapping btree to collect rmap records for anything in
 * this fork that matches the AG.  Sets @mappings_done to true if we've
 * scanned the block mappings in this fork.
 */
STATIC int
xrep_rmap_scan_bmbt(
	struct xrep_rmap_ifork	*rf,
	struct xfs_inode	*ip,
	bool			*mappings_done)
{
	struct xrep_rmap	*rr = rf->rr;
	struct xfs_btree_cur	*cur;
	struct xfs_ifork	*ifp;
	int			error;

	*mappings_done = false;
	ifp = xfs_ifork_ptr(ip, rf->whichfork);
	cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork);

	if (!xfs_ifork_is_realtime(ip, rf->whichfork) &&
	    xfs_need_iread_extents(ifp)) {
		/*
		 * If the incore extent cache isn't loaded, scan the bmbt for
		 * mapping records.  This avoids loading the incore extent
		 * tree, which will increase memory pressure at a time when
		 * we're trying to run as quickly as we possibly can.  Ignore
		 * realtime extents.
		 */
		error = xfs_bmap_query_all(cur, xrep_rmap_visit_bmbt, rf);
		if (error)
			goto out_cur;

		*mappings_done = true;
	}

	/* Scan for the bmbt blocks, which always live on the data device. */
	error = xrep_rmap_scan_iroot_btree(rf, cur);
out_cur:
	xfs_btree_del_cursor(cur, error);
	return error;
}

/*
 * Iterate the in-core extent cache to collect rmap records for anything in
 * this fork that matches the AG.
 */
STATIC int
xrep_rmap_scan_iext(
	struct xrep_rmap_ifork	*rf,
	struct xfs_ifork	*ifp)
{
	struct xfs_bmbt_irec	rec;
	struct xfs_iext_cursor	icur;
	int			error;

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		error = xrep_rmap_visit_bmbt(NULL, &rec, rf);
		if (error)
			return error;
	}

	return xrep_rmap_stash_accumulated(rf);
}

/* Find all the extents from a given AG in an inode fork. */
STATIC int
xrep_rmap_scan_ifork(
	struct xrep_rmap	*rr,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xrep_rmap_ifork	rf = {
		.accum		= { .rm_owner = ip->i_ino, },
		.rr		= rr,
		.whichfork	= whichfork,
	};
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	int			error = 0;

	if (!ifp)
		return 0;

	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
		bool		mappings_done;

		/*
		 * Scan the bmap btree for data device mappings.  This
		 * includes the btree blocks themselves, even if this is a
		 * realtime file.
		 */
		error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done);
		if (error || mappings_done)
			return error;
	} else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
		return 0;
	}

	/* Scan incore extent cache if this isn't a realtime file. */
	if (xfs_ifork_is_realtime(ip, whichfork))
		return 0;

	return xrep_rmap_scan_iext(&rf, ifp);
}

/*
 * Take ILOCK on a file that we want to scan.
 *
 * Select ILOCK_EXCL if the file has an unloaded data bmbt or has an unloaded
 * attr bmbt.  Otherwise, take ILOCK_SHARED.
 */
static inline unsigned int
xrep_rmap_scan_ilock(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df)) {
		lock_mode = XFS_ILOCK_EXCL;
		goto lock;
	}

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;

lock:
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

/* Record reverse mappings for a file. */
STATIC int
xrep_rmap_scan_inode(
	struct xrep_rmap	*rr,
	struct xfs_inode	*ip)
{
	unsigned int		lock_mode = xrep_rmap_scan_ilock(ip);
	int			error;

	/* Check the data fork. */
	error = xrep_rmap_scan_ifork(rr, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/* Check the attr fork. */
	error = xrep_rmap_scan_ifork(rr, ip, XFS_ATTR_FORK);
	if (error)
		goto out_unlock;

	/* COW fork extents are "owned" by the refcount btree. */

	xchk_iscan_mark_visited(&rr->iscan, ip);
out_unlock:
	xfs_iunlock(ip, lock_mode);
	return error;
}

/* Section (I): Find all AG metadata extents except for free space metadata. */

struct xrep_rmap_inodes {
	struct xrep_rmap	*rr;
	struct xagb_bitmap	inobt_blocks;	/* INOBIT */
	struct xagb_bitmap	ichunk_blocks;	/* ICHUNKBIT */
};

/* Record inode btree rmaps. */
STATIC int
xrep_rmap_walk_inobt(
	struct xfs_btree_cur	*cur,
	const union xfs_btree_rec *rec,
	void			*priv)
{
	struct xfs_inobt_rec_incore irec;
	struct xrep_rmap_inodes	*ri = priv;
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agblock_t		agbno;
	xfs_agino_t		agino;
	xfs_agino_t		iperhole;
	unsigned int		i;
	int			error;

	/* Record the inobt blocks. */
	error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur);
	if (error)
		return error;

	xfs_inobt_btrec_to_irec(mp, rec, &irec);
	if (xfs_inobt_check_irec(cur, &irec) != NULL)
		return -EFSCORRUPTED;

	agino = irec.ir_startino;

	/* Record a non-sparse inode chunk. */
	if (!xfs_inobt_issparse(irec.ir_holemask)) {
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);

		return xagb_bitmap_set(&ri->ichunk_blocks, agbno,
				XFS_INODES_PER_CHUNK / mp->m_sb.sb_inopblock);
	}

	/* Iterate each chunk. */
	iperhole = max_t(xfs_agino_t, mp->m_sb.sb_inopblock,
			XFS_INODES_PER_HOLEMASK_BIT);
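	/*
	 * (Worked example, assuming 4096-byte blocks and 512-byte inodes:
	 * sb_inopblock is 8 and each of the 16 holemask bits covers
	 * XFS_INODES_PER_HOLEMASK_BIT (4) inodes, so iperhole is 8 inodes.
	 * Each loop iteration below then advances by two holemask bits and
	 * sets one block's worth of ICHUNKBIT.)
	 */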
	for (i = 0, agino = irec.ir_startino;
	     i < XFS_INOBT_HOLEMASK_BITS;
	     i += iperhole / XFS_INODES_PER_HOLEMASK_BIT, agino += iperhole) {
		/* Skip holes. */
		if (irec.ir_holemask & (1 << i))
			continue;

		/* Record the inode chunk otherwise. */
		agbno = XFS_AGINO_TO_AGBNO(mp, agino);
		error = xagb_bitmap_set(&ri->ichunk_blocks, agbno,
				iperhole / mp->m_sb.sb_inopblock);
		if (error)
			return error;
	}

	return 0;
}

/* Collect rmaps for the blocks containing inode btrees and the inode chunks. */
STATIC int
xrep_rmap_find_inode_rmaps(
	struct xrep_rmap	*rr)
{
	struct xrep_rmap_inodes	ri = {
		.rr		= rr,
	};
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	xagb_bitmap_init(&ri.inobt_blocks);
	xagb_bitmap_init(&ri.ichunk_blocks);

	/*
	 * Iterate every record in the inobt so we can capture all the inode
	 * chunks and the blocks in the inobt itself.
	 */
	error = xfs_btree_query_all(sc->sa.ino_cur, xrep_rmap_walk_inobt, &ri);
	if (error)
		goto out_bitmap;

	/*
	 * Note that if there are zero records in the inobt then query_all
	 * does nothing and we have to account the empty inobt root manually.
	 */
	if (xagb_bitmap_empty(&ri.ichunk_blocks)) {
		struct xfs_agi	*agi = sc->sa.agi_bp->b_addr;

		error = xagb_bitmap_set(&ri.inobt_blocks,
				be32_to_cpu(agi->agi_root), 1);
		if (error)
			goto out_bitmap;
	}

	/* Scan the finobt too. */
	if (xfs_has_finobt(sc->mp)) {
		error = xagb_bitmap_set_btblocks(&ri.inobt_blocks,
				sc->sa.fino_cur);
		if (error)
			goto out_bitmap;
	}

	/* Generate rmaps for everything. */
	error = xrep_rmap_stash_bitmap(rr, &ri.inobt_blocks,
			&XFS_RMAP_OINFO_INOBT);
	if (error)
		goto out_bitmap;
	error = xrep_rmap_stash_bitmap(rr, &ri.ichunk_blocks,
			&XFS_RMAP_OINFO_INODES);

out_bitmap:
	xagb_bitmap_destroy(&ri.inobt_blocks);
	xagb_bitmap_destroy(&ri.ichunk_blocks);
	return error;
}

/* Record a CoW staging extent. */
STATIC int
xrep_rmap_walk_cowblocks(
	struct xfs_btree_cur	*cur,
	const struct xfs_refcount_irec *irec,
	void			*priv)
{
	struct xagb_bitmap	*bitmap = priv;

	if (!xfs_refcount_check_domain(irec) ||
	    irec->rc_domain != XFS_REFC_DOMAIN_COW)
		return -EFSCORRUPTED;

	return xagb_bitmap_set(bitmap, irec->rc_startblock,
			irec->rc_blockcount);
}

/*
 * Collect rmaps for the blocks containing the refcount btree, and all CoW
 * staging extents.
 */
STATIC int
xrep_rmap_find_refcount_rmaps(
	struct xrep_rmap	*rr)
{
	struct xagb_bitmap	refcountbt_blocks;	/* REFCBIT */
	struct xagb_bitmap	cow_blocks;		/* COWBIT */
	struct xfs_refcount_irec low = {
		.rc_startblock	= 0,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_refcount_irec high = {
		.rc_startblock	= -1U,
		.rc_domain	= XFS_REFC_DOMAIN_COW,
	};
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	if (!xfs_has_reflink(sc->mp))
		return 0;

	xagb_bitmap_init(&refcountbt_blocks);
	xagb_bitmap_init(&cow_blocks);

	/* refcountbt */
	error = xagb_bitmap_set_btblocks(&refcountbt_blocks, sc->sa.refc_cur);
	if (error)
		goto out_bitmap;

	/* Collect rmaps for CoW staging extents. */
	error = xfs_refcount_query_range(sc->sa.refc_cur, &low, &high,
			xrep_rmap_walk_cowblocks, &cow_blocks);
	if (error)
		goto out_bitmap;

	/* Generate rmaps for everything. */
	error = xrep_rmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);
	if (error)
		goto out_bitmap;
	error = xrep_rmap_stash_bitmap(rr, &refcountbt_blocks,
			&XFS_RMAP_OINFO_REFC);

out_bitmap:
	xagb_bitmap_destroy(&cow_blocks);
	xagb_bitmap_destroy(&refcountbt_blocks);
	return error;
}

/* Generate rmaps for the AG headers (AGI/AGF/AGFL) */
STATIC int
xrep_rmap_find_agheader_rmaps(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;

	/* Create a record spanning the AG headers, from the sb to the agfl. */
	return xrep_rmap_stash(rr, XFS_SB_BLOCK(sc->mp),
			XFS_AGFL_BLOCK(sc->mp) - XFS_SB_BLOCK(sc->mp) + 1,
			XFS_RMAP_OWN_FS, 0, 0);
}

/* Generate rmaps for the log, if it's in this AG. */
STATIC int
xrep_rmap_find_log_rmaps(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;

	if (!xfs_ag_contains_log(sc->mp, sc->sa.pag->pag_agno))
		return 0;

	return xrep_rmap_stash(rr,
			XFS_FSB_TO_AGBNO(sc->mp, sc->mp->m_sb.sb_logstart),
			sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0);
}

/* Check and count all the records that we gathered. */
STATIC int
xrep_rmap_check_record(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xrep_rmap	*rr = priv;
	int			error;

	error = xrep_rmap_check_mapping(rr->sc, rec);
	if (error)
		return error;

	rr->nr_records++;
	return 0;
}

/*
 * Generate all the reverse-mappings for this AG, a list of the old rmapbt
 * blocks, and the new btreeblks count.  Figure out if we have enough free
 * space to reconstruct the rmap btree.  The caller must clean up the lists
 * if anything goes wrong.  This implements section (I) above.
 */
STATIC int
xrep_rmap_find_rmaps(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xchk_ag		*sa = &sc->sa;
	struct xfs_inode	*ip;
	struct xfs_buf		*mhead_bp;
	struct xfs_btree_cur	*mcur;
	int			error;

	/* Find all the per-AG metadata. */
	xrep_ag_btcur_init(sc, &sc->sa);

	error = xrep_rmap_find_inode_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_refcount_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_agheader_rmaps(rr);
	if (error)
		goto end_agscan;

	error = xrep_rmap_find_log_rmaps(rr);
end_agscan:
	xchk_ag_btcur_free(&sc->sa);
	if (error)
		return error;

	/*
	 * Set up for a potentially lengthy filesystem scan by reducing our
	 * transaction resource usage for the duration.  Specifically:
	 *
	 * Unlock the AG header buffers and cancel the transaction to release
	 * the log grant space while we scan the filesystem.
	 *
	 * Create a new empty transaction to eliminate the possibility of the
	 * inode scan deadlocking on cyclical metadata.
	 *
	 * We pass the empty transaction to the file scanning function to
	 * avoid repeatedly cycling empty transactions.  This can be done
	 * even though we take the IOLOCK to quiesce the file because empty
	 * transactions do not take sb_internal.
	 */
	sa->agf_bp = NULL;
	sa->agi_bp = NULL;
	xchk_trans_cancel(sc);
	error = xchk_trans_alloc_empty(sc);
	if (error)
		return error;

	/* Iterate every file in the filesystem for inode-owned rmaps. */
	while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) {
		error = xrep_rmap_scan_inode(rr, ip);
		xchk_irele(sc, ip);
		if (error)
			break;

		if (xchk_should_terminate(sc, &error))
			break;
	}
	xchk_iscan_iter_finish(&rr->iscan);
	if (error)
		return error;

	/*
	 * Switch out for a real transaction and lock the AG headers in
	 * preparation for building a new tree.
	 */
	xchk_trans_cancel(sc);
	error = xchk_setup_fs(sc);
	if (error)
		return error;
	error = xchk_perag_drain_and_lock(sc);
	if (error)
		return error;

	/*
	 * If a hook failed to update the in-memory btree, we lack the data
	 * to continue the repair.
	 */
	if (xchk_iscan_aborted(&rr->iscan))
		return -EFSCORRUPTED;

	/*
	 * Now that we have everything locked again, we need to count the
	 * number of rmap records stashed in the btree.  This should reflect
	 * all actively-owned space in the filesystem.  At the same time,
	 * check all our records before we start building a new btree, which
	 * requires a bnobt cursor.
	 */
	error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
	if (error)
		return error;

	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
			rr->rmap_btree);
	sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);

	rr->nr_records = 0;
	error = xfs_rmap_query_all(mcur, xrep_rmap_check_record, rr);

	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	xfs_btree_del_cursor(mcur, error);
	xfs_buf_relse(mhead_bp);

	return error;
}

/* Section (II): Reserving space for new rmapbt and setting free space bitmap */

struct xrep_rmap_agfl {
	struct xagb_bitmap	*bitmap;
	xfs_agnumber_t		agno;
};

/* Add an AGFL block to the rmap list. */
STATIC int
xrep_rmap_walk_agfl(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xrep_rmap_agfl	*ra = priv;

	return xagb_bitmap_set(ra->bitmap, agbno, 1);
}

/*
 * Run one round of reserving space for the new rmapbt and recomputing the
 * number of blocks needed to store the previously observed rmapbt records
 * and the ones we'll create for the free space metadata.  When we don't need
 * more blocks, return a bitmap of OWN_AG extents in @freesp_blocks and set
 * @done to true.
 */
STATIC int
xrep_rmap_try_reserve(
	struct xrep_rmap	*rr,
	struct xfs_btree_cur	*rmap_cur,
	struct xagb_bitmap	*freesp_blocks,
	uint64_t		*blocks_reserved,
	bool			*done)
{
	struct xrep_rmap_agfl	ra = {
		.bitmap		= freesp_blocks,
		.agno		= rr->sc->sa.pag->pag_agno,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xrep_newbt_resv	*resv, *n;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_buf		*agfl_bp;
	uint64_t		nr_blocks;	/* RMB */
	uint64_t		freesp_records;
	int			error;

	/*
	 * We're going to recompute new_btree.bload.nr_blocks at the end of
	 * this function to reflect however many btree blocks we need to
	 * store all the rmap records (including the ones that reflect the
	 * changes we made to support the new rmapbt blocks), so we save the
	 * old value here so we can decide if we've reserved enough blocks.
	 */
	nr_blocks = rr->new_btree.bload.nr_blocks;

	/*
	 * Make sure we've reserved enough space for the new btree.  This can
	 * change the shape of the free space btrees, which can cause
	 * secondary interactions with the rmap records because all three
	 * space btrees have the same rmap owner.  We'll account for all that
	 * below.
	 */
	error = xrep_newbt_alloc_blocks(&rr->new_btree,
			nr_blocks - *blocks_reserved);
	if (error)
		return error;

	*blocks_reserved = rr->new_btree.bload.nr_blocks;

	/* Clear everything in the bitmap. */
	xagb_bitmap_destroy(freesp_blocks);

	/* Set all the bnobt blocks in the bitmap. */
	sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.bno_cur);
	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	if (error)
		return error;

	/* Set all the cntbt blocks in the bitmap. */
	sc->sa.cnt_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag, XFS_BTNUM_CNT);
	error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.cnt_cur);
	xfs_btree_del_cursor(sc->sa.cnt_cur, error);
	sc->sa.cnt_cur = NULL;
	if (error)
		return error;

	/* Record our new btreeblks value. */
	rr->freesp_btblocks = xagb_bitmap_hweight(freesp_blocks) - 2;

	/* Set all the new rmapbt blocks in the bitmap. */
	for_each_xrep_newbt_reservation(&rr->new_btree, resv, n) {
		error = xagb_bitmap_set(freesp_blocks, resv->agbno, resv->len);
		if (error)
			return error;
	}

	/* Set all the AGFL blocks in the bitmap. */
	error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
	if (error)
		return error;

	error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xrep_rmap_walk_agfl, &ra);
	if (error)
		return error;

	/* Count the extents in the bitmap. */
	freesp_records = xagb_bitmap_count_set_regions(freesp_blocks);

	/* Compute how many blocks we'll need for all the rmaps. */
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload, rr->nr_records + freesp_records);
	if (error)
		return error;

	/* We're done when we don't need more blocks. */
	*done = nr_blocks >= rr->new_btree.bload.nr_blocks;
	return 0;
}

/*
 * Iteratively reserve space for rmap btree while recording OWN_AG rmaps for
 * the free space metadata.  This implements section (II) above.
 */
STATIC int
xrep_rmap_reserve_space(
	struct xrep_rmap	*rr,
	struct xfs_btree_cur	*rmap_cur)
{
	struct xagb_bitmap	freesp_blocks;	/* AGBIT */
	uint64_t		blocks_reserved = 0;
	bool			done = false;
	int			error;

	/* Compute how many blocks we'll need for the rmaps collected so far. */
	error = xfs_btree_bload_compute_geometry(rmap_cur,
			&rr->new_btree.bload, rr->nr_records);
	if (error)
		return error;

	/* Last chance to abort before we start committing fixes. */
	if (xchk_should_terminate(rr->sc, &error))
		return error;

	xagb_bitmap_init(&freesp_blocks);

	/*
	 * Iteratively reserve space for the new rmapbt and recompute the
	 * number of blocks needed to store the previously observed rmapbt
	 * records and the ones we'll create for the free space metadata.
	 * Finish when we don't need more blocks.
	 */
	do {
		error = xrep_rmap_try_reserve(rr, rmap_cur, &freesp_blocks,
				&blocks_reserved, &done);
		if (error)
			goto out_bitmap;
	} while (!done);

	/* Emit rmaps for everything in the free space bitmap. */
	xrep_ag_btcur_init(rr->sc, &rr->sc->sa);
	error = xrep_rmap_stash_bitmap(rr, &freesp_blocks, &XFS_RMAP_OINFO_AG);
	xchk_ag_btcur_free(&rr->sc->sa);

out_bitmap:
	xagb_bitmap_destroy(&freesp_blocks);
	return error;
}

/* Section (III): Building the new rmap btree. */

/* Update the AGF counters. */
STATIC int
xrep_rmap_reset_counters(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		rmap_btblocks;

	/*
	 * The AGF header contains extra information related to the reverse
	 * mapping btree, so we must update those fields here.
	 */
	rmap_btblocks = rr->new_btree.afake.af_blocks - 1;
	agf->agf_btreeblks = cpu_to_be32(rr->freesp_btblocks + rmap_btblocks);
	xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_BTREEBLKS);

	/*
	 * After we commit the new btree to disk, it is possible that the
	 * process to reap the old btree blocks will race with the AIL trying
	 * to checkpoint the old btree blocks into the filesystem.  If the
	 * new tree is shorter than the old one, the rmapbt write verifier
	 * will fail and the AIL will shut down the filesystem.
	 *
	 * To avoid this, save the old incore btree height values as the alt
	 * height values before re-initializing the perag info from the
	 * updated AGF to capture all the new values.
	 */
	pag->pagf_alt_levels[XFS_BTNUM_RMAPi] =
			pag->pagf_levels[XFS_BTNUM_RMAPi];

	/* Reinitialize with the values we just logged. */
	return xrep_reinit_pagf(sc);
}

/* Retrieve rmapbt data for bulk load. */
STATIC int
xrep_rmap_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xrep_rmap	*rr = priv;
	union xfs_btree_rec	*block_rec;
	unsigned int		loaded;
	int			error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		int		stat = 0;

		error = xfs_btree_increment(rr->mcur, 0, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
		if (error)
			return error;
		if (!stat)
			return -EFSCORRUPTED;

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Feed one of the new btree blocks to the bulk loader. */
STATIC int
xrep_rmap_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_rmap	*rr = priv;
	int			error;

	error = xrep_newbt_relog_autoreap(&rr->new_btree);
	if (error)
		return error;

	return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
}

/* Custom allocation function for new rmap btrees. */
STATIC int
xrep_rmap_alloc_vextent(
	struct xfs_scrub	*sc,
	struct xfs_alloc_arg	*args,
	xfs_fsblock_t		alloc_hint)
{
	int			error;

	/*
	 * We don't want an rmap update on the allocation, since we
	 * iteratively compute the OWN_AG records /after/ allocating blocks
	 * for the records that we already know we need to store.  Therefore,
	 * fix the freelist with the NORMAP flag set so that we don't also
	 * try to create an rmap for new AGFL blocks.
	 */
	error = xrep_fix_freelist(sc, XFS_ALLOC_FLAG_NORMAP);
	if (error)
		return error;

	/*
	 * If xrep_fix_freelist fixed the freelist by moving blocks from the
	 * free space btrees or by removing blocks from the AGFL and queueing
	 * an EFI to free the block, the transaction will be dirty.  This
	 * second case is of interest to us.
	 *
	 * Later on, we will need to compare gaps in the new recordset
	 * against the block usage of all OWN_AG owners in order to free the
	 * old btree's blocks, which means that we can't have EFIs for former
	 * AGFL blocks attached to the repair transaction when we commit the
	 * new btree.
	 *
	 * xrep_newbt_alloc_blocks guarantees this for us by calling
	 * xrep_defer_finish to commit anything that fix_freelist may have
	 * added to the transaction.
	 */
	return xfs_alloc_vextent_near_bno(args, alloc_hint);
}

/* Count the records in this btree. */
STATIC int
xrep_rmap_count_records(
	struct xfs_btree_cur	*cur,
	unsigned long long	*nr)
{
	int			running = 1;
	int			error;

	*nr = 0;

	error = xfs_btree_goto_left_edge(cur);
	if (error)
		return error;

	while (running && !(error = xfs_btree_increment(cur, 0, &running))) {
		if (running)
			(*nr)++;
	}

	return error;
}

/*
 * Use the collected rmap information to stage a new rmap btree.  If this is
 * successful we'll return with the new btree root information logged to the
 * repair transaction but not yet committed.  This implements section (III)
 * above.
 */
STATIC int
xrep_rmap_build_new_tree(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_btree_cur	*rmap_cur;
	struct xfs_buf		*mhead_bp;
	xfs_fsblock_t		fsbno;
	int			error;

	/*
	 * Preserve the old rmapbt block count so that we can adjust the
	 * per-AG rmapbt reservation after we commit the new btree root and
	 * want to dispose of the old btree blocks.
	 */
	rr->old_rmapbt_fsbcount = be32_to_cpu(agf->agf_rmap_blocks);

	/*
	 * Prepare to construct the new btree by reserving disk space for the
	 * new btree and setting up all the accounting information we'll
	 * need to root the new btree while it's under construction and
	 * before we attach it to the AG header.  The new blocks are
	 * accounted to the rmapbt per-AG reservation, which we will adjust
	 * further after committing the new btree.
	 */
	fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, XFS_RMAP_BLOCK(sc->mp));
	xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE,
			fsbno, XFS_AG_RESV_RMAPBT);
	rr->new_btree.bload.get_records = xrep_rmap_get_records;
	rr->new_btree.bload.claim_block = xrep_rmap_claim_block;
	rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent;
	rmap_cur = xfs_rmapbt_stage_cursor(sc->mp, &rr->new_btree.afake, pag);

	/*
	 * Initialize @rr->new_btree, reserve space for the new rmapbt,
	 * and compute OWN_AG rmaps.
	 */
	error = xrep_rmap_reserve_space(rr, rmap_cur);
	if (error)
		goto err_cur;

	/*
	 * Count the rmapbt records again, because the space reservation
	 * for the rmapbt itself probably added more records to the btree.
	 */
	error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
	if (error)
		goto err_cur;

	rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
			rr->rmap_btree);

	error = xrep_rmap_count_records(rr->mcur, &rr->nr_records);
	if (error)
		goto err_mcur;

	/*
	 * Due to btree slack factors, it's possible for a new btree to be
	 * one level taller than the old btree.  Update the incore btree
	 * height so that we don't trip the verifiers when writing the new
	 * btree blocks to disk.
	 */
	pag->pagf_alt_levels[XFS_BTNUM_RMAPi] =
			rr->new_btree.bload.btree_height;

	/*
	 * Move the cursor to the left edge of the tree so that the first
	 * increment in ->get_records positions us at the first record.
	 */
	error = xfs_btree_goto_left_edge(rr->mcur);
	if (error)
		goto err_level;

	/* Add all observed rmap records. */
	error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr);
	if (error)
		goto err_level;

	/*
	 * Install the new btree in the AG header.  After this point the old
	 * btree is no longer accessible and the new tree is live.
	 */
	xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp);
	xfs_btree_del_cursor(rmap_cur, 0);
	xfs_btree_del_cursor(rr->mcur, 0);
	rr->mcur = NULL;
	xfs_buf_relse(mhead_bp);

	/*
	 * Now that we've written the new btree to disk, we don't need to
	 * keep updating the in-memory btree.  Abort the scan to stop live
	 * updates.
	 */
	xchk_iscan_abort(&rr->iscan);

	/*
	 * The newly committed rmap recordset includes mappings for the
	 * blocks that we reserved to build the new btree.  If there is
	 * excess space reservation to be freed, the corresponding rmap
	 * records must also be removed.
	 */
	rr->new_btree.oinfo = XFS_RMAP_OINFO_AG;

	/* Reset the AGF counters now that we've changed the btree shape. */
	error = xrep_rmap_reset_counters(rr);
	if (error)
		goto err_newbt;

	/* Dispose of any unused blocks and the accounting information. */
	error = xrep_newbt_commit(&rr->new_btree);
	if (error)
		return error;

	return xrep_roll_ag_trans(sc);

err_level:
	pag->pagf_alt_levels[XFS_BTNUM_RMAPi] = 0;
err_mcur:
	xfs_btree_del_cursor(rr->mcur, error);
	xfs_buf_relse(mhead_bp);
err_cur:
	xfs_btree_del_cursor(rmap_cur, error);
err_newbt:
	xrep_newbt_cancel(&rr->new_btree);
	return error;
}

/* Section (IV): Reaping the old btree. */

struct xrep_rmap_find_gaps {
	struct xagb_bitmap	rmap_gaps;
	xfs_agblock_t		next_agbno;
};

/* Subtract each free extent in the bnobt from the rmap gaps. */
STATIC int
xrep_rmap_find_freesp(
	struct xfs_btree_cur	*cur,
	const struct xfs_alloc_rec_incore *rec,
	void			*priv)
{
	struct xrep_rmap_find_gaps *rfg = priv;

	return xagb_bitmap_clear(&rfg->rmap_gaps, rec->ar_startblock,
			rec->ar_blockcount);
}

/* Record the free space we find, as part of cleaning out the btree. */
STATIC int
xrep_rmap_find_gaps(
	struct xfs_btree_cur	*cur,
	const struct xfs_rmap_irec *rec,
	void			*priv)
{
	struct xrep_rmap_find_gaps *rfg = priv;
	int			error;

	if (rec->rm_startblock > rfg->next_agbno) {
		error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno,
				rec->rm_startblock - rfg->next_agbno);
		if (error)
			return error;
	}

	rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno,
			rec->rm_startblock + rec->rm_blockcount);
	return 0;
}

/*
 * Reap the old rmapbt blocks.  Now that the rmapbt is fully rebuilt, we make
 * a list of gaps in the rmap records and a list of the extents mentioned in
 * the bnobt.  Any block that's in the new rmapbt gap list but not mentioned
 * in the bnobt is a block from the old rmapbt and can be removed.
 */
STATIC int
xrep_rmap_remove_old_tree(
	struct xrep_rmap	*rr)
{
	struct xrep_rmap_find_gaps rfg = {
		.next_agbno	= 0,
	};
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_perag	*pag = sc->sa.pag;
	struct xfs_btree_cur	*mcur;
	struct xfs_buf		*mhead_bp;
	xfs_agblock_t		agend;
	int			error;

	xagb_bitmap_init(&rfg.rmap_gaps);

	/* Compute free space from the new rmapbt. */
	error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
	if (error)
		goto out_bitmap;
	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
			rr->rmap_btree);

	error = xfs_rmap_query_all(mcur, xrep_rmap_find_gaps, &rfg);
	xfs_btree_del_cursor(mcur, error);
	xfs_buf_relse(mhead_bp);
	if (error)
		goto out_bitmap;

	/* Insert a record for space between the last rmap and EOAG. */
	agend = be32_to_cpu(agf->agf_length);
	if (rfg.next_agbno < agend) {
		error = xagb_bitmap_set(&rfg.rmap_gaps, rfg.next_agbno,
				agend - rfg.next_agbno);
		if (error)
			goto out_bitmap;
	}

	/* Compute free space from the existing bnobt. */
	sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
			sc->sa.pag, XFS_BTNUM_BNO);
	error = xfs_alloc_query_all(sc->sa.bno_cur, xrep_rmap_find_freesp,
			&rfg);
	xfs_btree_del_cursor(sc->sa.bno_cur, error);
	sc->sa.bno_cur = NULL;
	if (error)
		goto out_bitmap;

	/*
	 * Free the "free" blocks that the new rmapbt knows about but the
	 * bnobt doesn't--these are the old rmapbt blocks.  Credit the old
	 * rmapbt block usage count back to the per-AG rmapbt reservation
	 * (and not fdblocks, since the rmap btree lives in free space) to
	 * keep the reservation and free space accounting correct.
	 */
	error = xrep_reap_agblocks(sc, &rfg.rmap_gaps,
			&XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_RMAPBT);
	if (error)
		goto out_bitmap;

	/*
	 * Now that we've zapped all the old rmapbt blocks we can turn off
	 * the alternate height mechanism and reset the per-AG space
	 * reservation.
	 */
	pag->pagf_alt_levels[XFS_BTNUM_RMAPi] = 0;
	sc->flags |= XREP_RESET_PERAG_RESV;
out_bitmap:
	xagb_bitmap_destroy(&rfg.rmap_gaps);
	return error;
}

static inline bool
xrep_rmapbt_want_live_update(
	struct xchk_iscan	*iscan,
	const struct xfs_owner_info *oi)
{
	if (xchk_iscan_aborted(iscan))
		return false;

	/*
	 * Before unlocking the AG header to perform the inode scan, we
	 * recorded reverse mappings for all AG metadata except for the
	 * OWN_AG metadata.  IOWs, the in-memory btree knows about the AG
	 * headers, the two inode btrees, the CoW staging extents, and the
	 * refcount btrees.  For these types of metadata, we need to record
	 * the live updates in the in-memory rmap btree.
	 *
	 * However, we do not scan the free space btrees or the AGFL until
	 * we have re-locked the AGF and are ready to reserve space for the
	 * new rmap btree, so we do not want live updates for OWN_AG
	 * metadata.
	 */
1559 1424 : return oi->oi_owner != XFS_RMAP_OWN_AG;
1560 :
1561 : /* Ignore updates to files that the scanner hasn't visited yet. */
1562 202477 : return xchk_iscan_want_live_update(iscan, oi->oi_owner);
1563 : }

/*
 * Apply a rmapbt update from the regular filesystem into our shadow btree.
 * We're running from the thread that owns the AGF buffer and is generating
 * the update, so we must be careful about which parts of the struct
 * xrep_rmap that we change.
 */
static int
xrep_rmapbt_live_update(
	struct notifier_block	*nb,
	unsigned long		action,
	void			*data)
{
	struct xfs_rmap_update_params *p = data;
	struct xrep_rmap	*rr;
	struct xfs_mount	*mp;
	struct xfs_btree_cur	*mcur;
	struct xfs_buf		*mhead_bp;
	struct xfs_trans	*tp;
	void			*txcookie;
	int			error;

	rr = container_of(nb, struct xrep_rmap, hooks.update_hook.nb);
	mp = rr->sc->mp;

	if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo))
		goto out_unlock;

	trace_xrep_rmap_live_update(mp, rr->sc->sa.pag->pag_agno, action, p);

	error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
	if (error)
		goto out_abort;

	mutex_lock(&rr->lock);
	error = xfbtree_head_read_buf(rr->rmap_btree, tp, &mhead_bp);
	if (error)
		goto out_cancel;

	mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, mhead_bp,
			rr->rmap_btree);
	error = __xfs_rmap_finish_intent(mcur, action, p->startblock,
			p->blockcount, &p->oinfo, p->unwritten);
	xfs_btree_del_cursor(mcur, error);
	if (error)
		goto out_cancel;

	error = xfbtree_trans_commit(rr->rmap_btree, tp);
	if (error)
		goto out_cancel;

	xrep_trans_cancel_hook_dummy(&txcookie, tp);
	mutex_unlock(&rr->lock);
	return NOTIFY_DONE;

out_cancel:
	xfbtree_trans_cancel(rr->rmap_btree, tp);
	xrep_trans_cancel_hook_dummy(&txcookie, tp);
	mutex_unlock(&rr->lock);
out_abort:
	xchk_iscan_abort(&rr->iscan);
out_unlock:
	return NOTIFY_DONE;
}

/* Set up the filesystem scan components. */
STATIC int
xrep_rmap_setup_scan(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	int			error;

	mutex_init(&rr->lock);

	/* Set up in-memory rmap btree */
	error = xfs_rmapbt_mem_create(sc->mp, sc->sa.pag->pag_agno,
			sc->xfile_buftarg, &rr->rmap_btree);
	if (error)
		goto out_mutex;

	/* Retry iget every tenth of a second for up to 30 seconds. */
	xchk_iscan_start(sc, 30000, 100, &rr->iscan);

	/*
	 * Hook into live rmap operations so that we can update our in-memory
	 * btree to reflect live changes on the filesystem.  Since we drop
	 * the AGF buffer to scan all the inodes, we need this piece to avoid
	 * installing a stale btree.
	 */
	ASSERT(sc->flags & XCHK_FSGATES_RMAP);
	xfs_hook_setup(&rr->hooks.update_hook, xrep_rmapbt_live_update);
	error = xfs_rmap_hook_add(sc->sa.pag, &rr->hooks);
	if (error)
		goto out_iscan;
	return 0;

out_iscan:
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(rr->rmap_btree);
out_mutex:
	mutex_destroy(&rr->lock);
	return error;
}

/* Tear down scan components. */
STATIC void
xrep_rmap_teardown(
	struct xrep_rmap	*rr)
{
	struct xfs_scrub	*sc = rr->sc;

	xchk_iscan_abort(&rr->iscan);
	xfs_rmap_hook_del(sc->sa.pag, &rr->hooks);
	xchk_iscan_teardown(&rr->iscan);
	xfbtree_destroy(rr->rmap_btree);
	mutex_destroy(&rr->lock);
}

/* Repair the rmap btree for some AG. */
int
xrep_rmapbt(
	struct xfs_scrub	*sc)
{
	struct xrep_rmap	*rr = sc->buf;
	int			error;

	error = xrep_rmap_setup_scan(rr);
	if (error)
		return error;

	/*
	 * Collect rmaps for everything in this AG that isn't space metadata.
	 * These rmaps won't change even as we try to allocate blocks.
	 */
	error = xrep_rmap_find_rmaps(rr);
	if (error)
		goto out_records;

	/* Rebuild the rmap information. */
	error = xrep_rmap_build_new_tree(rr);
	if (error)
		goto out_records;

	/* Kill the old tree. */
	error = xrep_rmap_remove_old_tree(rr);

out_records:
	xrep_rmap_teardown(rr);
	return error;
}