1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_mount.h"
12 : #include "xfs_defer.h"
13 : #include "xfs_btree.h"
14 : #include "xfs_btree_staging.h"
15 : #include "xfs_btree_mem.h"
16 : #include "xfs_bit.h"
17 : #include "xfs_log_format.h"
18 : #include "xfs_trans.h"
19 : #include "xfs_sb.h"
20 : #include "xfs_alloc.h"
21 : #include "xfs_alloc_btree.h"
22 : #include "xfs_ialloc.h"
23 : #include "xfs_ialloc_btree.h"
24 : #include "xfs_rmap.h"
25 : #include "xfs_rmap_btree.h"
26 : #include "xfs_inode.h"
27 : #include "xfs_icache.h"
28 : #include "xfs_bmap.h"
29 : #include "xfs_bmap_btree.h"
30 : #include "xfs_refcount.h"
31 : #include "xfs_refcount_btree.h"
32 : #include "xfs_ag.h"
33 : #include "scrub/xfs_scrub.h"
34 : #include "scrub/scrub.h"
35 : #include "scrub/common.h"
36 : #include "scrub/btree.h"
37 : #include "scrub/trace.h"
38 : #include "scrub/repair.h"
39 : #include "scrub/bitmap.h"
40 : #include "scrub/xfile.h"
41 : #include "scrub/xfarray.h"
42 : #include "scrub/iscan.h"
43 : #include "scrub/newbt.h"
44 : #include "scrub/reap.h"
45 : #include "scrub/xfbtree.h"
46 :
47 : /*
48 : * Reverse Mapping Btree Repair
49 : * ============================
50 : *
51 : * This is the most involved of all the AG space btree rebuilds. Everywhere
52 : * else in XFS we lock inodes and then AG data structures, but generating the
53 : * list of rmap records requires that we be able to scan both block mapping
54 : * btrees of every inode in the filesystem to see if it owns any extents in
55 : * this AG. We can't tolerate any inode updates going unnoticed while we do
56 : * this, so we stage all new rmap records in an in-memory btree and hook
57 : * into the regular rmap code so that live updates to the filesystem are
58 : * replayed into the staging tree as the inode scan runs.
59 : *
60 : * We also have to be careful not to deadlock on cyclical metadata while
61 : * scanning every inode, so the scan runs under an otherwise empty
62 : * transaction; see xrep_rmap_find_rmaps for the details.
63 : *
64 : * I) Reverse mappings for all non-space metadata and file data are collected
65 : * according to the following algorithm:
66 : *
67 : * 1. For each fork of each inode:
68 : * 1.1. Create a bitmap BMBIT to track bmbt blocks if necessary.
69 : * 1.2. If the incore extent map isn't loaded, walk the bmbt to accumulate
70 : * bmaps into rmap records (see 1.4). Set bits in BMBIT for each btree
71 : * block.
72 : * 1.3. If the incore extent map is loaded but the fork is in btree format,
73 : * just visit the bmbt blocks to set the corresponding BMBIT areas.
74 : * 1.4. From the incore extent map, accumulate each bmap that falls into our
75 : * target AG. Remember, multiple bmap records can map to a single rmap
76 : * record, so we cannot simply emit rmap records 1:1.
77 : * 1.5. Emit rmap records for each extent in BMBIT and free it.
78 : * 2. Create bitmaps INOBIT and ICHUNKBIT.
79 : * 3. For each record in the inobt, set the corresponding areas in ICHUNKBIT,
80 : * and set bits in INOBIT for each btree block. If the inobt has no records
81 : * at all, we must be careful to record its root in INOBIT.
82 : * 4. For each block in the finobt, set the corresponding INOBIT area.
83 : * 5. Emit rmap records for each extent in INOBIT and ICHUNKBIT and free them.
84 : * 6. Create bitmaps REFCBIT and COWBIT.
85 : * 7. For each CoW staging extent in the refcountbt, set the corresponding
86 : * areas in COWBIT.
87 : * 8. For each block in the refcountbt, set the corresponding REFCBIT area.
88 : * 9. Emit rmap records for each extent in REFCBIT and COWBIT and free them.
89 : * A. Emit rmap for the AG headers.
90 : * B. Emit rmap for the log, if there is one.
91 : *
92 : * II) The rmapbt shape and space metadata rmaps are computed as follows:
93 : *
94 : * 1. Count the rmaps collected in the previous step. (= NR)
95 : * 2. Estimate the number of rmapbt blocks needed to store NR records. (= RMB)
96 : * 3. Reserve RMB blocks through the newbt using the allocator in NORMAP mode.
97 : * 4. Create bitmap AGBIT.
98 : * 5. For each reservation in the newbt, set the corresponding areas in AGBIT.
99 : * 6. For each block in the AGFL, bnobt, and cntbt, set the bits in AGBIT.
100 : * 7. Count the extents in AGBIT. (= AGNR)
101 : * 8. Estimate the number of rmapbt blocks needed for NR + AGNR rmaps. (= RMB')
102 : * 9. If RMB' > RMB, reserve RMB' - RMB more newbt blocks, set RMB = RMB',
103 : * and clear AGBIT. Go to step 5.
104 : * A. Emit rmaps for each extent in AGBIT.
105 : *
106 : * III) The rmapbt is constructed and set in place as follows:
107 : *
108 : * 1. Sort the rmap records.
109 : * 2. Bulk load the rmaps.
110 : *
111 : * IV) Reap the old btree blocks.
112 : *
113 : * 1. Create a bitmap OLDRMBIT.
114 : * 2. For each gap in the new rmapbt, set the corresponding areas of OLDRMBIT.
115 : * 3. For each extent in the bnobt, clear the corresponding parts of OLDRMBIT.
116 : * 4. Reap the extents corresponding to the set areas in OLDRMBIT. These are
117 : * the parts of the AG that the rmap didn't find during its scan of the
118 : * primary metadata and aren't known to be in the free space, which implies
119 : * that they were the old rmapbt blocks.
120 : * 5. Commit.
121 : *
122 : * We use the 'xrep_rmap' prefix for all the rmap functions.
123 : */
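
/*
* Illustrative toy walkthrough of the four phases above (all block numbers
* invented for exposition): consider a small AG in which blocks 0-3 hold the
* AG headers, blocks 10-17 hold an inode chunk, blocks 20-29 are mapped by
* inode 133's data fork, block 30 is an inobt block, and blocks 40-41 held
* the old rmapbt. Section (I) stages records for {0-3 OWN_FS},
* {10-17 OWN_INODES}, {20-29 inode 133}, and {30 OWN_INOBT}. Section (II)
* reserves space for the new btree (say blocks 50-51) and emits OWN_AG
* records for those blocks plus the bnobt, cntbt, and AGFL blocks.
* Section (III) bulk loads all of this into the new tree rooted at block 50.
* Section (IV) takes the gaps in the new recordset, subtracts the bnobt's
* free extents, and is left with exactly {40-41} -- the old rmapbt -- which
* is then reaped.
*/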
124 :
125 : /* Context for collecting rmaps */
126 : struct xrep_rmap {
127 : /* new rmapbt information */
128 : struct xrep_newbt new_btree;
129 :
130 : /* lock for the xfbtree and xfile */
131 : struct mutex lock;
132 :
133 : /* rmap records generated from primary metadata */
134 : struct xfbtree *rmap_btree;
135 :
136 : struct xfs_scrub *sc;
137 :
138 : /* in-memory btree cursor for the xfs_btree_bload iteration */
139 : struct xfs_btree_cur *mcur;
140 :
141 : /* Hooks into rmap update code. */
142 : struct xfs_rmap_hook hooks;
143 :
144 : /* inode scan cursor */
145 : struct xchk_iscan iscan;
146 :
147 : /* Number of non-freespace records found. */
148 : unsigned long long nr_records;
149 :
150 : /* bnobt/cntbt contribution to btreeblks */
151 : xfs_agblock_t freesp_btblocks;
152 :
153 : /* old agf_rmap_blocks counter */
154 : unsigned int old_rmapbt_fsbcount;
155 : };
156 :
157 : /* Set us up to repair reverse mapping btrees. */
158 : int
159 15937 : xrep_setup_ag_rmapbt(
160 : struct xfs_scrub *sc)
161 : {
162 15937 : struct xrep_rmap *rr;
163 15937 : char *descr;
164 15937 : int error;
165 :
166 15937 : xchk_fsgates_enable(sc, XCHK_FSGATES_RMAP);
167 :
168 15931 : descr = xchk_xfile_ag_descr(sc, "reverse mapping records");
169 15886 : error = xrep_setup_buftarg(sc, descr);
170 15937 : kfree(descr);
171 15937 : if (error)
172 : return error;
173 :
174 15937 : rr = kzalloc(sizeof(struct xrep_rmap), XCHK_GFP_FLAGS);
175 15937 : if (!rr)
176 : return -ENOMEM;
177 :
178 15937 : rr->sc = sc;
179 15937 : sc->buf = rr;
180 15937 : return 0;
181 : }
182 :
183 : /* Make sure there's nothing funny about this mapping. */
184 : STATIC int
185 51419838 : xrep_rmap_check_mapping(
186 : struct xfs_scrub *sc,
187 : const struct xfs_rmap_irec *rec)
188 : {
189 51419838 : enum xbtree_recpacking outcome;
190 51419838 : int error;
191 :
192 51419838 : if (xfs_rmap_check_perag_irec(sc->sa.pag, rec) != NULL)
193 : return -EFSCORRUPTED;
194 :
195 : /* Make sure this isn't free space. */
196 51419852 : error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rm_startblock,
197 51419852 : rec->rm_blockcount, &outcome);
198 51419584 : if (error)
199 : return error;
200 51419584 : if (outcome != XBTREE_RECPACKING_EMPTY)
201 0 : return -EFSCORRUPTED;
202 :
203 : return 0;
204 : }
205 :
206 : /* Store a reverse-mapping record. */
207 : static inline int
208 52623729 : xrep_rmap_stash(
209 : struct xrep_rmap *rr,
210 : xfs_agblock_t startblock,
211 : xfs_extlen_t blockcount,
212 : uint64_t owner,
213 : uint64_t offset,
214 : unsigned int flags)
215 : {
216 52623729 : struct xfs_rmap_irec rmap = {
217 : .rm_startblock = startblock,
218 : .rm_blockcount = blockcount,
219 : .rm_owner = owner,
220 : .rm_offset = offset,
221 : .rm_flags = flags,
222 : };
223 52623729 : struct xfs_scrub *sc = rr->sc;
224 52623729 : struct xfs_btree_cur *mcur;
225 52623729 : struct xfs_buf *mhead_bp;
226 52623729 : int error = 0;
227 :
228 52623729 : if (xchk_should_terminate(sc, &error))
229 2 : return error;
230 :
231 52623678 : if (xchk_iscan_aborted(&rr->iscan))
232 : return -EFSCORRUPTED;
233 :
234 52622680 : trace_xrep_rmap_found(sc->mp, sc->sa.pag->pag_agno, &rmap);
235 :
236 52622587 : mutex_lock(&rr->lock);
237 52624053 : error = xfbtree_head_read_buf(rr->rmap_btree, sc->tp, &mhead_bp);
238 52623574 : if (error)
239 0 : goto out_abort;
240 :
241 52623574 : mcur = xfs_rmapbt_mem_cursor(sc->sa.pag, sc->tp, mhead_bp,
242 : rr->rmap_btree);
243 52623334 : error = xfs_rmap_map_raw(mcur, &rmap);
244 52624743 : xfs_btree_del_cursor(mcur, error);
245 52625362 : if (error)
246 0 : goto out_cancel;
247 :
248 52625362 : error = xfbtree_trans_commit(rr->rmap_btree, sc->tp);
249 52625154 : if (error)
250 0 : goto out_abort;
251 :
252 52625154 : mutex_unlock(&rr->lock);
253 52625154 : return 0;
254 :
255 : out_cancel:
256 0 : xfbtree_trans_cancel(rr->rmap_btree, sc->tp);
257 0 : out_abort:
258 0 : xchk_iscan_abort(&rr->iscan);
259 0 : mutex_unlock(&rr->lock);
260 0 : return error;
261 : }
262 :
263 : struct xrep_rmap_stash_run {
264 : struct xrep_rmap *rr;
265 : uint64_t owner;
266 : unsigned int rmap_flags;
267 : };
268 :
269 : static int
270 1906657 : xrep_rmap_stash_run(
271 : uint64_t start,
272 : uint64_t len,
273 : void *priv)
274 : {
275 1906657 : struct xrep_rmap_stash_run *rsr = priv;
276 1906657 : struct xrep_rmap *rr = rsr->rr;
277 :
278 1906657 : return xrep_rmap_stash(rr, start, len, rsr->owner, 0, rsr->rmap_flags);
279 : }
280 :
281 : /*
282 : * Emit rmaps for every run of set bits in the bitmap. The caller must
283 : * ensure that the ranges are in units of FS blocks.
284 : */
285 : STATIC int
286 3915967 : xrep_rmap_stash_bitmap(
287 : struct xrep_rmap *rr,
288 : struct xagb_bitmap *bitmap,
289 : const struct xfs_owner_info *oinfo)
290 : {
291 3915967 : struct xrep_rmap_stash_run rsr = {
292 : .rr = rr,
293 3915967 : .owner = oinfo->oi_owner,
294 : .rmap_flags = 0,
295 : };
296 :
297 3915967 : if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
298 12 : rsr.rmap_flags |= XFS_RMAP_ATTR_FORK;
299 3915967 : if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
300 3838740 : rsr.rmap_flags |= XFS_RMAP_BMBT_BLOCK;
301 :
302 3915967 : return xagb_bitmap_walk(bitmap, xrep_rmap_stash_run, &rsr);
303 : }
304 :
305 : /* Section (I): Finding all file and bmbt extents. */
306 :
307 : /* Context for accumulating rmaps for an inode fork. */
308 : struct xrep_rmap_ifork {
309 : /*
310 : * Accumulate rmap data here to turn multiple adjacent bmaps into a
311 : * single rmap.
312 : */
313 : struct xfs_rmap_irec accum;
314 :
315 : /* Bitmap of bmbt blocks in this AG. */
316 : struct xagb_bitmap bmbt_blocks;
317 :
318 : struct xrep_rmap *rr;
319 :
320 : /* Which inode fork? */
321 : int whichfork;
322 : };
323 :
324 : /* Stash an rmap that we accumulated while walking an inode fork. */
325 : STATIC int
326 91759722 : xrep_rmap_stash_accumulated(
327 : struct xrep_rmap_ifork *rf)
328 : {
329 91759722 : if (rf->accum.rm_blockcount == 0)
330 : return 0;
331 :
332 50699639 : return xrep_rmap_stash(rf->rr, rf->accum.rm_startblock,
333 : rf->accum.rm_blockcount, rf->accum.rm_owner,
334 : rf->accum.rm_offset, rf->accum.rm_flags);
335 : }
336 :
337 : /* Accumulate a bmbt record. */
338 : STATIC int
339 199770890 : xrep_rmap_visit_bmbt(
340 : struct xfs_btree_cur *cur,
341 : struct xfs_bmbt_irec *rec,
342 : void *priv)
343 : {
344 199770890 : struct xrep_rmap_ifork *rf = priv;
345 199770890 : struct xfs_mount *mp = rf->rr->sc->mp;
346 199770890 : struct xfs_rmap_irec *accum = &rf->accum;
347 199770890 : xfs_agblock_t agbno;
348 199770890 : unsigned int rmap_flags = 0;
349 199770890 : int error;
350 :
351 199770890 : if (XFS_FSB_TO_AGNO(mp, rec->br_startblock) !=
352 199770890 : rf->rr->sc->sa.pag->pag_agno)
353 : return 0;
354 :
355 50699929 : agbno = XFS_FSB_TO_AGBNO(mp, rec->br_startblock);
356 50700177 : if (rf->whichfork == XFS_ATTR_FORK)
357 77889 : rmap_flags |= XFS_RMAP_ATTR_FORK;
358 50700177 : if (rec->br_state == XFS_EXT_UNWRITTEN)
359 11663179 : rmap_flags |= XFS_RMAP_UNWRITTEN;
360 :
361 : /* If this bmap is adjacent to the previous one, just add it. */
362 50700177 : if (accum->rm_blockcount > 0 &&
363 41411166 : rec->br_startoff == accum->rm_offset + accum->rm_blockcount &&
364 13444353 : agbno == accum->rm_startblock + accum->rm_blockcount &&
365 4742834 : rmap_flags == accum->rm_flags) {
366 0 : accum->rm_blockcount += rec->br_blockcount;
367 0 : return 0;
368 : }
369 :
370 : /* Otherwise stash the old rmap and start accumulating a new one. */
371 50700177 : error = xrep_rmap_stash_accumulated(rf);
372 50700004 : if (error)
373 : return error;
374 :
375 50700004 : accum->rm_startblock = agbno;
376 50700004 : accum->rm_blockcount = rec->br_blockcount;
377 50700004 : accum->rm_offset = rec->br_startoff;
378 50700004 : accum->rm_flags = rmap_flags;
379 50700004 : return 0;
380 : }
381 :
382 : /* Add a btree block to the bitmap. */
383 : STATIC int
384 7895165 : xrep_rmap_visit_iroot_btree_block(
385 : struct xfs_btree_cur *cur,
386 : int level,
387 : void *priv)
388 : {
389 7895165 : struct xrep_rmap_ifork *rf = priv;
390 7895165 : struct xfs_buf *bp;
391 7895165 : xfs_fsblock_t fsbno;
392 7895165 : xfs_agblock_t agbno;
393 :
394 7895165 : xfs_btree_get_block(cur, level, &bp);
395 7895212 : if (!bp)
396 : return 0;
397 :
398 4056475 : fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp));
399 4056480 : if (XFS_FSB_TO_AGNO(cur->bc_mp, fsbno) != rf->rr->sc->sa.pag->pag_agno)
400 : return 0;
401 :
402 987332 : agbno = XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno);
403 987332 : return xagb_bitmap_set(&rf->bmbt_blocks, agbno, 1);
404 : }
405 :
406 : /*
407 : * Iterate a metadata btree rooted in an inode to collect rmap records for
408 : * anything in this fork that matches the AG.
409 : */
410 : STATIC int
411 3838773 : xrep_rmap_scan_iroot_btree(
412 : struct xrep_rmap_ifork *rf,
413 : struct xfs_btree_cur *cur)
414 : {
415 3838773 : struct xfs_owner_info oinfo;
416 3838773 : struct xrep_rmap *rr = rf->rr;
417 3838773 : int error;
418 :
419 3838773 : xagb_bitmap_init(&rf->bmbt_blocks);
420 :
421 : /* Record all the blocks in the btree itself. */
422 3838773 : error = xfs_btree_visit_blocks(cur, xrep_rmap_visit_iroot_btree_block,
423 : XFS_BTREE_VISIT_ALL, rf);
424 3838776 : if (error)
425 0 : goto out;
426 :
427 : /* Emit rmaps for the btree blocks. */
428 3838776 : xfs_rmap_ino_bmbt_owner(&oinfo, rf->accum.rm_owner, rf->whichfork);
429 3838776 : error = xrep_rmap_stash_bitmap(rr, &rf->bmbt_blocks, &oinfo);
430 3838723 : if (error)
431 0 : goto out;
432 :
433 : /* Stash any remaining accumulated rmaps. */
434 3838723 : error = xrep_rmap_stash_accumulated(rf);
435 3838732 : out:
436 3838732 : xagb_bitmap_destroy(&rf->bmbt_blocks);
437 3838721 : return error;
438 : }
439 :
440 : static inline bool
441 : is_rt_data_fork(
442 : struct xfs_inode *ip,
443 : int whichfork)
444 : {
445 : return XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK;
446 : }
447 :
448 : /*
449 : * Iterate the block mapping btree to collect rmap records for anything in this
450 : * fork that matches the AG. Sets @mappings_done to true if we've scanned the
451 : * block mappings in this fork.
452 : */
453 : STATIC int
454 3838775 : xrep_rmap_scan_bmbt(
455 : struct xrep_rmap_ifork *rf,
456 : struct xfs_inode *ip,
457 : bool *mappings_done)
458 : {
459 3838775 : struct xrep_rmap *rr = rf->rr;
460 3838775 : struct xfs_btree_cur *cur;
461 3838775 : struct xfs_ifork *ifp;
462 3838775 : int error;
463 :
464 3838775 : *mappings_done = false;
465 3838775 : ifp = xfs_ifork_ptr(ip, rf->whichfork);
466 3838770 : cur = xfs_bmbt_init_cursor(rr->sc->mp, rr->sc->tp, ip, rf->whichfork);
467 :
468 7677544 : if (!xfs_ifork_is_realtime(ip, rf->whichfork) &&
469 : xfs_need_iread_extents(ifp)) {
470 : /*
471 : * If the incore extent cache isn't loaded, scan the bmbt for
472 : * mapping records. This avoids loading the incore extent
473 : * tree, which will increase memory pressure at a time when
474 : * we're trying to run as quickly as we possibly can. Ignore
475 : * realtime extents.
476 : */
477 8 : error = xfs_bmap_query_all(cur, xrep_rmap_visit_bmbt, rf);
478 8 : if (error)
479 0 : goto out_cur;
480 :
481 8 : *mappings_done = true;
482 : }
483 :
484 : /* Scan for the bmbt blocks, which always live on the data device. */
485 3838767 : error = xrep_rmap_scan_iroot_btree(rf, cur);
486 3838734 : out_cur:
487 3838734 : xfs_btree_del_cursor(cur, error);
488 3838761 : return error;
489 : }
490 :
491 : /*
492 : * Iterate the in-core extent cache to collect rmap records for anything in
493 : * this fork that matches the AG.
494 : */
495 : STATIC int
496 37255308 : xrep_rmap_scan_iext(
497 : struct xrep_rmap_ifork *rf,
498 : struct xfs_ifork *ifp)
499 : {
500 37255308 : struct xfs_bmbt_irec rec;
501 37255308 : struct xfs_iext_cursor icur;
502 37255308 : int error;
503 :
504 236832506 : for_each_xfs_iext(ifp, &icur, &rec) {
505 199577550 : if (isnullstartblock(rec.br_startblock))
506 33536 : continue;
507 199544014 : error = xrep_rmap_visit_bmbt(NULL, &rec, rf);
508 199543662 : if (error)
509 0 : return error;
510 : }
511 :
512 37223215 : return xrep_rmap_stash_accumulated(rf);
513 : }
514 :
515 : /* Find all the extents from a given AG in an inode fork. */
516 : STATIC int
517 142897682 : xrep_rmap_scan_ifork(
518 : struct xrep_rmap *rr,
519 : struct xfs_inode *ip,
520 : int whichfork)
521 : {
522 142897682 : struct xrep_rmap_ifork rf = {
523 142897682 : .accum = { .rm_owner = ip->i_ino, },
524 : .rr = rr,
525 : .whichfork = whichfork,
526 : };
527 142897682 : struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
528 142883362 : int error = 0;
529 :
530 142883362 : if (!ifp)
531 : return 0;
532 :
533 142826447 : if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
534 3838763 : bool mappings_done;
535 :
536 : /*
537 : * Scan the bmap btree for data device mappings. This includes
538 : * the btree blocks themselves, even if this is a realtime
539 : * file.
540 : */
541 3838763 : error = xrep_rmap_scan_bmbt(&rf, ip, &mappings_done);
542 3838772 : if (error || mappings_done)
543 8 : return error;
544 138987684 : } else if (ifp->if_format != XFS_DINODE_FMT_EXTENTS) {
545 : return 0;
546 : }
547 :
548 : /* Scan incore extent cache if this isn't a realtime file. */
549 37260414 : if (xfs_ifork_is_realtime(ip, whichfork))
550 : return 0;
551 :
552 37249158 : return xrep_rmap_scan_iext(&rf, ifp);
553 : }
554 :
555 : /*
556 : * Take ILOCK on a file that we want to scan.
557 : *
558 : * Select ILOCK_EXCL if the file has an unloaded data bmbt or has an unloaded
559 : * attr bmbt, since reading the extent map in modifies the fork and needs
560 : * exclusive access. Otherwise, take ILOCK_SHARED.
560 : */
561 : static inline unsigned int
562 71569711 : xrep_rmap_scan_ilock(
563 : struct xfs_inode *ip)
564 : {
565 71569711 : uint lock_mode = XFS_ILOCK_SHARED;
566 :
567 71569711 : if (xfs_need_iread_extents(&ip->i_df)) {
568 8 : lock_mode = XFS_ILOCK_EXCL;
569 8 : goto lock;
570 : }
571 :
572 143096472 : if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
573 0 : lock_mode = XFS_ILOCK_EXCL;
574 :
575 71581851 : lock:
576 71581859 : xfs_ilock(ip, lock_mode);
577 71594155 : return lock_mode;
578 : }
579 :
580 : /* Record reverse mappings for a file. */
581 : STATIC int
582 71572112 : xrep_rmap_scan_inode(
583 : struct xrep_rmap *rr,
584 : struct xfs_inode *ip)
585 : {
586 71572112 : unsigned int lock_mode = xrep_rmap_scan_ilock(ip);
587 71594098 : int error;
588 :
589 : /* Check the data fork. */
590 71594098 : error = xrep_rmap_scan_ifork(rr, ip, XFS_DATA_FORK);
591 71546654 : if (error)
592 2 : goto out_unlock;
593 :
594 : /* Check the attr fork. */
595 71546652 : error = xrep_rmap_scan_ifork(rr, ip, XFS_ATTR_FORK);
596 71510936 : if (error)
597 0 : goto out_unlock;
598 :
599 : /* COW fork extents are "owned" by the refcount btree. */
600 :
601 71510936 : xchk_iscan_mark_visited(&rr->iscan, ip);
602 71565701 : out_unlock:
603 71565701 : xfs_iunlock(ip, lock_mode);
604 71488491 : return error;
605 : }
606 :
607 : /* Section (I): Find all AG metadata extents except for free space metadata. */
608 :
609 : struct xrep_rmap_inodes {
610 : struct xrep_rmap *rr;
611 : struct xagb_bitmap inobt_blocks; /* INOBIT */
612 : struct xagb_bitmap ichunk_blocks; /* ICHUNKBIT */
613 : };
614 :
615 : /* Record inode btree rmaps. */
616 : STATIC int
617 345412 : xrep_rmap_walk_inobt(
618 : struct xfs_btree_cur *cur,
619 : const union xfs_btree_rec *rec,
620 : void *priv)
621 : {
622 345412 : struct xfs_inobt_rec_incore irec;
623 345412 : struct xrep_rmap_inodes *ri = priv;
624 345412 : struct xfs_mount *mp = cur->bc_mp;
625 345412 : xfs_agblock_t agbno;
626 345412 : xfs_agino_t agino;
627 345412 : xfs_agino_t iperhole;
628 345412 : unsigned int i;
629 345412 : int error;
630 :
631 : /* Record the inobt blocks. */
632 345412 : error = xagb_bitmap_set_btcur_path(&ri->inobt_blocks, cur);
633 345412 : if (error)
634 : return error;
635 :
636 345414 : xfs_inobt_btrec_to_irec(mp, rec, &irec);
637 345411 : if (xfs_inobt_check_irec(cur, &irec) != NULL)
638 : return -EFSCORRUPTED;
639 :
640 345415 : agino = irec.ir_startino;
641 :
642 : /* Record a non-sparse inode chunk. */
643 345415 : if (!xfs_inobt_issparse(irec.ir_holemask)) {
644 189823 : agbno = XFS_AGINO_TO_AGBNO(mp, agino);
645 :
646 189823 : return xagb_bitmap_set(&ri->ichunk_blocks, agbno,
647 189823 : XFS_INODES_PER_CHUNK / mp->m_sb.sb_inopblock);
648 : }
649 :
650 : /* Iterate each chunk. */
651 155592 : iperhole = max_t(xfs_agino_t, mp->m_sb.sb_inopblock,
652 : XFS_INODES_PER_HOLEMASK_BIT);
653 155592 : for (i = 0, agino = irec.ir_startino;
654 1400332 : i < XFS_INOBT_HOLEMASK_BITS;
655 1244740 : i += iperhole / XFS_INODES_PER_HOLEMASK_BIT, agino += iperhole) {
656 : /* Skip holes. */
657 1244740 : if (irec.ir_holemask & (1 << i))
658 622369 : continue;
659 :
660 : /* Record the inode chunk otherwise. */
661 622371 : agbno = XFS_AGINO_TO_AGBNO(mp, agino);
662 622371 : error = xagb_bitmap_set(&ri->ichunk_blocks, agbno,
663 622371 : iperhole / mp->m_sb.sb_inopblock);
664 622371 : if (error)
665 0 : return error;
666 : }
667 :
668 : return 0;
669 : }
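
/*
* Worked example of the sparse chunk walk above, assuming 4096-byte blocks
* and 512-byte inodes (so 8 inodes per block): a chunk spans 64 inodes and
* the holemask has 16 bits, so each holemask bit covers 4 inodes. Then
* iperhole = max(8, 4) = 8 inodes, i.e. one block per step, and the loop
* advances two holemask bits at a time; every step whose holemask bit is
* clear records one block's worth of inodes in ICHUNKBIT.
*/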
670 :
671 : /* Collect rmaps for the blocks containing inode btrees and the inode chunks. */
672 : STATIC int
673 15504 : xrep_rmap_find_inode_rmaps(
674 : struct xrep_rmap *rr)
675 : {
676 15504 : struct xrep_rmap_inodes ri = {
677 : .rr = rr,
678 : };
679 15504 : struct xfs_scrub *sc = rr->sc;
680 15504 : int error;
681 :
682 15504 : xagb_bitmap_init(&ri.inobt_blocks);
683 15429 : xagb_bitmap_init(&ri.ichunk_blocks);
684 :
685 : /*
686 : * Iterate every record in the inobt so we can capture all the inode
687 : * chunks and the blocks in the inobt itself.
688 : */
689 15437 : error = xfs_btree_query_all(sc->sa.ino_cur, xrep_rmap_walk_inobt, &ri);
690 15534 : if (error)
691 0 : goto out_bitmap;
692 :
693 : /*
694 : * Note that if there are zero records in the inobt then query_all does
695 : * nothing and we have to account the empty inobt root manually.
696 : */
697 15534 : if (xagb_bitmap_empty(&ri.ichunk_blocks)) {
698 9345 : struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
699 :
700 9345 : error = xagb_bitmap_set(&ri.inobt_blocks,
701 9345 : be32_to_cpu(agi->agi_root), 1);
702 9393 : if (error)
703 0 : goto out_bitmap;
704 : }
705 :
706 : /* Scan the finobt too. */
707 15492 : if (xfs_has_finobt(sc->mp)) {
708 15492 : error = xagb_bitmap_set_btblocks(&ri.inobt_blocks,
709 : sc->sa.fino_cur);
710 15522 : if (error)
711 0 : goto out_bitmap;
712 : }
713 :
714 : /* Generate rmaps for everything. */
715 15522 : error = xrep_rmap_stash_bitmap(rr, &ri.inobt_blocks,
716 : &XFS_RMAP_OINFO_INOBT);
717 15511 : if (error)
718 0 : goto out_bitmap;
719 15511 : error = xrep_rmap_stash_bitmap(rr, &ri.ichunk_blocks,
720 : &XFS_RMAP_OINFO_INODES);
721 :
722 15472 : out_bitmap:
723 15472 : xagb_bitmap_destroy(&ri.inobt_blocks);
724 15502 : xagb_bitmap_destroy(&ri.ichunk_blocks);
725 15500 : return error;
726 : }
727 :
728 : /* Record a CoW staging extent. */
729 : STATIC int
730 470507 : xrep_rmap_walk_cowblocks(
731 : struct xfs_btree_cur *cur,
732 : const struct xfs_refcount_irec *irec,
733 : void *priv)
734 : {
735 470507 : struct xagb_bitmap *bitmap = priv;
736 :
737 470507 : if (!xfs_refcount_check_domain(irec) ||
738 470507 : irec->rc_domain != XFS_REFC_DOMAIN_COW)
739 : return -EFSCORRUPTED;
740 :
741 470507 : return xagb_bitmap_set(bitmap, irec->rc_startblock, irec->rc_blockcount);
742 : }
743 :
744 : /*
745 : * Collect rmaps for the blocks containing the refcount btree, and all CoW
746 : * staging extents.
747 : */
748 : STATIC int
749 15480 : xrep_rmap_find_refcount_rmaps(
750 : struct xrep_rmap *rr)
751 : {
752 15480 : struct xagb_bitmap refcountbt_blocks; /* REFCBIT */
753 15480 : struct xagb_bitmap cow_blocks; /* COWBIT */
754 15480 : struct xfs_refcount_irec low = {
755 : .rc_startblock = 0,
756 : .rc_domain = XFS_REFC_DOMAIN_COW,
757 : };
758 15480 : struct xfs_refcount_irec high = {
759 : .rc_startblock = -1U,
760 : .rc_domain = XFS_REFC_DOMAIN_COW,
761 : };
762 15480 : struct xfs_scrub *sc = rr->sc;
763 15480 : int error;
764 :
765 15480 : if (!xfs_has_reflink(sc->mp))
766 : return 0;
767 :
768 15468 : xagb_bitmap_init(&refcountbt_blocks);
769 15449 : xagb_bitmap_init(&cow_blocks);
770 :
771 : /* refcountbt */
772 15467 : error = xagb_bitmap_set_btblocks(&refcountbt_blocks, sc->sa.refc_cur);
773 15528 : if (error)
774 0 : goto out_bitmap;
775 :
776 : /* Collect rmaps for CoW staging extents. */
777 15528 : error = xfs_refcount_query_range(sc->sa.refc_cur, &low, &high,
778 : xrep_rmap_walk_cowblocks, &cow_blocks);
779 15497 : if (error)
780 0 : goto out_bitmap;
781 :
782 : /* Generate rmaps for everything. */
783 15497 : error = xrep_rmap_stash_bitmap(rr, &cow_blocks, &XFS_RMAP_OINFO_COW);
784 15500 : if (error)
785 0 : goto out_bitmap;
786 15500 : error = xrep_rmap_stash_bitmap(rr, &refcountbt_blocks,
787 : &XFS_RMAP_OINFO_REFC);
788 :
789 15548 : out_bitmap:
790 15548 : xagb_bitmap_destroy(&cow_blocks);
791 15516 : xagb_bitmap_destroy(&refcountbt_blocks);
792 15516 : return error;
793 : }
794 :
795 : /* Generate rmaps for the AG headers (AGI/AGF/AGFL) */
796 : STATIC int
797 15502 : xrep_rmap_find_agheader_rmaps(
798 : struct xrep_rmap *rr)
799 : {
800 15502 : struct xfs_scrub *sc = rr->sc;
801 :
802 : /* Create a record for the AG sb->agfl. */
803 15502 : return xrep_rmap_stash(rr, XFS_SB_BLOCK(sc->mp),
804 15502 : XFS_AGFL_BLOCK(sc->mp) - XFS_SB_BLOCK(sc->mp) + 1,
805 : XFS_RMAP_OWN_FS, 0, 0);
806 : }
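
/*
* For example, when the sector size equals the block size, the superblock,
* AGF, AGI, and AGFL occupy AG blocks 0-3 and the record stashed above is
* {agbno 0, length 4, owner XFS_RMAP_OWN_FS}; with 512-byte sectors and
* 4096-byte blocks, all four headers share block 0 and the record covers a
* single block.
*/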
807 :
808 : /* Generate rmaps for the log, if it's in this AG. */
809 : STATIC int
810 15538 : xrep_rmap_find_log_rmaps(
811 : struct xrep_rmap *rr)
812 : {
813 15538 : struct xfs_scrub *sc = rr->sc;
814 :
815 15538 : if (!xfs_ag_contains_log(sc->mp, sc->sa.pag->pag_agno))
816 : return 0;
817 :
818 2447 : return xrep_rmap_stash(rr,
819 2447 : XFS_FSB_TO_AGBNO(sc->mp, sc->mp->m_sb.sb_logstart),
820 2447 : sc->mp->m_sb.sb_logblocks, XFS_RMAP_OWN_LOG, 0, 0);
821 : }
822 :
823 : /* Check and count all the records that we gathered. */
824 : STATIC int
825 51419736 : xrep_rmap_check_record(
826 : struct xfs_btree_cur *cur,
827 : const struct xfs_rmap_irec *rec,
828 : void *priv)
829 : {
830 51419736 : struct xrep_rmap *rr = priv;
831 51419736 : int error;
832 :
833 51419736 : error = xrep_rmap_check_mapping(rr->sc, rec);
834 51419846 : if (error)
835 : return error;
836 :
837 51419846 : rr->nr_records++;
838 51419846 : return 0;
839 : }
840 :
841 : /*
842 : * Generate all the reverse-mappings for this AG, a list of the old rmapbt
843 : * blocks, and the new btreeblks count. Figure out if we have enough free
844 : * space to reconstruct the rmap btree. The caller must clean up the lists
845 : * if anything goes wrong. This implements section (I) above.
846 : */
847 : STATIC int
848 15477 : xrep_rmap_find_rmaps(
849 : struct xrep_rmap *rr)
850 : {
851 15477 : struct xfs_scrub *sc = rr->sc;
852 15477 : struct xchk_ag *sa = &sc->sa;
853 15477 : struct xfs_inode *ip;
854 15477 : struct xfs_buf *mhead_bp;
855 15477 : struct xfs_btree_cur *mcur;
856 15477 : int error;
857 :
858 : /* Find all the per-AG metadata. */
859 15477 : xrep_ag_btcur_init(sc, &sc->sa);
860 :
861 15551 : error = xrep_rmap_find_inode_rmaps(rr);
862 15508 : if (error)
863 0 : goto end_agscan;
864 :
865 15508 : error = xrep_rmap_find_refcount_rmaps(rr);
866 15496 : if (error)
867 0 : goto end_agscan;
868 :
869 15496 : error = xrep_rmap_find_agheader_rmaps(rr);
870 15547 : if (error)
871 0 : goto end_agscan;
872 :
873 15547 : error = xrep_rmap_find_log_rmaps(rr);
874 15519 : end_agscan:
875 15519 : xchk_ag_btcur_free(&sc->sa);
876 15556 : if (error)
877 : return error;
878 :
879 : /*
880 : * Set up for a potentially lengthy filesystem scan by reducing our
881 : * transaction resource usage for the duration. Specifically:
882 : *
883 : * Unlock the AG header buffers and cancel the transaction to release
884 : * the log grant space while we scan the filesystem.
885 : *
886 : * Create a new empty transaction to eliminate the possibility of the
887 : * inode scan deadlocking on cyclical metadata.
888 : *
889 : * We pass the empty transaction to the file scanning function to avoid
890 : * repeatedly cycling empty transactions. This can be done even though
891 : * we take the IOLOCK to quiesce the file because empty transactions
892 : * do not take sb_internal.
893 : */
894 15557 : sa->agf_bp = NULL;
895 15557 : sa->agi_bp = NULL;
896 15557 : xchk_trans_cancel(sc);
897 15543 : error = xchk_trans_alloc_empty(sc);
898 15508 : if (error)
899 : return error;
900 :
901 : /* Scan every inode in the filesystem for extents in this AG. */
902 71605058 : while ((error = xchk_iscan_iter(&rr->iscan, &ip)) == 1) {
903 71584004 : error = xrep_rmap_scan_inode(rr, ip);
904 71478026 : xchk_irele(sc, ip);
905 71623050 : if (error)
906 : break;
907 :
908 71623048 : if (xchk_should_terminate(sc, &error))
909 : break;
910 : }
911 15556 : xchk_iscan_iter_finish(&rr->iscan);
912 15551 : if (error)
913 : return error;
914 :
915 : /*
916 : * Switch out for a real transaction and lock the AG headers in
917 : * preparation for building a new tree.
918 : */
919 15549 : xchk_trans_cancel(sc);
920 15531 : error = xchk_setup_fs(sc);
921 15501 : if (error)
922 : return error;
923 15492 : error = xchk_perag_drain_and_lock(sc);
924 15476 : if (error)
925 : return error;
926 :
927 : /*
928 : * If a hook failed to update the in-memory btree, we lack the data to
929 : * continue the repair.
930 : */
931 15353 : if (xchk_iscan_aborted(&rr->iscan))
932 : return -EFSCORRUPTED;
933 :
934 : /*
935 : * Now that we have everything locked again, we need to count the
936 : * number of rmap records stashed in the btree. This should reflect
937 : * all actively-owned space in the filesystem. At the same time, check
938 : * all our records before we start building a new btree, which requires
939 : * a bnobt cursor.
940 : */
941 15347 : error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
942 15426 : if (error)
943 : return error;
944 :
945 15424 : mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
946 : rr->rmap_btree);
947 15417 : sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
948 : sc->sa.pag, XFS_BTNUM_BNO);
949 :
950 15418 : rr->nr_records = 0;
951 15418 : error = xfs_rmap_query_all(mcur, xrep_rmap_check_record, rr);
952 :
953 15389 : xfs_btree_del_cursor(sc->sa.bno_cur, error);
954 15396 : sc->sa.bno_cur = NULL;
955 15396 : xfs_btree_del_cursor(mcur, error);
956 15402 : xfs_buf_relse(mhead_bp);
957 :
958 15390 : return error;
959 : }
960 :
961 : /* Section (II): Reserving space for new rmapbt and setting free space bitmap */
962 :
963 : struct xrep_rmap_agfl {
964 : struct xagb_bitmap *bitmap;
965 : xfs_agnumber_t agno;
966 : };
967 :
968 : /* Add an AGFL block to the rmap list. */
969 : STATIC int
970 104528 : xrep_rmap_walk_agfl(
971 : struct xfs_mount *mp,
972 : xfs_agblock_t agbno,
973 : void *priv)
974 : {
975 104528 : struct xrep_rmap_agfl *ra = priv;
976 :
977 104528 : return xagb_bitmap_set(ra->bitmap, agbno, 1);
978 : }
979 :
980 : /*
981 : * Run one round of reserving space for the new rmapbt and recomputing the
982 : * number of blocks needed to store the previously observed rmapbt records and
983 : * the ones we'll create for the free space metadata. When we don't need more
984 : * blocks, return a bitmap of OWN_AG extents in @freesp_blocks and set @done to
985 : * true.
986 : */
987 : STATIC int
988 15815 : xrep_rmap_try_reserve(
989 : struct xrep_rmap *rr,
990 : struct xfs_btree_cur *rmap_cur,
991 : struct xagb_bitmap *freesp_blocks,
992 : uint64_t *blocks_reserved,
993 : bool *done)
994 : {
995 15815 : struct xrep_rmap_agfl ra = {
996 : .bitmap = freesp_blocks,
997 15815 : .agno = rr->sc->sa.pag->pag_agno,
998 : };
999 15815 : struct xfs_scrub *sc = rr->sc;
1000 15815 : struct xrep_newbt_resv *resv, *n;
1001 15815 : struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
1002 15815 : struct xfs_buf *agfl_bp;
1003 15815 : uint64_t nr_blocks; /* RMB */
1004 15815 : uint64_t freesp_records;
1005 15815 : int error;
1006 :
1007 : /*
1008 : * We're going to recompute new_btree.bload.nr_blocks at the end of
1009 : * this function to reflect however many btree blocks we need to store
1010 : * all the rmap records (including the ones that reflect the changes we
1011 : * made to support the new rmapbt blocks), so we save the old value
1012 : * here so we can decide if we've reserved enough blocks.
1013 : */
1014 15815 : nr_blocks = rr->new_btree.bload.nr_blocks;
1015 :
1016 : /*
1017 : * Make sure we've reserved enough space for the new btree. This can
1018 : * change the shape of the free space btrees, which can cause secondary
1019 : * interactions with the rmap records because all three space btrees
1020 : * have the same rmap owner. We'll account for all that below.
1021 : */
1022 15815 : error = xrep_newbt_alloc_blocks(&rr->new_btree,
1023 15815 : nr_blocks - *blocks_reserved);
1024 15887 : if (error)
1025 : return error;
1026 :
1027 15819 : *blocks_reserved = rr->new_btree.bload.nr_blocks;
1028 :
1029 : /* Clear everything in the bitmap. */
1030 15819 : xagb_bitmap_destroy(freesp_blocks);
1031 :
1032 : /* Set all the bnobt blocks in the bitmap. */
1033 15796 : sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
1034 : sc->sa.pag, XFS_BTNUM_BNO);
1035 15953 : error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.bno_cur);
1036 15832 : xfs_btree_del_cursor(sc->sa.bno_cur, error);
1037 15970 : sc->sa.bno_cur = NULL;
1038 15970 : if (error)
1039 : return error;
1040 :
1041 : /* Set all the cntbt blocks in the bitmap. */
1042 15973 : sc->sa.cnt_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
1043 : sc->sa.pag, XFS_BTNUM_CNT);
1044 15947 : error = xagb_bitmap_set_btblocks(freesp_blocks, sc->sa.cnt_cur);
1045 15892 : xfs_btree_del_cursor(sc->sa.cnt_cur, error);
1046 15982 : sc->sa.cnt_cur = NULL;
1047 15982 : if (error)
1048 : return error;
1049 :
1050 : /* Record our new btreeblks value. */
1051 15981 : rr->freesp_btblocks = xagb_bitmap_hweight(freesp_blocks) - 2;
1052 :
1053 : /* Set all the new rmapbt blocks in the bitmap. */
1054 35332 : for_each_xrep_newbt_reservation(&rr->new_btree, resv, n) {
1055 19613 : error = xagb_bitmap_set(freesp_blocks, resv->agbno, resv->len);
1056 19475 : if (error)
1057 0 : return error;
1058 : }
1059 :
1060 : /* Set all the AGFL blocks in the bitmap. */
1061 15719 : error = xfs_alloc_read_agfl(sc->sa.pag, sc->tp, &agfl_bp);
1062 15909 : if (error)
1063 : return error;
1064 :
1065 15910 : error = xfs_agfl_walk(sc->mp, agf, agfl_bp, xrep_rmap_walk_agfl, &ra);
1066 15856 : if (error)
1067 : return error;
1068 :
1069 : /* Count the extents in the bitmap. */
1070 15854 : freesp_records = xagb_bitmap_count_set_regions(freesp_blocks);
1071 :
1072 : /* Compute how many blocks we'll need for all the rmaps. */
1073 15761 : error = xfs_btree_bload_compute_geometry(rmap_cur,
1074 15761 : &rr->new_btree.bload, rr->nr_records + freesp_records);
1075 15922 : if (error)
1076 : return error;
1077 :
1078 : /* We're done when we don't need more blocks. */
1079 15922 : *done = nr_blocks >= rr->new_btree.bload.nr_blocks;
1080 15922 : return 0;
1081 : }
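
/*
* Worked example of one round (invented numbers): suppose the scan found
* NR = 1000 records needing RMB = 3 btree blocks. Reserving those 3 blocks
* may split free space extents and adds the blocks themselves as OWN_AG
* space, so the recount might produce 1012 records needing RMB' = 4 blocks.
* Since 4 > 3, the caller loops: one more block is reserved and the bitmap
* is rebuilt. Each round only ever adds blocks, so the iteration converges
* and *done is set once the recomputed geometry fits in the reservation.
*/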
1082 :
1083 : /*
1084 : * Iteratively reserve space for rmap btree while recording OWN_AG rmaps for
1085 : * the free space metadata. This implements section (II) above.
1086 : */
1087 : STATIC int
1088 15335 : xrep_rmap_reserve_space(
1089 : struct xrep_rmap *rr,
1090 : struct xfs_btree_cur *rmap_cur)
1091 : {
1092 15335 : struct xagb_bitmap freesp_blocks; /* AGBIT */
1093 15335 : uint64_t blocks_reserved = 0;
1094 15335 : bool done = false;
1095 15335 : int error;
1096 :
1097 : /* Compute how many blocks we'll need for the rmaps collected so far. */
1098 15335 : error = xfs_btree_bload_compute_geometry(rmap_cur,
1099 : &rr->new_btree.bload, rr->nr_records);
1100 15268 : if (error)
1101 : return error;
1102 :
1103 : /* Last chance to abort before we start committing fixes. */
1104 15254 : if (xchk_should_terminate(rr->sc, &error))
1105 0 : return error;
1106 :
1107 15295 : xagb_bitmap_init(&freesp_blocks);
1108 :
1109 : /*
1110 : * Iteratively reserve space for the new rmapbt and recompute the
1111 : * number of blocks needed to store the previously observed rmapbt
1112 : * records and the ones we'll create for the free space metadata.
1113 : * Finish when we don't need more blocks.
1114 : */
1115 15844 : do {
1116 15844 : error = xrep_rmap_try_reserve(rr, rmap_cur, &freesp_blocks,
1117 : &blocks_reserved, &done);
1118 15897 : if (error)
1119 16 : goto out_bitmap;
1120 15881 : } while (!done);
1121 :
1122 : /* Emit rmaps for everything in the free space bitmap. */
1123 15286 : xrep_ag_btcur_init(rr->sc, &rr->sc->sa);
1124 15333 : error = xrep_rmap_stash_bitmap(rr, &freesp_blocks, &XFS_RMAP_OINFO_AG);
1125 15353 : xchk_ag_btcur_free(&rr->sc->sa);
1126 :
1127 15355 : out_bitmap:
1128 15355 : xagb_bitmap_destroy(&freesp_blocks);
1129 15282 : return error;
1130 : }
1131 :
1132 : /* Section (III): Building the new rmap btree. */
1133 :
1134 : /* Update the AGF counters. */
1135 : STATIC int
1136 15418 : xrep_rmap_reset_counters(
1137 : struct xrep_rmap *rr)
1138 : {
1139 15418 : struct xfs_scrub *sc = rr->sc;
1140 15418 : struct xfs_perag *pag = sc->sa.pag;
1141 15418 : struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
1142 15418 : xfs_agblock_t rmap_btblocks;
1143 :
1144 : /*
1145 : * The AGF header contains extra information related to the reverse
1146 : * mapping btree, so we must update those fields here.
1147 : */
1148 15418 : rmap_btblocks = rr->new_btree.afake.af_blocks - 1;
1149 15418 : agf->agf_btreeblks = cpu_to_be32(rr->freesp_btblocks + rmap_btblocks);
1150 15418 : xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_BTREEBLKS);
1151 :
1152 : /*
1153 : * After we commit the new btree to disk, it is possible that the
1154 : * process to reap the old btree blocks will race with the AIL trying
1155 : * to checkpoint the old btree blocks into the filesystem. If the new
1156 : * tree is shorter than the old one, the rmapbt write verifier will
1157 : * fail and the AIL will shut down the filesystem.
1158 : *
1159 : * To avoid this, save the old incore btree height values as the alt
1160 : * height values before re-initializing the perag info from the updated
1161 : * AGF to capture all the new values.
1162 : */
1163 15416 : pag->pagf_alt_levels[XFS_BTNUM_RMAPi] =
1164 15416 : pag->pagf_levels[XFS_BTNUM_RMAPi];
1165 :
1166 : /* Reinitialize with the values we just logged. */
1167 15416 : return xrep_reinit_pagf(sc);
1168 : }
1169 :
1170 : /* Retrieve rmapbt data for bulk load. */
1171 : STATIC int
1172 413036 : xrep_rmap_get_records(
1173 : struct xfs_btree_cur *cur,
1174 : unsigned int idx,
1175 : struct xfs_btree_block *block,
1176 : unsigned int nr_wanted,
1177 : void *priv)
1178 : {
1179 413036 : struct xrep_rmap *rr = priv;
1180 413036 : union xfs_btree_rec *block_rec;
1181 413036 : unsigned int loaded;
1182 413036 : int error;
1183 :
1184 51336167 : for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
1185 50923020 : int stat = 0;
1186 :
1187 50923020 : error = xfs_btree_increment(rr->mcur, 0, &stat);
1188 50923217 : if (error)
1189 0 : return error;
1190 50923217 : if (!stat)
1191 : return -EFSCORRUPTED;
1192 :
1193 50923217 : error = xfs_rmap_get_rec(rr->mcur, &cur->bc_rec.r, &stat);
1194 50923463 : if (error)
1195 0 : return error;
1196 50923463 : if (!stat)
1197 : return -EFSCORRUPTED;
1198 :
1199 50923463 : block_rec = xfs_btree_rec_addr(cur, idx, block);
1200 50923218 : cur->bc_ops->init_rec_from_cur(cur, block_rec);
1201 : }
1202 :
1203 413147 : return loaded;
1204 : }
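
/*
* For example, if the bulk loader wants 250 records for a leaf block, the
* loop above steps the in-memory btree cursor forward 250 times, copying one
* record into the staging block per step, and returns 250. Running out of
* records early returns -EFSCORRUPTED, because the bload geometry was
* computed from an exact record count.
*/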
1205 :
1206 : /* Feed one of the new btree blocks to the bulk loader. */
1207 : STATIC int
1208 421456 : xrep_rmap_claim_block(
1209 : struct xfs_btree_cur *cur,
1210 : union xfs_btree_ptr *ptr,
1211 : void *priv)
1212 : {
1213 421456 : struct xrep_rmap *rr = priv;
1214 421456 : int error;
1215 :
1216 421456 : error = xrep_newbt_relog_autoreap(&rr->new_btree);
1217 421542 : if (error)
1218 : return error;
1219 :
1220 421542 : return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
1221 : }
1222 :
1223 : /* Custom allocation function for new rmap btrees. */
1224 : STATIC int
1225 18896 : xrep_rmap_alloc_vextent(
1226 : struct xfs_scrub *sc,
1227 : struct xfs_alloc_arg *args,
1228 : xfs_fsblock_t alloc_hint)
1229 : {
1230 18896 : int error;
1231 :
1232 : /*
1233 : * We don't want an rmap update on the allocation, since we iteratively
1234 : * compute the OWN_AG records /after/ allocating blocks for the records
1235 : * that we already know we need to store. Therefore, fix the freelist
1236 : * with the NORMAP flag set so that we don't also try to create an rmap
1237 : * for new AGFL blocks.
1238 : */
1239 18896 : error = xrep_fix_freelist(sc, XFS_ALLOC_FLAG_NORMAP);
1240 19063 : if (error)
1241 : return error;
1242 :
1243 : /*
1244 : * If xrep_fix_freelist fixed the freelist by moving blocks from the
1245 : * free space btrees or by removing blocks from the AGFL and queueing
1246 : * an EFI to free the block, the transaction will be dirty. This
1247 : * second case is of interest to us.
1248 : *
1249 : * Later on, we will need to compare gaps in the new recordset against
1250 : * the block usage of all OWN_AG owners in order to free the old
1251 : * btree's blocks, which means that we can't have EFIs for former AGFL
1252 : * blocks attached to the repair transaction when we commit the new
1253 : * btree.
1254 : *
1255 : * xrep_newbt_alloc_blocks guarantees this for us by calling
1256 : * xrep_defer_finish to commit anything that fix_freelist may have
1257 : * added to the transaction.
1258 : */
1259 19062 : return xfs_alloc_vextent_near_bno(args, alloc_hint);
1260 : }
1261 :
1262 :
1263 : /* Count the records in this btree. */
1264 : STATIC int
1265 15342 : xrep_rmap_count_records(
1266 : struct xfs_btree_cur *cur,
1267 : unsigned long long *nr)
1268 : {
1269 15342 : int running = 1;
1270 15342 : int error;
1271 :
1272 15342 : *nr = 0;
1273 :
1274 15342 : error = xfs_btree_goto_left_edge(cur);
1275 15323 : if (error)
1276 : return error;
1277 :
1278 50954353 : while (running && !(error = xfs_btree_increment(cur, 0, &running))) {
1279 50939030 : if (running)
1280 50923735 : (*nr)++;
1281 : }
1282 :
1283 : return error;
1284 : }

1285 : /*
1286 : * Use the collected rmap information to stage a new rmap btree. If this is
1287 : * successful we'll return with the new btree root information logged to the
1288 : * repair transaction but not yet committed. This implements section (III)
1289 : * above.
1290 : */
1291 : STATIC int
1292 15378 : xrep_rmap_build_new_tree(
1293 : struct xrep_rmap *rr)
1294 : {
1295 15378 : struct xfs_scrub *sc = rr->sc;
1296 15378 : struct xfs_perag *pag = sc->sa.pag;
1297 15378 : struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
1298 15378 : struct xfs_btree_cur *rmap_cur;
1299 15378 : struct xfs_buf *mhead_bp;
1300 15378 : xfs_fsblock_t fsbno;
1301 15378 : int error;
1302 :
1303 : /*
1304 : * Preserve the old rmapbt block count so that we can adjust the
1305 : * per-AG rmapbt reservation after we commit the new btree root and
1306 : * want to dispose of the old btree blocks.
1307 : */
1308 15378 : rr->old_rmapbt_fsbcount = be32_to_cpu(agf->agf_rmap_blocks);
1309 :
1310 : /*
1311 : * Prepare to construct the new btree by reserving disk space for the
1312 : * new btree and setting up all the accounting information we'll need
1313 : * to root the new btree while it's under construction and before we
1314 : * attach it to the AG header. The new blocks are accounted to the
1315 : * rmapbt per-AG reservation, which we will adjust further after
1316 : * committing the new btree.
1317 : */
1318 15378 : fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, XFS_RMAP_BLOCK(sc->mp));
1319 15378 : xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_SKIP_UPDATE,
1320 : fsbno, XFS_AG_RESV_RMAPBT);
1321 15304 : rr->new_btree.bload.get_records = xrep_rmap_get_records;
1322 15304 : rr->new_btree.bload.claim_block = xrep_rmap_claim_block;
1323 15304 : rr->new_btree.alloc_vextent = xrep_rmap_alloc_vextent;
1324 15304 : rmap_cur = xfs_rmapbt_stage_cursor(sc->mp, &rr->new_btree.afake, pag);
1325 :
1326 : /*
1327 : * Initialize @rr->new_btree, reserve space for the new rmapbt,
1328 : * and compute OWN_AG rmaps.
1329 : */
1330 15332 : error = xrep_rmap_reserve_space(rr, rmap_cur);
1331 15274 : if (error)
1332 16 : goto err_cur;
1333 :
1334 : /*
1335 : * Count the rmapbt records again, because the space reservation
1336 : * for the rmapbt itself probably added more records to the btree.
1337 : */
1338 15258 : error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
1339 15346 : if (error)
1340 0 : goto err_cur;
1341 :
1342 15346 : rr->mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
1343 : rr->rmap_btree);
1344 :
1345 15359 : error = xrep_rmap_count_records(rr->mcur, &rr->nr_records);
1346 15377 : if (error)
1347 0 : goto err_mcur;
1348 :
1349 : /*
1350 : * Due to btree slack factors, it's possible for a new btree to be one
1351 : * level taller than the old btree. Update the incore btree height so
1352 : * that we don't trip the verifiers when writing the new btree blocks
1353 : * to disk.
1354 : */
1355 15377 : pag->pagf_alt_levels[XFS_BTNUM_RMAPi] =
1356 15377 : rr->new_btree.bload.btree_height;
1357 :
1358 : /*
1359 : * Move the cursor to the left edge of the tree so that the first
1360 : * increment in ->get_records positions us at the first record.
1361 : */
1362 15377 : error = xfs_btree_goto_left_edge(rr->mcur);
1363 15315 : if (error)
1364 0 : goto err_level;
1365 :
1366 : /* Add all observed rmap records. */
1367 15315 : error = xfs_btree_bload(rmap_cur, &rr->new_btree.bload, rr);
1368 15417 : if (error)
1369 0 : goto err_level;
1370 :
1371 : /*
1372 : * Install the new btree in the AG header. After this point the old
1373 : * btree is no longer accessible and the new tree is live.
1374 : */
1375 15417 : xfs_rmapbt_commit_staged_btree(rmap_cur, sc->tp, sc->sa.agf_bp);
1376 15398 : xfs_btree_del_cursor(rmap_cur, 0);
1377 15400 : xfs_btree_del_cursor(rr->mcur, 0);
1378 15404 : rr->mcur = NULL;
1379 15404 : xfs_buf_relse(mhead_bp);
1380 :
1381 : /*
1382 : * Now that we've written the new btree to disk, we don't need to keep
1383 : * updating the in-memory btree. Abort the scan to stop live updates.
1384 : */
1385 15419 : xchk_iscan_abort(&rr->iscan);
1386 :
1387 : /*
1388 : * The newly committed rmap recordset includes mappings for the blocks
1389 : * that we reserved to build the new btree. If there is excess space
1390 : * reservation to be freed, the corresponding rmap records must also be
1391 : * removed.
1392 : */
1393 15421 : rr->new_btree.oinfo = XFS_RMAP_OINFO_AG;
1394 :
1395 : /* Reset the AGF counters now that we've changed the btree shape. */
1396 15421 : error = xrep_rmap_reset_counters(rr);
1397 15391 : if (error)
1398 0 : goto err_newbt;
1399 :
1400 : /* Dispose of any unused blocks and the accounting information. */
1401 15391 : error = xrep_newbt_commit(&rr->new_btree);
1402 15392 : if (error)
1403 : return error;
1404 :
1405 15391 : return xrep_roll_ag_trans(sc);
1406 :
1407 0 : err_level:
1408 0 : pag->pagf_alt_levels[XFS_BTNUM_RMAPi] = 0;
1409 0 : err_mcur:
1410 0 : xfs_btree_del_cursor(rr->mcur, error);
1411 0 : xfs_buf_relse(mhead_bp);
1412 16 : err_cur:
1413 16 : xfs_btree_del_cursor(rmap_cur, error);
1414 16 : err_newbt:
1415 16 : xrep_newbt_cancel(&rr->new_btree);
1416 16 : return error;
1417 : }
1418 :
1419 : /* Section (IV): Reaping the old btree. */
1420 :
1421 : struct xrep_rmap_find_gaps {
1422 : struct xagb_bitmap rmap_gaps;
1423 : xfs_agblock_t next_agbno;
1424 : };
1425 :
1426 : /* Subtract each free extent in the bnobt from the rmap gaps. */
1427 : STATIC int
1428 9753678 : xrep_rmap_find_freesp(
1429 : struct xfs_btree_cur *cur,
1430 : const struct xfs_alloc_rec_incore *rec,
1431 : void *priv)
1432 : {
1433 9753678 : struct xrep_rmap_find_gaps *rfg = priv;
1434 :
1435 19507264 : return xagb_bitmap_clear(&rfg->rmap_gaps, rec->ar_startblock,
1436 9753678 : rec->ar_blockcount);
1437 : }
1438 :
1439 : /* Record the free space we find, as part of cleaning out the btree. */
1440 : STATIC int
1441 50923138 : xrep_rmap_find_gaps(
1442 : struct xfs_btree_cur *cur,
1443 : const struct xfs_rmap_irec *rec,
1444 : void *priv)
1445 : {
1446 50923138 : struct xrep_rmap_find_gaps *rfg = priv;
1447 50923138 : int error;
1448 :
1449 50923138 : if (rec->rm_startblock > rfg->next_agbno) {
1450 9809288 : error = xagb_bitmap_set(&rfg->rmap_gaps, rfg->next_agbno,
1451 : rec->rm_startblock - rfg->next_agbno);
1452 9808981 : if (error)
1453 : return error;
1454 : }
1455 :
1456 50922831 : rfg->next_agbno = max_t(xfs_agblock_t, rfg->next_agbno,
1457 : rec->rm_startblock + rec->rm_blockcount);
1458 50922831 : return 0;
1459 : }
1460 :
1461 : /*
1462 : * Reap the old rmapbt blocks. Now that the rmapbt is fully rebuilt, we make
1463 : * a list of gaps in the rmap records and a list of the extents mentioned in
1464 : * the bnobt. Any block that's in the new rmapbt gap list but not mentioned
1465 : * in the bnobt is a block from the old rmapbt and can be removed.
1466 : */
1467 : STATIC int
1468 15313 : xrep_rmap_remove_old_tree(
1469 : struct xrep_rmap *rr)
1470 : {
1471 15313 : struct xrep_rmap_find_gaps rfg = {
1472 : .next_agbno = 0,
1473 : };
1474 15313 : struct xfs_scrub *sc = rr->sc;
1475 15313 : struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
1476 15313 : struct xfs_perag *pag = sc->sa.pag;
1477 15313 : struct xfs_btree_cur *mcur;
1478 15313 : struct xfs_buf *mhead_bp;
1479 15313 : xfs_agblock_t agend;
1480 15313 : int error;
1481 :
1482 15313 : xagb_bitmap_init(&rfg.rmap_gaps);
1483 :
1484 : /* Compute free space from the new rmapbt. */
1485 15307 : error = xfbtree_head_read_buf(rr->rmap_btree, NULL, &mhead_bp);
if (error)
goto out_bitmap;
1486 15375 : mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, NULL, mhead_bp,
1487 : rr->rmap_btree);
1488 :
1489 15376 : error = xfs_rmap_query_all(mcur, xrep_rmap_find_gaps, &rfg);
1490 15311 : xfs_btree_del_cursor(mcur, error);
1491 15409 : xfs_buf_relse(mhead_bp);
1492 15413 : if (error)
1493 0 : goto out_bitmap;
1494 :
1495 : /* Insert a record for space between the last rmap and EOAG. */
1496 15413 : agend = be32_to_cpu(agf->agf_length);
1497 15413 : if (rfg.next_agbno < agend) {
1498 15404 : error = xagb_bitmap_set(&rfg.rmap_gaps, rfg.next_agbno,
1499 : agend - rfg.next_agbno);
1500 15320 : if (error)
1501 0 : goto out_bitmap;
1502 : }
1503 :
1504 : /* Compute free space from the existing bnobt. */
1505 15329 : sc->sa.bno_cur = xfs_allocbt_init_cursor(sc->mp, sc->tp, sc->sa.agf_bp,
1506 : sc->sa.pag, XFS_BTNUM_BNO);
1507 15388 : error = xfs_alloc_query_all(sc->sa.bno_cur, xrep_rmap_find_freesp,
1508 : &rfg);
1509 15367 : xfs_btree_del_cursor(sc->sa.bno_cur, error);
1510 15380 : sc->sa.bno_cur = NULL;
1511 15380 : if (error)
1512 0 : goto out_bitmap;
1513 :
1514 : /*
1515 : * Free the "free" blocks that the new rmapbt knows about but the bnobt
1516 : * doesn't--these are the old rmapbt blocks. Credit the old rmapbt
1517 : * block usage count back to the per-AG rmapbt reservation (and not
1518 : * fdblocks, since the rmap btree lives in free space) to keep the
1519 : * reservation and free space accounting correct.
1520 : */
1521 15380 : error = xrep_reap_agblocks(sc, &rfg.rmap_gaps,
1522 : &XFS_RMAP_OINFO_ANY_OWNER, XFS_AG_RESV_RMAPBT);
1523 15335 : if (error)
1524 0 : goto out_bitmap;
1525 :
1526 : /*
1527 : * Now that we've zapped all the old rmapbt blocks we can turn off
1528 : * the alternate height mechanism and reset the per-AG space
1529 : * reservation.
1530 : */
1531 15335 : pag->pagf_alt_levels[XFS_BTNUM_RMAPi] = 0;
1532 15335 : sc->flags |= XREP_RESET_PERAG_RESV;
1533 15335 : out_bitmap:
1534 15335 : xagb_bitmap_destroy(&rfg.rmap_gaps);
1535 15317 : return error;
1536 : }
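
/*
* Worked example of the reap above (invented numbers): in a 100-block AG,
* suppose the new rmapbt's records cover blocks 0-9 and 20-29. The gaps are
* then 10-19 and 30-99. If the bnobt says that blocks 12-19 and 30-99 are
* free, clearing them from the bitmap leaves OLDRMBIT = {10-11}: space that
* is neither actively owned nor free, i.e. the old rmapbt blocks, which are
* then reaped.
*/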
1537 :
1538 : static inline bool
1539 497802 : xrep_rmapbt_want_live_update(
1540 : struct xchk_iscan *iscan,
1541 : const struct xfs_owner_info *oi)
1542 : {
1543 497802 : if (xchk_iscan_aborted(iscan))
1544 : return false;
1545 :
1546 : /*
1547 : * Before unlocking the AG header to perform the inode scan, we
1548 : * recorded reverse mappings for all AG metadata except for the OWN_AG
1549 : * metadata. IOWs, the in-memory btree knows about the AG headers, the
1550 : * two inode btrees, the CoW staging extents, and the refcount btree.
1551 : * For these types of metadata, we need to record the live updates in
1552 : * the in-memory rmap btree.
1553 : *
1554 : * However, we do not scan the free space btrees or the AGFL until we
1555 : * have re-locked the AGF and are ready to reserve space for the new
1556 : * rmap btree, so we do not want live updates for OWN_AG metadata.
1557 : */
1558 416126 : if (XFS_RMAP_NON_INODE_OWNER(oi->oi_owner))
1559 17488 : return oi->oi_owner != XFS_RMAP_OWN_AG;
1560 :
1561 : /* Ignore updates to files that the scanner hasn't visited yet. */
1562 398638 : return xchk_iscan_want_live_update(iscan, oi->oi_owner);
1563 : }
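
/*
* For example (hypothetical updates): while the scan cursor sits at inode
* 200, an update for inode 150 is replayed into the in-memory btree because
* that inode was already visited, an update for inode 250 is ignored because
* the scanner will visit it later, an OWN_INOBT update is replayed, and an
* OWN_AG update is ignored because the free space metadata is not recorded
* until the AGF is re-locked.
*/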
1564 :
1565 : /*
1566 : * Apply a rmapbt update from the regular filesystem into our shadow btree.
1567 : * We're running from the thread that owns the AGF buffer and is generating
1568 : * the update, so we must be careful about which parts of the struct xrep_rmap
1569 : * that we change.
1570 : */
1571 : static int
1572 497898 : xrep_rmapbt_live_update(
1573 : struct notifier_block *nb,
1574 : unsigned long action,
1575 : void *data)
1576 : {
1577 497898 : struct xfs_rmap_update_params *p = data;
1578 497898 : struct xrep_rmap *rr;
1579 497898 : struct xfs_mount *mp;
1580 497898 : struct xfs_btree_cur *mcur;
1581 497898 : struct xfs_buf *mhead_bp;
1582 497898 : struct xfs_trans *tp;
1583 497898 : void *txcookie;
1584 497898 : int error;
1585 :
1586 497898 : rr = container_of(nb, struct xrep_rmap, hooks.update_hook.nb);
1587 497898 : mp = rr->sc->mp;
1588 :
1589 497898 : if (!xrep_rmapbt_want_live_update(&rr->iscan, &p->oinfo))
1590 299914 : goto out_unlock;
1591 :
1592 197890 : trace_xrep_rmap_live_update(mp, rr->sc->sa.pag->pag_agno, action, p);
1593 :
1594 197890 : error = xrep_trans_alloc_hook_dummy(mp, &txcookie, &tp);
1595 197890 : if (error)
1596 0 : goto out_abort;
1597 :
1598 197890 : mutex_lock(&rr->lock);
1599 197890 : error = xfbtree_head_read_buf(rr->rmap_btree, tp, &mhead_bp);
1600 197890 : if (error)
1601 0 : goto out_cancel;
1602 :
1603 197890 : mcur = xfs_rmapbt_mem_cursor(rr->sc->sa.pag, tp, mhead_bp,
1604 : rr->rmap_btree);
1605 395780 : error = __xfs_rmap_finish_intent(mcur, action, p->startblock,
1606 197890 : p->blockcount, &p->oinfo, p->unwritten);
1607 197890 : xfs_btree_del_cursor(mcur, error);
1608 197890 : if (error)
1609 0 : goto out_cancel;
1610 :
1611 197890 : error = xfbtree_trans_commit(rr->rmap_btree, tp);
1612 197890 : if (error)
1613 0 : goto out_cancel;
1614 :
1615 197890 : xrep_trans_cancel_hook_dummy(&txcookie, tp);
1616 197890 : mutex_unlock(&rr->lock);
1617 197890 : return NOTIFY_DONE;
1618 :
1619 0 : out_cancel:
1620 0 : xfbtree_trans_cancel(rr->rmap_btree, tp);
1621 0 : xrep_trans_cancel_hook_dummy(&txcookie, tp);
1622 0 : out_abort:
1623 0 : mutex_unlock(&rr->lock);
1624 0 : xchk_iscan_abort(&rr->iscan);
1625 : out_unlock:
1626 : return NOTIFY_DONE;
1627 : }
1628 :
1629 : /* Set up the filesystem scan components. */
1630 : STATIC int
1631 15531 : xrep_rmap_setup_scan(
1632 : struct xrep_rmap *rr)
1633 : {
1634 15531 : struct xfs_scrub *sc = rr->sc;
1635 15531 : int error;
1636 :
1637 15531 : mutex_init(&rr->lock);
1638 :
1639 : /* Set up in-memory rmap btree */
1640 15508 : error = xfs_rmapbt_mem_create(sc->mp, sc->sa.pag->pag_agno,
1641 : sc->xfile_buftarg, &rr->rmap_btree);
1642 15521 : if (error)
1643 0 : goto out_mutex;
1644 :
1645 : /* Retry iget every tenth of a second for up to 30 seconds. */
1646 15521 : xchk_iscan_start(sc, 30000, 100, &rr->iscan);
1647 :
1648 : /*
1649 : * Hook into live rmap operations so that we can update our in-memory
1650 : * btree to reflect live changes on the filesystem. Since we drop the
1651 : * AGF buffer to scan all the inodes, we need this piece to avoid
1652 : * installing a stale btree.
1653 : */
1654 15436 : ASSERT(sc->flags & XCHK_FSGATES_RMAP);
1655 15436 : xfs_hook_setup(&rr->hooks.update_hook, xrep_rmapbt_live_update);
1656 15436 : error = xfs_rmap_hook_add(sc->sa.pag, &rr->hooks);
1657 15465 : if (error)
1658 0 : goto out_iscan;
1659 : return 0;
1660 :
1661 : out_iscan:
1662 0 : xchk_iscan_teardown(&rr->iscan);
1663 0 : xfbtree_destroy(rr->rmap_btree);
1664 0 : out_mutex:
1665 0 : mutex_destroy(&rr->lock);
1666 0 : return error;
1667 : }
1668 :
1669 : /* Tear down scan components. */
1670 : STATIC void
1671 15432 : xrep_rmap_teardown(
1672 : struct xrep_rmap *rr)
1673 : {
1674 15432 : struct xfs_scrub *sc = rr->sc;
1675 :
1676 15432 : xchk_iscan_abort(&rr->iscan);
1677 15555 : xfs_rmap_hook_del(sc->sa.pag, &rr->hooks);
1678 15353 : xchk_iscan_teardown(&rr->iscan);
1679 15377 : xfbtree_destroy(rr->rmap_btree);
1680 15496 : mutex_destroy(&rr->lock);
1681 15473 : }
1682 :
1683 : /* Repair the rmap btree for some AG. */
1684 : int
1685 15552 : xrep_rmapbt(
1686 : struct xfs_scrub *sc)
1687 : {
1688 15552 : struct xrep_rmap *rr = sc->buf;
1689 15552 : int error;
1690 :
1691 15552 : error = xrep_rmap_setup_scan(rr);
1692 15504 : if (error)
1693 : return error;
1694 :
1695 : /*
1696 : * Collect rmaps for everything in this AG that isn't space metadata.
1697 : * These rmaps won't change even as we try to allocate blocks.
1698 : */
1699 15458 : error = xrep_rmap_find_rmaps(rr);
1700 15514 : if (error)
1701 126 : goto out_records;
1702 :
1703 : /* Rebuild the rmap information. */
1704 15388 : error = xrep_rmap_build_new_tree(rr);
1705 15335 : if (error)
1706 16 : goto out_records;
1707 :
1708 : /* Kill the old tree. */
1709 15319 : error = xrep_rmap_remove_old_tree(rr);
1710 :
1711 15492 : out_records:
1712 15492 : xrep_rmap_teardown(rr);
1713 15492 : return error;
1714 : }