Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_mount.h"
12 : #include "xfs_defer.h"
13 : #include "xfs_btree.h"
14 : #include "xfs_btree_staging.h"
15 : #include "xfs_inode.h"
16 : #include "xfs_bit.h"
17 : #include "xfs_log_format.h"
18 : #include "xfs_trans.h"
19 : #include "xfs_sb.h"
20 : #include "xfs_alloc.h"
21 : #include "xfs_ialloc.h"
22 : #include "xfs_rmap.h"
23 : #include "xfs_rmap_btree.h"
24 : #include "xfs_refcount.h"
25 : #include "xfs_refcount_btree.h"
26 : #include "xfs_error.h"
27 : #include "xfs_ag.h"
28 : #include "xfs_health.h"
29 : #include "scrub/xfs_scrub.h"
30 : #include "scrub/scrub.h"
31 : #include "scrub/common.h"
32 : #include "scrub/btree.h"
33 : #include "scrub/trace.h"
34 : #include "scrub/repair.h"
35 : #include "scrub/bitmap.h"
36 : #include "scrub/xfile.h"
37 : #include "scrub/xfarray.h"
38 : #include "scrub/newbt.h"
39 : #include "scrub/reap.h"
40 : #include "scrub/rcbag.h"
41 :
42 : /*
43 : * Rebuilding the Reference Count Btree
44 : * ====================================
45 : *
46 : * This algorithm is "borrowed" from xfs_repair. Imagine the rmap
47 : * entries as rectangles representing extents of physical blocks, and
48 : * that the rectangles can be laid down to allow them to overlap each
49 : * other; then we know that we must emit a refcnt btree entry wherever
50 : * the amount of overlap changes, i.e. the emission stimulus is
51 : * level-triggered:
52 : *
53 : * - ---
54 : * -- ----- ---- --- ------
55 : * -- ---- ----------- ---- ---------
56 : * -------------------------------- -----------
57 : * ^ ^ ^^ ^^ ^ ^^ ^^^ ^^^^ ^ ^^ ^ ^ ^
58 : * 2 1 23 21 3 43 234 2123 1 01 2 3 0
59 : *
60 : * For our purposes, a rmap is a tuple (startblock, len, fileoff, owner).
61 : *
62 : * Note that in the actual refcnt btree we don't store the refcount < 2
63 : * cases because the bnobt tells us which blocks are free; single-use
64 : * blocks aren't recorded in the bnobt or the refcntbt. If the rmapbt
65 : * supports storing multiple entries covering a given block we could
66 : * theoretically dispense with the refcntbt and simply count rmaps, but
67 : * that's inefficient in the (hot) write path, so we'll take the cost of
68 : * the extra tree to save time. Also there's no guarantee that rmap
69 : * will be enabled.
70 : *
71 : * Given an array of rmaps sorted by physical block number, a starting
72 : * physical block (sp), a bag to hold rmaps that cover sp, and the next
73 : * physical block where the level changes (np), we can reconstruct the
74 : * refcount btree as follows:
75 : *
76 : * While there are still unprocessed rmaps in the array,
77 : * - Set sp to the physical block (pblk) of the next unprocessed rmap.
78 : * - Add to the bag all rmaps in the array where startblock == sp.
79 : * - Set np to the physical block where the bag size will change. This
80 : * is the minimum of (the pblk of the next unprocessed rmap) and
81 : * (startblock + len of each rmap in the bag).
82 : * - Record the bag size as old_bag_size.
83 : *
84 : * - While the bag isn't empty,
85 : * - Remove from the bag all rmaps where startblock + len == np.
86 : * - Add to the bag all rmaps in the array where startblock == np.
87 : * - If the bag size isn't old_bag_size, store the refcount entry
88 : * (sp, np - sp, bag_size) in the refcnt btree.
89 : * - If the bag is empty, break out of the inner loop.
90 : * - Set old_bag_size to the bag size
91 : * - Set sp = np.
92 : * - Set np to the physical block where the bag size will change.
93 : * This is the minimum of (the pblk of the next unprocessed rmap)
94 : * and (startblock + len of each rmap in the bag).
95 : *
96 : * Like all the other repairers, we make a list of all the refcount
97 : * records we need, then reinitialize the refcount btree root and
98 : * insert all the records.
99 : */
100 :
/* Context for one AG refcount btree repair. */
struct xrep_refc {
	/* refcount extents; sorted and bulk-loaded into the new btree */
	struct xfarray		*refcount_records;

	/* new refcountbt information */
	struct xrep_newbt	new_btree;

	/* old refcountbt blocks; reaped after the new tree is committed */
	struct xagb_bitmap	old_refcountbt_blocks;

	/* scrub context that owns this repair */
	struct xfs_scrub	*sc;

	/* get_records()'s position in the refcount record array. */
	xfarray_idx_t		array_cur;

	/* # of refcountbt blocks */
	xfs_extlen_t		btblocks;
};
119 :
/* Set us up to repair refcount btrees. */
int
xrep_setup_ag_refcountbt(
	struct xfs_scrub	*sc)
{
	char			*descr;
	int			error;

	/*
	 * Create the xfile buftarg that will back the "rmap record bag"
	 * (rcbag) used by xrep_refc_find_refcounts to track rmaps that
	 * overlap the block currently being swept.
	 */
	descr = xchk_xfile_ag_descr(sc, "rmap record bag");
	error = xrep_setup_buftarg(sc, descr);
	kfree(descr);
	return error;
}
133 :
/*
 * Check for any obvious conflicts with this shared/CoW staging extent.
 * Returns -EFSCORRUPTED if the record fails validation or cross-references
 * badly against the bnobt/inobt; 0 if it looks plausible.
 */
STATIC int
xrep_refc_check_ext(
	struct xfs_scrub		*sc,
	const struct xfs_refcount_irec	*rec)
{
	enum xbtree_recpacking		outcome;
	int				error;

	/* Reject records that fail the basic per-AG irec checks. */
	if (xfs_refcount_check_perag_irec(sc->sa.pag, rec) != NULL)
		return -EFSCORRUPTED;

	/* Make sure this isn't free space. */
	error = xfs_alloc_has_records(sc->sa.bno_cur, rec->rc_startblock,
			rec->rc_blockcount, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	/* Must not be an inode chunk. */
	error = xfs_ialloc_has_inodes_at_extent(sc->sa.ino_cur,
			rec->rc_startblock, rec->rc_blockcount, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	return 0;
}
164 :
/*
 * Record a reference count extent: validate it, then append it to the
 * in-memory record array for later sorting and bulk loading.
 */
STATIC int
xrep_refc_stash(
	struct xrep_refc		*rr,
	enum xfs_refc_domain		domain,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len,
	uint64_t			refcount)
{
	struct xfs_refcount_irec	irec = {
		.rc_startblock		= agbno,
		.rc_blockcount		= len,
		.rc_domain		= domain,
	};
	struct xfs_scrub		*sc = rr->sc;
	int				error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/* Clamp to the largest refcount the ondisk format can represent. */
	irec.rc_refcount = min_t(uint64_t, MAXREFCOUNT, refcount);

	error = xrep_refc_check_ext(rr->sc, &irec);
	if (error)
		return error;

	trace_xrep_refc_found(sc->sa.pag, &irec);

	return xfarray_append(rr->refcount_records, &irec);
}
195 :
/* Record a CoW staging extent.  These always carry a refcount of 1. */
STATIC int
xrep_refc_stash_cow(
	struct xrep_refc	*rr,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	return xrep_refc_stash(rr, XFS_REFC_DOMAIN_COW, agbno, len, 1);
}
205 :
206 : /* Decide if an rmap could describe a shared extent. */
207 : static inline bool
208 518398267 : xrep_refc_rmap_shareable(
209 : struct xfs_mount *mp,
210 : const struct xfs_rmap_irec *rmap)
211 : {
212 : /* AG metadata are never sharable */
213 518398267 : if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
214 : return false;
215 :
216 : /* Metadata in files are never shareable */
217 513862150 : if (xfs_internal_inum(mp, rmap->rm_owner))
218 : return false;
219 :
220 : /* Metadata and unwritten file blocks are not shareable. */
221 478382272 : if (rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
222 : XFS_RMAP_UNWRITTEN))
223 72087023 : return false;
224 :
225 : return true;
226 : }
227 :
/*
 * Walk along the reverse mapping records until we find one that could describe
 * a shared extent.  On success, *have_rec is true and *rmap holds the record;
 * *have_rec stays false if the cursor ran off the right edge of the rmapbt.
 */
STATIC int
xrep_refc_walk_rmaps(
	struct xrep_refc	*rr,
	struct xfs_rmap_irec	*rmap,
	bool			*have_rec)
{
	struct xfs_btree_cur	*cur = rr->sc->sa.rmap_cur;
	struct xfs_mount	*mp = cur->bc_mp;
	int			have_gt;
	int			error = 0;

	*have_rec = false;

	/*
	 * Loop through the remaining rmaps.  Remember CoW staging
	 * extents and the refcountbt blocks from the old tree for later
	 * disposal.  We can only share written data fork extents, so
	 * keep looping until we find an rmap for one.
	 */
	do {
		if (xchk_should_terminate(rr->sc, &error))
			return error;

		error = xfs_btree_increment(cur, 0, &have_gt);
		if (error)
			return error;
		if (!have_gt)
			return 0;

		error = xfs_rmap_get_rec(cur, rmap, &have_gt);
		if (error)
			return error;
		/* Increment said there was a record; failure to read it is corruption. */
		if (XFS_IS_CORRUPT(mp, !have_gt)) {
			xfs_btree_mark_sick(cur);
			return -EFSCORRUPTED;
		}

		if (rmap->rm_owner == XFS_RMAP_OWN_COW) {
			/* CoW staging extent; stash with refcount 1. */
			error = xrep_refc_stash_cow(rr, rmap->rm_startblock,
					rmap->rm_blockcount);
			if (error)
				return error;
		} else if (rmap->rm_owner == XFS_RMAP_OWN_REFC) {
			/* refcountbt block, dump it when we're done. */
			rr->btblocks += rmap->rm_blockcount;
			error = xagb_bitmap_set(&rr->old_refcountbt_blocks,
					rmap->rm_startblock,
					rmap->rm_blockcount);
			if (error)
				return error;
		}
	} while (!xrep_refc_rmap_shareable(mp, rmap));

	*have_rec = true;
	return 0;
}
288 :
289 : static inline uint32_t
290 : xrep_refc_encode_startblock(
291 : const struct xfs_refcount_irec *irec)
292 : {
293 22696554 : uint32_t start;
294 :
295 22696554 : start = irec->rc_startblock & ~XFS_REFC_COWFLAG;
296 22696554 : if (irec->rc_domain == XFS_REFC_DOMAIN_COW)
297 594765 : start |= XFS_REFC_COWFLAG;
298 :
299 22696554 : return start;
300 : }
301 :
302 : /* Sort in the same order as the ondisk records. */
303 : static int
304 11348277 : xrep_refc_extent_cmp(
305 : const void *a,
306 : const void *b)
307 : {
308 11348277 : const struct xfs_refcount_irec *ap = a;
309 11348277 : const struct xfs_refcount_irec *bp = b;
310 11348277 : uint32_t sa, sb;
311 :
312 11348277 : sa = xrep_refc_encode_startblock(ap);
313 11348277 : sb = xrep_refc_encode_startblock(bp);
314 :
315 11348277 : if (sa > sb)
316 : return 1;
317 6728117 : if (sa < sb)
318 6728117 : return -1;
319 : return 0;
320 : }
321 :
/*
 * Sort the refcount extents by startblock or else the btree records will be in
 * the wrong order.  Make sure the records do not overlap in physical space.
 */
STATIC int
xrep_refc_sort_records(
	struct xrep_refc		*rr)
{
	struct xfs_refcount_irec	irec;
	xfarray_idx_t			cur;
	enum xfs_refc_domain		dom = XFS_REFC_DOMAIN_SHARED;
	xfs_agblock_t			next_agbno = 0;
	int				error;

	error = xfarray_sort(rr->refcount_records, xrep_refc_extent_cmp,
			XFARRAY_SORT_KILLABLE);
	if (error)
		return error;

	foreach_xfarray_idx(rr->refcount_records, cur) {
		if (xchk_should_terminate(rr->sc, &error))
			return error;

		error = xfarray_load(rr->refcount_records, cur, &irec);
		if (error)
			return error;

		/*
		 * The sort key puts all SHARED records before all COW
		 * records; note the single permitted domain transition
		 * and restart the overlap check at agbno 0.
		 */
		if (dom == XFS_REFC_DOMAIN_SHARED &&
		    irec.rc_domain == XFS_REFC_DOMAIN_COW) {
			dom = irec.rc_domain;
			next_agbno = 0;
		}

		/* A record sorted back into the wrong domain is corruption. */
		if (dom != irec.rc_domain)
			return -EFSCORRUPTED;
		/* Records within one domain must not overlap. */
		if (irec.rc_startblock < next_agbno)
			return -EFSCORRUPTED;

		next_agbno = irec.rc_startblock + irec.rc_blockcount;
	}

	return error;
}
365 :
/*
 * Walk forward through the rmap btree to collect all rmaps starting at
 * @bno in @rcstack.  These represent the file(s) that share ownership of
 * the current block.  Upon return, the rmap cursor points to the last record
 * satisfying the startblock constraint.
 */
static int
xrep_refc_push_rmaps_at(
	struct xrep_refc	*rr,
	struct rcbag		*rcstack,
	xfs_agblock_t		bno,
	struct xfs_rmap_irec	*rmap,
	bool			*have)
{
	struct xfs_scrub	*sc = rr->sc;
	int			have_gt;
	int			error;

	while (*have && rmap->rm_startblock == bno) {
		error = rcbag_add(rcstack, rr->sc->tp, rmap);
		if (error)
			return error;

		error = xrep_refc_walk_rmaps(rr, rmap, have);
		if (error)
			return error;
	}

	/*
	 * The walk above stopped one record past the last one we pushed;
	 * step the cursor back so it points at that last record.
	 */
	error = xfs_btree_decrement(sc->sa.rmap_cur, 0, &have_gt);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(sc->mp, !have_gt)) {
		xfs_btree_mark_sick(sc->sa.rmap_cur);
		return -EFSCORRUPTED;
	}

	return 0;
}
404 :
/*
 * Iterate all the rmap records to generate reference count data.  This is
 * the sweep described in the comment at the top of the file: sbno is the
 * start of the current refcount extent, cbno the start of the pending
 * (unemitted) record, and nbno the next block where the overlap count
 * changes.
 */
STATIC int
xrep_refc_find_refcounts(
	struct xrep_refc	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct rcbag		*rcstack;
	uint64_t		old_stack_height;
	xfs_agblock_t		sbno;
	xfs_agblock_t		cbno;
	xfs_agblock_t		nbno;
	bool			have;
	int			error;

	xrep_ag_btcur_init(sc, &sc->sa);

	/*
	 * Set up a bag to store all the rmap records that we're tracking to
	 * generate a reference count record.  If the size of the bag exceeds
	 * MAXREFCOUNT, we clamp rc_refcount.
	 */
	error = rcbag_init(sc->mp, sc->xfile_buftarg, &rcstack);
	if (error)
		goto out_cur;

	/* Start the rmapbt cursor to the left of all records. */
	error = xfs_btree_goto_left_edge(sc->sa.rmap_cur);
	if (error)
		goto out_bag;

	/* Process reverse mappings into refcount data. */
	while (xfs_btree_has_more_records(sc->sa.rmap_cur)) {
		struct xfs_rmap_irec	rmap;

		/* Push all rmaps with pblk == sbno onto the stack */
		error = xrep_refc_walk_rmaps(rr, &rmap, &have);
		if (error)
			goto out_bag;
		if (!have)
			break;
		sbno = cbno = rmap.rm_startblock;
		error = xrep_refc_push_rmaps_at(rr, rcstack, sbno, &rmap,
				&have);
		if (error)
			goto out_bag;

		/* Set nbno to the bno of the next refcount change */
		error = rcbag_next_edge(rcstack, sc->tp, &rmap, have, &nbno);
		if (error)
			goto out_bag;

		ASSERT(nbno > sbno);
		old_stack_height = rcbag_count(rcstack);

		/* While stack isn't empty... */
		while (rcbag_count(rcstack) > 0) {
			/* Pop all rmaps that end at nbno */
			error = rcbag_remove_ending_at(rcstack, sc->tp, nbno);
			if (error)
				goto out_bag;

			/* Push array items that start at nbno */
			error = xrep_refc_walk_rmaps(rr, &rmap, &have);
			if (error)
				goto out_bag;
			if (have) {
				error = xrep_refc_push_rmaps_at(rr, rcstack,
						nbno, &rmap, &have);
				if (error)
					goto out_bag;
			}

			/* Emit refcount if necessary */
			ASSERT(nbno > cbno);
			if (rcbag_count(rcstack) != old_stack_height) {
				/*
				 * Single-owner extents aren't stored in the
				 * refcount btree, so only emit records with
				 * refcount > 1.
				 */
				if (old_stack_height > 1) {
					error = xrep_refc_stash(rr,
							XFS_REFC_DOMAIN_SHARED,
							cbno, nbno - cbno,
							old_stack_height);
					if (error)
						goto out_bag;
				}
				cbno = nbno;
			}

			/* Stack empty, go find the next rmap */
			if (rcbag_count(rcstack) == 0)
				break;
			old_stack_height = rcbag_count(rcstack);
			sbno = nbno;

			/* Set nbno to the bno of the next refcount change */
			error = rcbag_next_edge(rcstack, sc->tp, &rmap, have,
					&nbno);
			if (error)
				goto out_bag;

			ASSERT(nbno > sbno);
		}
	}

	ASSERT(rcbag_count(rcstack) == 0);
out_bag:
	rcbag_free(&rcstack);
out_cur:
	xchk_ag_btcur_free(&sc->sa);
	return error;
}
514 :
/*
 * Retrieve refcountbt data for bulk load.  Copies up to @nr_wanted sorted
 * records out of the xfarray (starting at rr->array_cur) and formats them
 * into @block via the btree cursor's record scratchpad.  Returns the number
 * of records loaded, or a negative errno.
 */
STATIC int
xrep_refc_get_records(
	struct xfs_btree_cur		*cur,
	unsigned int			idx,
	struct xfs_btree_block		*block,
	unsigned int			nr_wanted,
	void				*priv)
{
	struct xfs_refcount_irec	*irec = &cur->bc_rec.rc;
	struct xrep_refc		*rr = priv;
	union xfs_btree_rec		*block_rec;
	unsigned int			loaded;
	int				error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		error = xfarray_load(rr->refcount_records, rr->array_cur++,
				irec);
		if (error)
			return error;

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}
542 :
/* Feed one of the new btree blocks to the bulk loader. */
STATIC int
xrep_refc_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_refc	*rr = priv;
	int			error;

	/*
	 * Re-log the new btree's autoreap intent items first -- presumably
	 * so a long-running bulk load cannot pin the log tail; confirm
	 * against xrep_newbt_relog_autoreap.
	 */
	error = xrep_newbt_relog_autoreap(&rr->new_btree);
	if (error)
		return error;

	return xrep_newbt_claim_block(cur, &rr->new_btree, ptr);
}
559 :
/* Update the AGF counters. */
STATIC int
xrep_refc_reset_counters(
	struct xrep_refc	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;

	/*
	 * After we commit the new btree to disk, it is possible that the
	 * process to reap the old btree blocks will race with the AIL trying
	 * to checkpoint the old btree blocks into the filesystem.  If the new
	 * tree is shorter than the old one, the refcountbt write verifier will
	 * fail and the AIL will shut down the filesystem.
	 *
	 * To avoid this, save the old incore btree height values as the alt
	 * height values before re-initializing the perag info from the updated
	 * AGF to capture all the new values.
	 */
	pag->pagf_alt_refcount_level = pag->pagf_refcount_level;

	/* Reinitialize with the values we just logged. */
	return xrep_reinit_pagf(sc);
}
584 :
/*
 * Use the collected refcount information to stage a new refcount btree.  If
 * this is successful we'll return with the new btree root information logged
 * to the repair transaction but not yet committed.
 */
STATIC int
xrep_refc_build_new_tree(
	struct xrep_refc	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_btree_cur	*refc_cur;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_fsblock_t		fsbno;
	int			error;

	/* Sort records and verify that they don't overlap. */
	error = xrep_refc_sort_records(rr);
	if (error)
		return error;

	/*
	 * Prepare to construct the new btree by reserving disk space for the
	 * new btree and setting up all the accounting information we'll need
	 * to root the new btree while it's under construction and before we
	 * attach it to the AG header.
	 */
	fsbno = XFS_AGB_TO_FSB(sc->mp, pag->pag_agno, xfs_refc_block(sc->mp));
	xrep_newbt_init_ag(&rr->new_btree, sc, &XFS_RMAP_OINFO_REFC, fsbno,
			XFS_AG_RESV_METADATA);
	rr->new_btree.bload.get_records = xrep_refc_get_records;
	rr->new_btree.bload.claim_block = xrep_refc_claim_block;

	/* Compute how many blocks we'll need. */
	refc_cur = xfs_refcountbt_stage_cursor(sc->mp, &rr->new_btree.afake,
			pag);
	error = xfs_btree_bload_compute_geometry(refc_cur,
			&rr->new_btree.bload,
			xfarray_length(rr->refcount_records));
	if (error)
		goto err_cur;

	/* Last chance to abort before we start committing fixes. */
	if (xchk_should_terminate(sc, &error))
		goto err_cur;

	/* Reserve the space we'll need for the new btree. */
	error = xrep_newbt_alloc_blocks(&rr->new_btree,
			rr->new_btree.bload.nr_blocks);
	if (error)
		goto err_cur;

	/*
	 * Due to btree slack factors, it's possible for a new btree to be one
	 * level taller than the old btree.  Update the incore btree height so
	 * that we don't trip the verifiers when writing the new btree blocks
	 * to disk.
	 */
	pag->pagf_alt_refcount_level = rr->new_btree.bload.btree_height;

	/* Add all observed refcount records. */
	rr->array_cur = XFARRAY_CURSOR_INIT;
	error = xfs_btree_bload(refc_cur, &rr->new_btree.bload, rr);
	if (error)
		goto err_level;

	/*
	 * Install the new btree in the AG header.  After this point the old
	 * btree is no longer accessible and the new tree is live.
	 */
	xfs_refcountbt_commit_staged_btree(refc_cur, sc->tp, sc->sa.agf_bp);
	xfs_btree_del_cursor(refc_cur, 0);

	/* Reset the AGF counters now that we've changed the btree shape. */
	error = xrep_refc_reset_counters(rr);
	if (error)
		goto err_newbt;

	/* Dispose of any unused blocks and the accounting information. */
	error = xrep_newbt_commit(&rr->new_btree);
	if (error)
		return error;

	return xrep_roll_ag_trans(sc);

err_level:
	pag->pagf_alt_refcount_level = 0;
err_cur:
	xfs_btree_del_cursor(refc_cur, error);
err_newbt:
	xrep_newbt_cancel(&rr->new_btree);
	return error;
}
676 :
/*
 * Now that we've logged the roots of the new btrees, invalidate all of the
 * old blocks and free them.
 */
STATIC int
xrep_refc_remove_old_tree(
	struct xrep_refc	*rr)
{
	struct xfs_scrub	*sc = rr->sc;
	struct xfs_perag	*pag = sc->sa.pag;
	int			error;

	/* Free the old refcountbt blocks if they're not in use. */
	error = xrep_reap_agblocks(sc, &rr->old_refcountbt_blocks,
			&XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA);
	if (error)
		return error;

	/*
	 * Now that we've zapped all the old refcountbt blocks we can turn off
	 * the alternate height mechanism and reset the per-AG space
	 * reservations.
	 */
	pag->pagf_alt_refcount_level = 0;
	sc->flags |= XREP_RESET_PERAG_RESV;
	return 0;
}
704 :
/*
 * Rebuild the refcount btree: collect records from the rmapbt, build and
 * commit a replacement tree, then reap the old tree's blocks.
 */
int
xrep_refcountbt(
	struct xfs_scrub	*sc)
{
	struct xrep_refc	*rr;
	struct xfs_mount	*mp = sc->mp;
	char			*descr;
	int			error;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_has_rmapbt(mp))
		return -EOPNOTSUPP;

	rr = kzalloc(sizeof(struct xrep_refc), XCHK_GFP_FLAGS);
	if (!rr)
		return -ENOMEM;
	rr->sc = sc;

	/* Set up enough storage to handle one refcount record per block. */
	descr = xchk_xfile_ag_descr(sc, "reference count records");
	error = xfarray_create(descr, mp->m_sb.sb_agblocks,
			sizeof(struct xfs_refcount_irec),
			&rr->refcount_records);
	kfree(descr);
	if (error)
		goto out_rr;

	/* Collect all reference counts. */
	xagb_bitmap_init(&rr->old_refcountbt_blocks);
	error = xrep_refc_find_refcounts(rr);
	if (error)
		goto out_bitmap;

	/* Rebuild the refcount information. */
	error = xrep_refc_build_new_tree(rr);
	if (error)
		goto out_bitmap;

	/* Kill the old tree. */
	error = xrep_refc_remove_old_tree(rr);

out_bitmap:
	xagb_bitmap_destroy(&rr->old_refcountbt_blocks);
	xfarray_destroy(rr->refcount_records);
out_rr:
	kfree(rr);
	return error;
}
|