LCOV - code coverage report
Current view: top level - fs/xfs/scrub - rtrefcount.c (source / functions) Hit Total Coverage
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023 Lines: 214 258 82.9 %
Date: 2023-07-31 20:08:34 Functions: 14 14 100.0 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Copyright (C) 2021-2023 Oracle.  All Rights Reserved.
       4             :  * Author: Darrick J. Wong <djwong@kernel.org>
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_log_format.h"
      11             : #include "xfs_trans_resv.h"
      12             : #include "xfs_mount.h"
      13             : #include "xfs_trans.h"
      14             : #include "xfs_btree.h"
      15             : #include "xfs_rmap.h"
      16             : #include "xfs_refcount.h"
      17             : #include "xfs_inode.h"
      18             : #include "xfs_rtbitmap.h"
      19             : #include "xfs_rtgroup.h"
      20             : #include "xfs_rtalloc.h"
      21             : #include "scrub/scrub.h"
      22             : #include "scrub/common.h"
      23             : #include "scrub/btree.h"
      24             : #include "scrub/repair.h"
      25             : 
      26             : /* Set us up with the realtime refcount metadata locked. */
       27             : int
       28      179801 : xchk_setup_rtrefcountbt(
       29             :         struct xfs_scrub        *sc)
       30             : {
       31      179801 :         struct xfs_rtgroup      *rtg;
       32      179801 :         int                     error;
       33             : 
                      :         /* Enable the intent-drain fsgate if this scrub requires it. */
       34      179801 :         if (xchk_need_intent_drain(sc))
       35       14703 :                 xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
       36             : 
                      :         /* Repair setup happens before any group references or locks. */
       37      359602 :         if (xchk_could_repair(sc)) {
       38       30808 :                 error = xrep_setup_rtrefcountbt(sc);
       39       30810 :                 if (error)
       40             :                         return error;
       41             :         }
       42             : 
                      :         /* For rt scrubbers, sm_agno carries the rtgroup number. */
       43      179803 :         rtg = xfs_rtgroup_get(sc->mp, sc->sm->sm_agno);
       44      179815 :         if (!rtg)
       45             :                 return -ENOENT;
       46             : 
       47      179815 :         error = xchk_setup_rt(sc);
       48      179798 :         if (error)
       49          40 :                 goto out_rtg;
       50             : 
                      :         /* Make the rt refcount inode the one this scrub examines. */
       51      179758 :         error = xchk_install_live_inode(sc, rtg->rtg_refcountip);
       52      179768 :         if (error)
       53           0 :                 goto out_rtg;
       54             : 
       55      179768 :         error = xchk_ino_dqattach(sc);
       56      179748 :         if (error)
       57           0 :                 goto out_rtg;
       58             : 
                      :         /* Initialize sc->sr with all rtgroup metadata locks held. */
       59      179748 :         error = xchk_rtgroup_init(sc, rtg->rtg_rgno, &sc->sr, XCHK_RTGLOCK_ALL);
       60      179809 : out_rtg:
                      :         /* Release the reference taken by xfs_rtgroup_get above. */
       61      179809 :         xfs_rtgroup_put(rtg);
       62      179809 :         return error;
       63             : }
      64             : 
      65             : /* Realtime Reference count btree scrubber. */
      66             : 
      67             : /*
      68             :  * Confirming Reference Counts via Reverse Mappings
      69             :  *
      70             :  * We want to count the reverse mappings overlapping a refcount record
      71             :  * (bno, len, refcount), allowing for the possibility that some of the
      72             :  * overlap may come from smaller adjoining reverse mappings, while some
      73             :  * comes from single extents which overlap the range entirely.  The
      74             :  * outer loop is as follows:
      75             :  *
      76             :  * 1. For all reverse mappings overlapping the refcount extent,
      77             :  *    a. If a given rmap completely overlaps, mark it as seen.
      78             :  *    b. Otherwise, record the fragment (in agbno order) for later
      79             :  *       processing.
      80             :  *
      81             :  * Once we've seen all the rmaps, we know that for all blocks in the
      82             :  * refcount record we want to find $refcount owners and we've already
      83             :  * visited $seen extents that overlap all the blocks.  Therefore, we
      84             :  * need to find ($refcount - $seen) owners for every block in the
      85             :  * extent; call that quantity $target_nr.  Proceed as follows:
      86             :  *
      87             :  * 2. Pull the first $target_nr fragments from the list; all of them
      88             :  *    should start at or before the start of the extent.
      89             :  *    Call this subset of fragments the working set.
      90             :  * 3. Until there are no more unprocessed fragments,
      91             :  *    a. Find the shortest fragments in the set and remove them.
      92             :  *    b. Note the block number of the end of these fragments.
      93             :  *    c. Pull the same number of fragments from the list.  All of these
      94             :  *       fragments should start at the block number recorded in the
      95             :  *       previous step.
      96             :  *    d. Put those fragments in the set.
      97             :  * 4. Check that there are $target_nr fragments remaining in the list,
      98             :  *    and that they all end at or beyond the end of the refcount extent.
      99             :  *
     100             :  * If the refcount is correct, all the check conditions in the algorithm
     101             :  * should always hold true.  If not, the refcount is incorrect.
     102             :  */
                      : /* One saved rmap that only partially overlaps a refcount extent. */
      103             : struct xchk_rtrefcnt_frag {
                      :         /* Linkage in xchk_rtrefcnt_check.fragments or a local worklist. */
      104             :         struct list_head        list;
                      :         /* Copy of the reverse-mapping record for this fragment. */
      105             :         struct xfs_rmap_irec    rm;
      106             : };
     107             : 
                      : /* Context for tallying rmap coverage of one refcount record. */
      108             : struct xchk_rtrefcnt_check {
      109             :         struct xfs_scrub        *sc;
                      :         /* rmaps that only partially overlap the extent, in bno order */
      110             :         struct list_head        fragments;
      111             : 
      112             :         /* refcount extent we're examining */
      113             :         xfs_rgblock_t           bno;
      114             :         xfs_extlen_t            len;
      115             :         xfs_nlink_t             refcount;
      116             : 
      117             :         /* number of owners seen */
      118             :         xfs_nlink_t             seen;
      119             : };
     120             : 
     121             : /*
     122             :  * Decide if the given rmap is large enough that we can redeem it
     123             :  * towards refcount verification now, or if it's a fragment, in
     124             :  * which case we'll hang onto it in the hopes that we'll later
     125             :  * discover that we've collected exactly the correct number of
     126             :  * fragments as the rtrefcountbt says we should have.
     127             :  */
     128             : STATIC int
     129  1174477149 : xchk_rtrefcountbt_rmap_check(
     130             :         struct xfs_btree_cur            *cur,
     131             :         const struct xfs_rmap_irec      *rec,
     132             :         void                            *priv)
     133             : {
     134  1174477149 :         struct xchk_rtrefcnt_check      *refchk = priv;
     135  1174477149 :         struct xchk_rtrefcnt_frag       *frag;
     136  1174477149 :         xfs_rgblock_t                   rm_last;
     137  1174477149 :         xfs_rgblock_t                   rc_last;
     138  1174477149 :         int                             error = 0;
     139             : 
     140  1174477149 :         if (xchk_should_terminate(refchk->sc, &error))
     141           0 :                 return error;
     142             : 
     143  1174348761 :         rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
     144  1174348761 :         rc_last = refchk->bno + refchk->len - 1;
     145             : 
     146             :         /* Confirm that a single-owner refc extent is a CoW stage. */
     147  1174348761 :         if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
     148           0 :                 xchk_btree_xref_set_corrupt(refchk->sc, cur, 0);
     149           0 :                 return 0;
     150             :         }
     151             : 
     152  1174348761 :         if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
     153             :                 /*
     154             :                  * The rmap overlaps the refcount record, so we can confirm
     155             :                  * one refcount owner seen.
     156             :                  */
     157  1131687295 :                 refchk->seen++;
     158             :         } else {
     159             :                 /*
     160             :                  * This rmap covers only part of the refcount record, so
     161             :                  * save the fragment for later processing.  If the rmapbt
     162             :                  * is healthy each rmap_irec we see will be in agbno order
     163             :                  * so we don't need insertion sort here.
     164             :                  */
     165    42661466 :                 frag = kmalloc(sizeof(struct xchk_rtrefcnt_frag),
     166             :                                 XCHK_GFP_FLAGS);
     167    43223935 :                 if (!frag)
     168             :                         return -ENOMEM;
     169    86447870 :                 memcpy(&frag->rm, rec, sizeof(frag->rm));
     170    43223935 :                 list_add_tail(&frag->list, &refchk->fragments);
     171             :         }
     172             : 
     173             :         return 0;
     174             : }
     175             : 
     176             : /*
     177             :  * Given a bunch of rmap fragments, iterate through them, keeping
     178             :  * a running tally of the refcount.  If this ever deviates from
     179             :  * what we expect (which is the rtrefcountbt's refcount minus the
     180             :  * number of extents that totally covered the rtrefcountbt extent),
     181             :  * we have a rtrefcountbt error.
     182             :  */
      183             : STATIC void
      184    63092784 : xchk_rtrefcountbt_process_rmap_fragments(
      185             :         struct xchk_rtrefcnt_check      *refchk)
      186             : {
      187    63092784 :         struct list_head                worklist;
      188    63092784 :         struct xchk_rtrefcnt_frag       *frag;
      189    63092784 :         struct xchk_rtrefcnt_frag       *n;
      190    63092784 :         xfs_rgblock_t                   bno;
      191    63092784 :         xfs_rgblock_t                   rbno;
      192    63092784 :         xfs_rgblock_t                   next_rbno;
      193    63092784 :         xfs_nlink_t                     nr;
      194    63092784 :         xfs_nlink_t                     target_nr;
      195             : 
                      :         /* Owners still unaccounted for after the fully-overlapping rmaps. */
      196    63092784 :         target_nr = refchk->refcount - refchk->seen;
      197    63092784 :         if (target_nr == 0)
      198    60910193 :                 return;
      199             : 
      200             :         /*
      201             :          * There are (refchk->rc.rc_refcount - refchk->nr refcount)
      202             :          * references we haven't found yet.  Pull that many off the
      203             :          * fragment list and figure out where the smallest rmap ends
      204             :          * (and therefore the next rmap should start).  All the rmaps
      205             :          * we pull off should start at or before the beginning of the
      206             :          * refcount record's range.
      207             :          */
      208     2182591 :         INIT_LIST_HEAD(&worklist);
      209     2182591 :         rbno = NULLRGBLOCK;
      210             : 
      211             :         /* Make sure the fragments actually /are/ in bno order. */
      212     2182591 :         bno = 0;
      213    45039375 :         list_for_each_entry(frag, &refchk->fragments, list) {
      214    42856784 :                 if (frag->rm.rm_startblock < bno)
      215           0 :                         goto done;
      216    42856784 :                 bno = frag->rm.rm_startblock;
      217             :         }
      218             : 
      219             :         /*
      220             :          * Find all the rmaps that start at or before the refc extent,
      221             :          * and put them on the worklist.
      222             :          */
      223     2182591 :         nr = 0;
      224    13956612 :         list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
      225    13956612 :                 if (frag->rm.rm_startblock > refchk->bno || nr > target_nr)
      226             :                         break;
      227    11774021 :                 bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
      228    11774021 :                 if (bno < rbno)
      229             :                         rbno = bno;
      230    11774021 :                 list_move_tail(&frag->list, &worklist);
      231    11774021 :                 nr++;
      232             :         }
      233             : 
      234             :         /*
      235             :          * We should have found exactly $target_nr rmap fragments starting
      236             :          * at or before the refcount extent.
      237             :          */
      238     2182591 :         if (nr != target_nr)
      239           0 :                 goto done;
      240             : 
                      :         /* March rbno across the extent, consuming fragments as we go. */
      241     4646779 :         while (!list_empty(&refchk->fragments)) {
      242             :                 /* Discard any fragments ending at rbno from the worklist. */
      243     2464188 :                 nr = 0;
      244     2464188 :                 next_rbno = NULLRGBLOCK;
      245    55112810 :                 list_for_each_entry_safe(frag, n, &worklist, list) {
      246    52648622 :                         bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
      247    52648622 :                         if (bno != rbno) {
      248    21109774 :                                 if (bno < next_rbno)
      249             :                                         next_rbno = bno;
      250    21109774 :                                 continue;
      251             :                         }
      252    31538848 :                         list_del(&frag->list);
      253    31502945 :                         kfree(frag);
      254    31538848 :                         nr++;
      255             :                 }
      256             : 
      257             :                 /* Try to add nr rmaps starting at rbno to the worklist. */
      258    31256656 :                 list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
      259    31256656 :                         bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
      260    31256656 :                         if (frag->rm.rm_startblock != rbno)
      261           0 :                                 goto done;
      262    31256656 :                         list_move_tail(&frag->list, &worklist);
      263    31256656 :                         if (next_rbno > bno)
      264             :                                 next_rbno = bno;
      265    31256656 :                         nr--;
      266    31256656 :                         if (nr == 0)
      267             :                                 break;
      268             :                 }
      269             : 
      270             :                 /*
      271             :                  * If we get here and nr > 0, this means that we added fewer
      272             :                  * items to the worklist than we discarded because the fragment
      273             :                  * list ran out of items.  Therefore, we cannot maintain the
      274             :                  * required refcount.  Something is wrong, so we're done.
      275             :                  */
      276     2464188 :                 if (nr)
      277           0 :                         goto done;
      278             : 
      279             :                 rbno = next_rbno;
      280             :         }
      281             : 
      282             :         /*
      283             :          * Make sure the last extent we processed ends at or beyond
      284             :          * the end of the refcount extent.
      285             :          */
      286     2182591 :         if (rbno < refchk->bno + refchk->len)
      287           0 :                 goto done;
      288             : 
      289             :         /* Actually record us having seen the remaining refcount. */
      290     2182591 :         refchk->seen = refchk->refcount;
      291     2182591 : done:
      292             :         /* Delete fragments and work list. */
      293    13971294 :         list_for_each_entry_safe(frag, n, &worklist, list) {
      294    11788703 :                 list_del(&frag->list);
      295    11786260 :                 kfree(frag);
      296             :         }
      297     2182591 :         list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
      298           0 :                 list_del(&frag->list);
      299           0 :                 kfree(frag);
      300             :         }
      301             : }
     302             : 
     303             : /* Use the rmap entries covering this extent to verify the refcount. */
      304             : STATIC void
      305    63092649 : xchk_rtrefcountbt_xref_rmap(
      306             :         struct xfs_scrub                *sc,
      307             :         const struct xfs_refcount_irec  *irec)
      308             : {
      309    63092649 :         struct xchk_rtrefcnt_check      refchk = {
      310             :                 .sc                     = sc,
      311    63092649 :                 .bno                    = irec->rc_startblock,
      312    63092649 :                 .len                    = irec->rc_blockcount,
      313    63092649 :                 .refcount               = irec->rc_refcount,
      314             :                 .seen                   = 0,
      315             :         };
      316    63092649 :         struct xfs_rmap_irec            low;
      317    63092649 :         struct xfs_rmap_irec            high;
      318    63092649 :         struct xchk_rtrefcnt_frag       *frag;
      319    63092649 :         struct xchk_rtrefcnt_frag       *n;
      320    63092649 :         int                             error;
      321             : 
                      :         /* Nothing to do without a rmap cursor or if xref is disabled. */
      322    63092649 :         if (!sc->sr.rmap_cur || xchk_skip_xref(sc->sm))
      323           0 :                 return;
      324             : 
      325             :         /* Cross-reference with the rmapbt to confirm the refcount. */
      326    63092649 :         memset(&low, 0, sizeof(low));
      327    63092649 :         low.rm_startblock = irec->rc_startblock;
      328    63092649 :         memset(&high, 0xFF, sizeof(high));
      329    63092649 :         high.rm_startblock = irec->rc_startblock + irec->rc_blockcount - 1;
      330             : 
      331    63092649 :         INIT_LIST_HEAD(&refchk.fragments);
      332    63092649 :         error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
      333             :                         xchk_rtrefcountbt_rmap_check, &refchk);
      334    63092763 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
      335           0 :                 goto out_free;
      336             : 
                      :         /* Fold partial-overlap fragments into refchk.seen, if possible. */
      337    63092744 :         xchk_rtrefcountbt_process_rmap_fragments(&refchk);
      338    63092756 :         if (irec->rc_refcount != refchk.seen)
      339           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
      340             : 
      341    63092756 : out_free:
                      :         /* Free any fragments still on the list. */
      342    63092781 :         list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
      343           0 :                 list_del(&frag->list);
      344           0 :                 kfree(frag);
      345             :         }
      346             : }
     347             : 
     348             : /* Cross-reference with the other btrees. */
     349             : STATIC void
     350    63092787 : xchk_rtrefcountbt_xref(
     351             :         struct xfs_scrub                *sc,
     352             :         const struct xfs_refcount_irec  *irec)
     353             : {
     354    63092787 :         xfs_rtblock_t                   rtbno;
     355             : 
     356    63092787 :         if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
     357             :                 return;
     358             : 
     359    63092804 :         rtbno = xfs_rgbno_to_rtb(sc->mp, sc->sr.rtg->rtg_rgno,
     360    63092804 :                         irec->rc_startblock);
     361    63092791 :         xchk_xref_is_used_rt_space(sc, rtbno, irec->rc_blockcount);
     362    63092683 :         xchk_rtrefcountbt_xref_rmap(sc, irec);
     363             : }
     364             : 
                      : /* State carried between per-record calls of the rtrefcountbt scrubber. */
      365             : struct xchk_rtrefcbt_records {
      366             :         /* Previous refcount record. */
      367             :         struct xfs_refcount_irec        prev_rec;
      368             : 
      369             :         /* The next rtgroup block where we aren't expecting shared extents. */
      370             :         xfs_rgblock_t                   next_unshared_rgbno;
      371             : 
      372             :         /* Number of CoW blocks we expect. */
      373             :         xfs_extlen_t                    cow_blocks;
      374             : 
      375             :         /* Was the last record a shared or CoW staging extent? */
      376             :         enum xfs_refc_domain            prev_domain;
      377             : };
     378             : 
     379             : static inline bool
     380             : xchk_rtrefcount_mergeable(
     381             :         struct xchk_rtrefcbt_records    *rrc,
     382             :         const struct xfs_refcount_irec  *r2)
     383             : {
     384    63092859 :         const struct xfs_refcount_irec  *r1 = &rrc->prev_rec;
     385             : 
     386             :         /* Ignore if prev_rec is not yet initialized. */
     387    63092859 :         if (r1->rc_blockcount > 0)
     388             :                 return false;
     389             : 
     390       93087 :         if (r1->rc_startblock + r1->rc_blockcount != r2->rc_startblock)
     391             :                 return false;
     392           0 :         if (r1->rc_refcount != r2->rc_refcount)
     393             :                 return false;
     394             :         if ((unsigned long long)r1->rc_blockcount + r2->rc_blockcount >
     395             :                         XFS_REFC_LEN_MAX)
     396             :                 return false;
     397             : 
     398             :         return true;
     399             : }
     400             : 
     401             : /* Flag failures for records that could be merged. */
     402             : STATIC void
     403    63092859 : xchk_rtrefcountbt_check_mergeable(
     404             :         struct xchk_btree               *bs,
     405             :         struct xchk_rtrefcbt_records    *rrc,
     406             :         const struct xfs_refcount_irec  *irec)
     407             : {
     408    63092859 :         if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
     409             :                 return;
     410             : 
     411    63092859 :         if (xchk_rtrefcount_mergeable(rrc, irec))
     412           0 :                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
     413             : 
     414   126185718 :         memcpy(&rrc->prev_rec, irec, sizeof(struct xfs_refcount_irec));
     415             : }
     416             : 
     417             : STATIC int
     418  2044378540 : xchk_rtrefcountbt_rmap_check_gap(
     419             :         struct xfs_btree_cur            *cur,
     420             :         const struct xfs_rmap_irec      *rec,
     421             :         void                            *priv)
     422             : {
     423  2044378540 :         xfs_rgblock_t                   *next_bno = priv;
     424             : 
     425  2044378540 :         if (*next_bno != NULLRGBLOCK && rec->rm_startblock < *next_bno)
     426             :                 return -ECANCELED;
     427             : 
     428  2044378540 :         *next_bno = rec->rm_startblock + rec->rm_blockcount;
     429  2044378540 :         return 0;
     430             : }
     431             : 
     432             : /*
     433             :  * Make sure that a gap in the reference count records does not correspond to
     434             :  * overlapping records (i.e. shared extents) in the reverse mappings.
     435             :  */
      436             : static inline void
      437    51553766 : xchk_rtrefcountbt_xref_gaps(
      438             :         struct xfs_scrub        *sc,
      439             :         struct xchk_rtrefcbt_records *rrc,
      440             :         xfs_rtblock_t           bno)
      441             : {
      442    51553766 :         struct xfs_rmap_irec    low;
      443    51553766 :         struct xfs_rmap_irec    high;
      444    51553766 :         xfs_rgblock_t           next_bno = NULLRGBLOCK;
      445    51553766 :         int                     error;
      446             : 
                      :         /* Bail if there is no gap, no rmap cursor, or xref is disabled. */
      447    51553766 :         if (bno <= rrc->next_unshared_rgbno || !sc->sr.rmap_cur ||
      448    36487831 :             xchk_skip_xref(sc->sm))
      449    15065936 :                 return;
      450             : 
                      :         /* Query rmaps in [next_unshared_rgbno, bno - 1] for overlaps. */
      451    36487830 :         memset(&low, 0, sizeof(low));
      452    36487830 :         low.rm_startblock = rrc->next_unshared_rgbno;
      453    36487830 :         memset(&high, 0xFF, sizeof(high));
      454    36487830 :         high.rm_startblock = bno - 1;
      455             : 
                      :         /* The callback returns -ECANCELED on the first overlapping rmap. */
      456    36487830 :         error = xfs_rmap_query_range(sc->sr.rmap_cur, &low, &high,
      457             :                         xchk_rtrefcountbt_rmap_check_gap, &next_bno);
      458    36487827 :         if (error == -ECANCELED)
      459           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
      460             :         else
      461    36487827 :                 xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur);
      462             : }
     463             : 
     464             : /* Scrub a rtrefcountbt record. */
      465             : STATIC int
      466    63092767 : xchk_rtrefcountbt_rec(
      467             :         struct xchk_btree               *bs,
      468             :         const union xfs_btree_rec       *rec)
      469             : {
      470    63092767 :         struct xfs_mount                *mp = bs->cur->bc_mp;
      471    63092767 :         struct xchk_rtrefcbt_records    *rrc = bs->private;
      472    63092767 :         struct xfs_refcount_irec        irec;
      473    63092767 :         u32                             mod;
      474             : 
                      :         /* Decode the ondisk record; a non-NULL return means it's garbage. */
      475    63092767 :         xfs_refcount_btrec_to_irec(rec, &irec);
      476    63092755 :         if (xfs_refcount_check_irec(bs->cur, &irec) != NULL) {
      477           0 :                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
      478           0 :                 return 0;
      479             :         }
      480             : 
      481             :         /* We can only share full rt extents. */
      482    63092886 :         xfs_rtb_to_rtx(mp, irec.rc_startblock, &mod);
      483    63092851 :         if (mod)
      484           0 :                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
      485    63092851 :         xfs_rtb_to_rtx(mp, irec.rc_blockcount, &mod);
      486    63092873 :         if (mod)
      487           0 :                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
      488             : 
                      :         /* Tally CoW staging blocks for later cross-checking. */
      489    63092873 :         if (irec.rc_domain == XFS_REFC_DOMAIN_COW)
      490    11706546 :                 rrc->cow_blocks += irec.rc_blockcount;
      491             : 
      492             :         /* Shared records always come before CoW records. */
      493    63092873 :         if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED &&
      494    51386339 :             rrc->prev_domain == XFS_REFC_DOMAIN_COW)
      495           0 :                 xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
      496    63092873 :         rrc->prev_domain = irec.rc_domain;
      497             : 
      498    63092873 :         xchk_rtrefcountbt_check_mergeable(bs, rrc, &irec);
      499    63092772 :         xchk_rtrefcountbt_xref(bs->sc, &irec);
      500             : 
      501             :         /*
      502             :          * If this is a record for a shared extent, check that all blocks
      503             :          * between the previous record and this one have at most one reverse
      504             :          * mapping.
      505             :          */
      506    63092818 :         if (irec.rc_domain == XFS_REFC_DOMAIN_SHARED) {
      507    51386300 :                 xchk_rtrefcountbt_xref_gaps(bs->sc, rrc, irec.rc_startblock);
      508    51386287 :                 rrc->next_unshared_rgbno = irec.rc_startblock +
      509    51386287 :                                            irec.rc_blockcount;
      510             :         }
      511             : 
      512             :         return 0;
      513             : }
     514             : 
     515             : /* Make sure we have as many refc blocks as the rmap says. */
      516             : STATIC void
      517      167456 : xchk_refcount_xref_rmap(
      518             :         struct xfs_scrub        *sc,
      519             :         const struct xfs_owner_info *btree_oinfo,
      520             :         xfs_extlen_t            cow_blocks)
      521             : {
      522      167456 :         xfs_extlen_t            refcbt_blocks = 0;
      523      167456 :         xfs_filblks_t           blocks;
      524      167456 :         int                     error;
      525             : 
                      :         /*
                      :          * NOTE(review): the counters show this guard returned early on
                      :          * every call; confirm that sc->sa.rmap_cur is ever populated for
                      :          * realtime scrubs, or the checks below can never run.
                      :          */
      526      167456 :         if (!sc->sr.rmap_cur || !sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
      527      167456 :                 return;
      528             : 
      529             :         /* Check that we saw as many refcbt blocks as the rmap knows about. */
      530           0 :         error = xfs_btree_count_blocks(sc->sr.refc_cur, &refcbt_blocks);
      531           0 :         if (!xchk_btree_process_error(sc, sc->sr.refc_cur, 0, &error))
      532             :                 return;
      533           0 :         error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, btree_oinfo,
      534             :                         &blocks);
      535           0 :         if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
      536             :                 return;
      537           0 :         if (blocks != refcbt_blocks)
      538           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
      539             : 
      540             :         /* Check that we saw as many cow blocks as the rmap knows about. */
      541           0 :         error = xchk_count_rmap_ownedby_ag(sc, sc->sr.rmap_cur,
      542             :                         &XFS_RMAP_OINFO_COW, &blocks);
      543           0 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.rmap_cur))
      544             :                 return;
      545           0 :         if (blocks != cow_blocks)
      546           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.rmap_cur, 0);
      547             : }
     548             : 
     549             : /* Scrub the refcount btree for some AG. */
     550             : int
     551      167439 : xchk_rtrefcountbt(
     552             :         struct xfs_scrub        *sc)
     553             : {
     554      167439 :         struct xfs_owner_info   btree_oinfo;
     555      167439 :         struct xchk_rtrefcbt_records rrc = {
     556             :                 .cow_blocks             = 0,
     557             :                 .next_unshared_rgbno    = 0,
     558             :                 .prev_domain            = XFS_REFC_DOMAIN_SHARED,
     559             :         };
     560      167439 :         int                     error;
     561             : 
     562      167439 :         error = xchk_metadata_inode_forks(sc);
     563      167457 :         if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
     564             :                 return error;
     565             : 
     566      167457 :         xfs_rmap_ino_bmbt_owner(&btree_oinfo, sc->sr.rtg->rtg_refcountip->i_ino,
     567             :                         XFS_DATA_FORK);
     568      167457 :         error = xchk_btree(sc, sc->sr.refc_cur, xchk_rtrefcountbt_rec,
     569             :                         &btree_oinfo, &rrc);
     570      167457 :         if (error)
     571           1 :                 goto out_unlock;
     572             : 
     573             :         /*
     574             :          * Check that all blocks between the last refcount > 1 record and the
     575             :          * end of the rt volume have at most one reverse mapping.
     576             :          */
     577      167456 :         xchk_rtrefcountbt_xref_gaps(sc, &rrc, sc->mp->m_sb.sb_rblocks);
     578             : 
     579      167456 :         xchk_refcount_xref_rmap(sc, &btree_oinfo, rrc.cow_blocks);
     580             : 
     581             : out_unlock:
     582             :         return error;
     583             : }
     584             : 
     585             : /* xref check that a cow staging extent is marked in the rtrefcountbt. */
     586             : void
     587      219296 : xchk_xref_is_rt_cow_staging(
     588             :         struct xfs_scrub                *sc,
     589             :         xfs_rgblock_t                   bno,
     590             :         xfs_extlen_t                    len)
     591             : {
     592      219296 :         struct xfs_refcount_irec        rc;
     593      219296 :         int                             has_refcount;
     594      219296 :         int                             error;
     595             : 
     596      219296 :         if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
     597           0 :                 return;
     598             : 
     599             :         /* Find the CoW staging extent. */
     600      219296 :         error = xfs_refcount_lookup_le(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
     601             :                         bno, &has_refcount);
     602      219298 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
     603             :                 return;
     604      219296 :         if (!has_refcount) {
     605           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
     606           0 :                 return;
     607             :         }
     608             : 
     609      219296 :         error = xfs_refcount_get_rec(sc->sr.refc_cur, &rc, &has_refcount);
     610      219293 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
     611             :                 return;
     612      219295 :         if (!has_refcount) {
     613           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
     614           0 :                 return;
     615             :         }
     616             : 
     617             :         /* CoW lookup returned a shared extent record? */
     618      219295 :         if (rc.rc_domain != XFS_REFC_DOMAIN_COW)
     619           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
     620             : 
     621             :         /* Must be at least as long as what was passed in */
     622      219295 :         if (rc.rc_blockcount < len)
     623           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
     624             : }
     625             : 
     626             : /*
     627             :  * xref check that the extent is not shared.  Only file data blocks
     628             :  * can have multiple owners.
     629             :  */
     630             : void
     631    55842177 : xchk_xref_is_not_rt_shared(
     632             :         struct xfs_scrub        *sc,
     633             :         xfs_rgblock_t           bno,
     634             :         xfs_extlen_t            len)
     635             : {
     636    55842177 :         enum xbtree_recpacking  outcome;
     637    55842177 :         int                     error;
     638             : 
     639    55842177 :         if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
     640       30764 :                 return;
     641             : 
     642    55811413 :         error = xfs_refcount_has_records(sc->sr.refc_cur,
     643             :                         XFS_REFC_DOMAIN_SHARED, bno, len, &outcome);
     644    55812268 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
     645             :                 return;
     646    55811178 :         if (outcome != XBTREE_RECPACKING_EMPTY)
     647           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
     648             : }
     649             : 
     650             : /* xref check that the extent is not being used for CoW staging. */
     651             : void
     652   207324684 : xchk_xref_is_not_rt_cow_staging(
     653             :         struct xfs_scrub        *sc,
     654             :         xfs_rgblock_t           bno,
     655             :         xfs_extlen_t            len)
     656             : {
     657   207324684 :         enum xbtree_recpacking  outcome;
     658   207324684 :         int                     error;
     659             : 
     660   207324684 :         if (!sc->sr.refc_cur || xchk_skip_xref(sc->sm))
     661       30762 :                 return;
     662             : 
     663   207293922 :         error = xfs_refcount_has_records(sc->sr.refc_cur, XFS_REFC_DOMAIN_COW,
     664             :                         bno, len, &outcome);
     665   207284105 :         if (!xchk_should_check_xref(sc, &error, &sc->sr.refc_cur))
     666             :                 return;
     667   207143508 :         if (outcome != XBTREE_RECPACKING_EMPTY)
     668           0 :                 xchk_btree_xref_set_corrupt(sc, sc->sr.refc_cur, 0);
     669             : }

Generated by: LCOV version 1.14