LCOV - code coverage report
Current view: top level - fs/xfs/scrub - newbt.c (source / functions) Hit Total Coverage
Test: fstests of 6.5.0-rc3-achx @ Mon Jul 31 20:08:12 PDT 2023 Lines: 231 263 87.8 %
Date: 2023-07-31 20:08:12 Functions: 20 21 95.2 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-or-later
       2             : /*
       3             :  * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
       4             :  * Author: Darrick J. Wong <djwong@kernel.org>
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_trans_resv.h"
      11             : #include "xfs_mount.h"
      12             : #include "xfs_btree.h"
      13             : #include "xfs_btree_staging.h"
      14             : #include "xfs_log_format.h"
      15             : #include "xfs_trans.h"
      16             : #include "xfs_log.h"
      17             : #include "xfs_sb.h"
      18             : #include "xfs_inode.h"
      19             : #include "xfs_alloc.h"
      20             : #include "xfs_rmap.h"
      21             : #include "xfs_ag.h"
      22             : #include "xfs_defer.h"
      23             : #include "xfs_extfree_item.h"
      24             : #include "scrub/scrub.h"
      25             : #include "scrub/common.h"
      26             : #include "scrub/trace.h"
      27             : #include "scrub/repair.h"
      28             : #include "scrub/newbt.h"
      29             : 
      30             : /*
      31             :  * Estimate proper slack values for a btree that's being reloaded.
      32             :  *
      33             :  * Under most circumstances, we'll take whatever default loading value the
      34             :  * btree bulk loading code calculates for us.  However, there are some
      35             :  * exceptions to this rule:
      36             :  *
      37             :  * (1) If someone turned one of the debug knobs.
      38             :  * (2) If this is a per-AG btree and the AG has less than ~9% space free.
      39             :  * (3) If this is an inode btree and the FS has less than ~9% space free.
      40             :  *
      41             :  * Note that we actually use 3/32 for the comparison to avoid division.
      42             :  */
      43             : static void
      44     2841414 : xrep_newbt_estimate_slack(
      45             :         struct xrep_newbt       *xnr)
      46             : {
      47     2841414 :         struct xfs_scrub        *sc = xnr->sc;
      48     2841414 :         struct xfs_btree_bload  *bload = &xnr->bload;
      49     2841414 :         uint64_t                free;
      50     2841414 :         uint64_t                sz;
      51             : 
      52             :         /*
      53             :          * The xfs_globals values are set to -1 (i.e. take the bload defaults)
      54             :          * unless someone has set them otherwise, so we just pull the values
      55             :          * here.
      56             :          */
      57     2841414 :         bload->leaf_slack = xfs_globals.bload_leaf_slack;
      58     2841414 :         bload->node_slack = xfs_globals.bload_node_slack;
      59             : 
      60     2841414 :         if (sc->ops->type == ST_PERAG) {
      61      272737 :                 free = sc->sa.pag->pagf_freeblks;
      62      272737 :                 sz = xfs_ag_block_count(sc->mp, sc->sa.pag->pag_agno);
      63             :         } else {
      64     2568677 :                 free = percpu_counter_sum(&sc->mp->m_fdblocks);
      65     2568677 :                 sz = sc->mp->m_sb.sb_dblocks;
      66             :         }
      67             : 
      68             :         /* No further changes if there's more than 3/32ths space left. */
      69     2841306 :         if (free >= ((sz * 3) >> 5))
      70             :                 return;
      71             : 
      72             :         /* We're low on space; load the btrees as tightly as possible. */
      73       33107 :         if (bload->leaf_slack < 0)
      74       33110 :                 bload->leaf_slack = 0;
      75       33107 :         if (bload->node_slack < 0)
      76       33111 :                 bload->node_slack = 0;
      77             : }
      78             : 
      79             : /* Initialize accounting resources for staging a new AG btree. */
      80             : void
      81     2841447 : xrep_newbt_init_ag(
      82             :         struct xrep_newbt               *xnr,
      83             :         struct xfs_scrub                *sc,
      84             :         const struct xfs_owner_info     *oinfo,
      85             :         xfs_fsblock_t                   alloc_hint,
      86             :         enum xfs_ag_resv_type           resv)
      87             : {
      88     2841447 :         memset(xnr, 0, sizeof(struct xrep_newbt));
      89     2841447 :         xnr->sc = sc;
      90     2841447 :         xnr->oinfo = *oinfo; /* structure copy */
      91     2841447 :         xnr->alloc_hint = alloc_hint;
      92     2841447 :         xnr->resv = resv;
      93     2841447 :         INIT_LIST_HEAD(&xnr->resv_list);
      94     2841447 :         xnr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
      95     2841447 :         xrep_newbt_estimate_slack(xnr);
      96     2841310 : }
      97             : 
      98             : /* Initialize accounting resources for staging a new inode fork btree. */
      99             : int
     100     2568677 : xrep_newbt_init_inode(
     101             :         struct xrep_newbt               *xnr,
     102             :         struct xfs_scrub                *sc,
     103             :         int                             whichfork,
     104             :         const struct xfs_owner_info     *oinfo)
     105             : {
     106     2568677 :         struct xfs_ifork                *ifp;
     107             : 
     108     2568677 :         ifp = kmem_cache_zalloc(xfs_ifork_cache, XCHK_GFP_FLAGS);
     109     2568677 :         if (!ifp)
     110             :                 return -ENOMEM;
     111             : 
     112    12843385 :         xrep_newbt_init_ag(xnr, sc, oinfo,
     113     2568677 :                         XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino),
     114             :                         XFS_AG_RESV_NONE);
     115     2568677 :         xnr->ifake.if_fork = ifp;
     116     2568677 :         xnr->ifake.if_fork_size = xfs_inode_fork_size(sc->ip, whichfork);
     117     2568677 :         xnr->ifake.if_whichfork = whichfork;
     118     2568677 :         return 0;
     119             : }
     120             : 
     121             : /*
     122             :  * Initialize accounting resources for staging a new btree.  Callers are
     123             :  * expected to add their own reservations (and clean them up) manually.
     124             :  */
     125             : void
     126       79731 : xrep_newbt_init_bare(
     127             :         struct xrep_newbt               *xnr,
     128             :         struct xfs_scrub                *sc)
     129             : {
     130       79731 :         xrep_newbt_init_ag(xnr, sc, &XFS_RMAP_OINFO_ANY_OWNER, NULLFSBLOCK,
     131             :                         XFS_AG_RESV_NONE);
     132       79706 : }
     133             : 
     134             : /*
     135             :  * Set up automatic reaping of the blocks reserved for btree reconstruction in
     136             :  * case we crash by logging a deferred free item for each extent we allocate so
     137             :  * that we can get all of the space back if we crash before we can commit the
     138             :  * new btree.  This function returns a token that can be used to cancel
     139             :  * automatic reaping if repair is successful.
     140             :  */
     141             : static int
     142      232468 : xrep_newbt_schedule_autoreap(
     143             :         struct xrep_newbt               *xnr,
     144             :         struct xrep_newbt_resv          *resv)
     145             : {
     146      232468 :         struct xfs_extent_free_item     efi_item = {
     147      232468 :                 .xefi_blockcount        = resv->len,
     148      232468 :                 .xefi_owner             = xnr->oinfo.oi_owner,
     149             :                 .xefi_flags             = XFS_EFI_SKIP_DISCARD,
     150      232468 :                 .xefi_pag               = resv->pag,
     151             :         };
     152      232468 :         struct xfs_scrub                *sc = xnr->sc;
     153      232468 :         struct xfs_log_item             *lip;
     154      232468 :         LIST_HEAD(items);
     155             : 
     156      232468 :         ASSERT(xnr->oinfo.oi_offset == 0);
     157             : 
     158      232468 :         efi_item.xefi_startblock = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno,
     159             :                         resv->agbno);
     160      232468 :         if (xnr->oinfo.oi_flags & XFS_OWNER_INFO_ATTR_FORK)
     161           3 :                 efi_item.xefi_flags |= XFS_EFI_ATTR_FORK;
     162      232468 :         if (xnr->oinfo.oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
     163       23946 :                 efi_item.xefi_flags |= XFS_EFI_BMBT_BLOCK;
     164             : 
     165      232468 :         INIT_LIST_HEAD(&efi_item.xefi_list);
     166      232468 :         list_add(&efi_item.xefi_list, &items);
     167             : 
     168      232300 :         xfs_perag_intent_hold(resv->pag);
     169      232490 :         lip = xfs_extent_free_defer_type.create_intent(sc->tp, &items, 1,
     170             :                         false);
     171      232465 :         ASSERT(lip != NULL && !IS_ERR(lip));
     172             : 
     173      232465 :         resv->efi = lip;
     174      232465 :         return 0;
     175             : }
     176             : 
     177             : /*
     178             :  * Earlier, we logged EFIs for the extents that we allocated to hold the new
     179             :  * btree so that we could automatically roll back those allocations if the
     180             :  * system crashed.  Now we log an EFD to cancel the EFI, either because the
     181             :  * repair succeeded and the new blocks are in use; or because the repair was
     182             :  * cancelled and we're about to free the extents directly.
     183             :  */
     184             : static inline void
     185      232325 : xrep_newbt_finish_autoreap(
     186             :         struct xfs_scrub        *sc,
     187             :         struct xrep_newbt_resv  *resv)
     188             : {
     189      232325 :         struct xfs_efd_log_item *efdp;
     190      232325 :         struct xfs_extent       *extp;
     191      232325 :         struct xfs_log_item     *efd_lip;
     192             : 
     193      232325 :         efd_lip = xfs_extent_free_defer_type.create_done(sc->tp, resv->efi, 1);
     194      232498 :         efdp = container_of(efd_lip, struct xfs_efd_log_item, efd_item);
     195      232498 :         extp = efdp->efd_format.efd_extents;
     196      232498 :         extp->ext_start = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno,
     197             :                                          resv->agbno);
     198      232498 :         extp->ext_len = resv->len;
     199      232498 :         efdp->efd_next_extent++;
     200      232498 :         set_bit(XFS_LI_DIRTY, &efd_lip->li_flags);
     201      232596 :         xfs_perag_intent_rele(resv->pag);
     202      232550 : }
     203             : 
     204             : /* Abort an EFI logged for a new btree block reservation. */
     205             : static inline void
     206           0 : xrep_newbt_cancel_autoreap(
     207             :         struct xrep_newbt_resv  *resv)
     208             : {
     209           0 :         xfs_extent_free_defer_type.abort_intent(resv->efi);
     210           0 :         xfs_perag_intent_rele(resv->pag);
     211           0 : }
     212             : 
     213             : /*
     214             :  * Relog the EFIs attached to a staging btree so that we don't pin the log
     215             :  * tail.  Same logic as xfs_defer_relog.
     216             :  */
     217             : int
     218      706002 : xrep_newbt_relog_autoreap(
     219             :         struct xrep_newbt       *xnr)
     220             : {
     221      706002 :         struct xrep_newbt_resv  *resv;
     222      706002 :         unsigned int            efi_bytes = 0;
     223             : 
     224     2015602 :         list_for_each_entry(resv, &xnr->resv_list, list) {
     225             :                 /*
     226             :                  * If the log intent item for this deferred op is in a
     227             :                  * different checkpoint, relog it to keep the log tail moving
     228             :                  * forward.  We're ok with this being racy because an incorrect
     229             :                  * decision means we'll be a little slower at pushing the tail.
     230             :                  */
     231     1309510 :                 if (!resv->efi || xfs_log_item_in_current_chkpt(resv->efi))
     232     1306390 :                         continue;
     233             : 
     234        3208 :                 resv->efi = xfs_trans_item_relog(resv->efi, xnr->sc->tp);
     235             : 
     236             :                 /*
     237             :                  * If free space is very fragmented, it's possible that the new
     238             :                  * btree will be allocated a large number of small extents.
     239             :                  * On an active system, it's possible that so many of those
     240             :                  * EFIs will need relogging here that doing them all in one
     241             :                  * transaction will overflow the reservation.
     242             :                  *
     243             :                  * Each allocation for the new btree (xrep_newbt_resv) points
     244             :                  * to a unique single-mapping EFI, so each relog operation logs
     245             :                  * a single-mapping EFD followed by a new EFI.  Each single
     246             :                  * mapping EF[ID] item consumes about 128 bytes, so we'll
     247             :                  * assume 256 bytes per relog.  Roll if we consume more than
     248             :                  * half of the transaction reservation.
     249             :                  */
     250        3210 :                 efi_bytes += 256;
     251        3210 :                 if (efi_bytes > xnr->sc->tp->t_log_res / 2) {
     252           0 :                         int     error;
     253             : 
     254           0 :                         error = xrep_roll_trans(xnr->sc);
     255           0 :                         if (error)
     256           0 :                                 return error;
     257             : 
     258             :                         efi_bytes = 0;
     259             :                 }
     260             :         }
     261             : 
     262      706092 :         if (xnr->sc->tp->t_flags & XFS_TRANS_DIRTY)
     263        3104 :                 return xrep_roll_trans(xnr->sc);
     264             :         return 0;
     265             : }
     266             : 
     267             : /*
     268             :  * Designate specific blocks to be used to build our new btree.  @pag must be
     269             :  * a passive reference.
     270             :  */
     271             : STATIC int
     272      272241 : xrep_newbt_add_blocks(
     273             :         struct xrep_newbt               *xnr,
     274             :         struct xfs_perag                *pag,
     275             :         xfs_agblock_t                   agbno,
     276             :         xfs_extlen_t                    len,
     277             :         bool                            autoreap)
     278             : {
     279      272241 :         struct xrep_newbt_resv          *resv;
     280      272241 :         int                             error;
     281             : 
     282      272241 :         resv = kmalloc(sizeof(struct xrep_newbt_resv), XCHK_GFP_FLAGS);
     283      272191 :         if (!resv)
     284             :                 return -ENOMEM;
     285             : 
     286      272191 :         INIT_LIST_HEAD(&resv->list);
     287      272191 :         resv->agbno = agbno;
     288      272191 :         resv->len = len;
     289      272191 :         resv->used = 0;
     290      272191 :         resv->pag = xfs_perag_hold(pag);
     291             : 
     292      272536 :         if (autoreap) {
     293      232471 :                 error = xrep_newbt_schedule_autoreap(xnr, resv);
     294      232464 :                 if (error)
     295           0 :                         goto out_pag;
     296             :         }
     297             : 
     298      272529 :         list_add_tail(&resv->list, &xnr->resv_list);
     299      272529 :         return 0;
     300             : out_pag:
     301           0 :         xfs_perag_put(resv->pag);
     302           0 :         kfree(resv);
     303           0 :         return error;
     304             : }
     305             : 
     306             : /*
     307             :  * Add an extent to the new btree reservation pool.  Callers are required to
     308             :  * handle any automatic reaping if the repair is cancelled.  @pag must be a
     309             :  * passive reference.
     310             :  */
     311             : int
     312       39874 : xrep_newbt_add_extent(
     313             :         struct xrep_newbt               *xnr,
     314             :         struct xfs_perag                *pag,
     315             :         xfs_agblock_t                   agbno,
     316             :         xfs_extlen_t                    len)
     317             : {
     318       39874 :         return xrep_newbt_add_blocks(xnr, pag, agbno, len, false);
     319             : }
     320             : 
     321             : /* Don't let our allocation hint take us beyond this AG */
     322             : static inline void
     323      208548 : xrep_newbt_validate_ag_alloc_hint(
     324             :         struct xrep_newbt       *xnr)
     325             : {
     326      208548 :         struct xfs_scrub        *sc = xnr->sc;
     327      208548 :         xfs_agnumber_t          agno = XFS_FSB_TO_AGNO(sc->mp, xnr->alloc_hint);
     328             : 
     329      416771 :         if (agno == sc->sa.pag->pag_agno &&
     330      208471 :             xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
     331             :                 return;
     332             : 
     333           1 :         xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, sc->sa.pag->pag_agno,
     334             :                                          XFS_AGFL_BLOCK(sc->mp) + 1);
     335             : }
     336             : 
     337             : /* Allocate disk space for a new per-AG btree. */
     338             : STATIC int
     339      193422 : xrep_newbt_alloc_ag_blocks(
     340             :         struct xrep_newbt       *xnr,
     341             :         uint64_t                nr_blocks)
     342             : {
     343      193422 :         struct xfs_scrub        *sc = xnr->sc;
     344      193422 :         int                     error = 0;
     345             : 
     346      193422 :         ASSERT(sc->sa.pag != NULL);
     347             : 
     348      401763 :         while (nr_blocks > 0) {
     349      208357 :                 struct xfs_alloc_arg    args = {
     350      208357 :                         .tp             = sc->tp,
     351      208357 :                         .mp             = sc->mp,
     352             :                         .oinfo          = xnr->oinfo,
     353             :                         .minlen         = 1,
     354             :                         .maxlen         = nr_blocks,
     355             :                         .prod           = 1,
     356      208357 :                         .resv           = xnr->resv,
     357             :                 };
     358             : 
     359      208357 :                 xrep_newbt_validate_ag_alloc_hint(xnr);
     360             : 
     361      208202 :                 if (xnr->alloc_vextent)
     362       18852 :                         error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
     363             :                 else
     364      189350 :                         error = xfs_alloc_vextent_near_bno(&args,
     365             :                                         xnr->alloc_hint);
     366      208561 :                 if (error)
     367          68 :                         return error;
     368      208561 :                 if (args.fsbno == NULLFSBLOCK)
     369             :                         return -ENOSPC;
     370             : 
     371      208493 :                 trace_xrep_newbt_alloc_ag_blocks(sc->mp, args.agno, args.agbno,
     372      208493 :                                 args.len, xnr->oinfo.oi_owner);
     373             : 
     374      208418 :                 error = xrep_newbt_add_blocks(xnr, sc->sa.pag, args.agbno,
     375             :                                 args.len, true);
     376      208467 :                 if (error)
     377           0 :                         return error;
     378             : 
     379      208467 :                 nr_blocks -= args.len;
     380      208467 :                 xnr->alloc_hint = args.fsbno + args.len;
     381             : 
     382      208467 :                 error = xrep_defer_finish(sc);
     383      208341 :                 if (error)
     384           0 :                         return error;
     385             :         }
     386             : 
     387             :         return 0;
     388             : }
     389             : 
     390             : /* Don't let our allocation hint take us beyond EOFS */
     391             : static inline void
     392       23946 : xrep_newbt_validate_file_alloc_hint(
     393             :         struct xrep_newbt       *xnr)
     394             : {
     395       23946 :         struct xfs_scrub        *sc = xnr->sc;
     396             : 
     397       23946 :         if (xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
     398             :                 return;
     399             : 
     400           0 :         xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, 0, XFS_AGFL_BLOCK(sc->mp) + 1);
     401             : }
     402             : 
     403             : /* Allocate disk space for our new file-based btree. */
     404             : STATIC int
     405       23757 : xrep_newbt_alloc_file_blocks(
     406             :         struct xrep_newbt       *xnr,
     407             :         uint64_t                nr_blocks)
     408             : {
     409       23757 :         struct xfs_scrub        *sc = xnr->sc;
     410       23757 :         int                     error = 0;
     411             : 
     412       47703 :         while (nr_blocks > 0) {
     413       23946 :                 struct xfs_alloc_arg    args = {
     414       23946 :                         .tp             = sc->tp,
     415       23946 :                         .mp             = sc->mp,
     416             :                         .oinfo          = xnr->oinfo,
     417             :                         .minlen         = 1,
     418             :                         .maxlen         = nr_blocks,
     419             :                         .prod           = 1,
     420       23946 :                         .resv           = xnr->resv,
     421             :                 };
     422       23946 :                 struct xfs_perag        *pag;
     423             : 
     424       23946 :                 xrep_newbt_validate_file_alloc_hint(xnr);
     425             : 
     426       23946 :                 if (xnr->alloc_vextent)
     427           0 :                         error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
     428             :                 else
     429       23946 :                         error = xfs_alloc_vextent_start_ag(&args,
     430             :                                         xnr->alloc_hint);
     431       23946 :                 if (error)
     432           0 :                         return error;
     433       23946 :                 if (args.fsbno == NULLFSBLOCK)
     434             :                         return -ENOSPC;
     435             : 
     436       23946 :                 trace_xrep_newbt_alloc_file_blocks(sc->mp, args.agno,
     437       23946 :                                 args.agbno, args.len, xnr->oinfo.oi_owner);
     438             : 
     439       23946 :                 pag = xfs_perag_get(sc->mp, args.agno);
     440       23946 :                 if (!pag) {
     441           0 :                         ASSERT(0);
     442           0 :                         return -EFSCORRUPTED;
     443             :                 }
     444             : 
     445       23946 :                 error = xrep_newbt_add_blocks(xnr, pag, args.agbno, args.len,
     446             :                                 true);
     447       23946 :                 xfs_perag_put(pag);
     448       23946 :                 if (error)
     449           0 :                         return error;
     450             : 
     451       23946 :                 nr_blocks -= args.len;
     452       23946 :                 xnr->alloc_hint = args.fsbno + args.len;
     453             : 
     454       23946 :                 error = xrep_defer_finish(sc);
     455       23946 :                 if (error)
     456           0 :                         return error;
     457             :         }
     458             : 
     459             :         return 0;
     460             : }
     461             : 
     462             : /* Allocate disk space for our new btree. */
     463             : int
     464      217207 : xrep_newbt_alloc_blocks(
     465             :         struct xrep_newbt       *xnr,
     466             :         uint64_t                nr_blocks)
     467             : {
     468      217207 :         if (xnr->sc->ip)
     469       23757 :                 return xrep_newbt_alloc_file_blocks(xnr, nr_blocks);
     470      193450 :         return xrep_newbt_alloc_ag_blocks(xnr, nr_blocks);
     471             : }
     472             : 
     473             : /*
     474             :  * How many extent freeing items can we attach to a transaction before we want
     475             :  * to finish the chain so that unreserving new btree blocks doesn't overrun
     476             :  * the transaction reservation?
     477             :  */
     478             : #define XREP_REAP_MAX_NEWBT_EFIS        (128)
     479             : 
     480             : /*
     481             :  * Free the unused part of an extent.  Returns the number of EFIs logged or
     482             :  * a negative errno.
     483             :  */
     484             : STATIC int
     485      232380 : xrep_newbt_free_extent(
     486             :         struct xrep_newbt       *xnr,
     487             :         struct xrep_newbt_resv  *resv,
     488             :         bool                    btree_committed)
     489             : {
     490      232380 :         struct xfs_scrub        *sc = xnr->sc;
     491      232380 :         xfs_agblock_t           free_agbno = resv->agbno;
     492      232380 :         xfs_extlen_t            free_aglen = resv->len;
     493      232380 :         xfs_fsblock_t           fsbno;
     494      232380 :         int                     error;
     495             : 
     496             :         /*
     497             :          * If we used space and committed the btree, remove those blocks from
     498             :          * the extent before we act on it.
     499             :          */
     500      232380 :         if (btree_committed) {
     501      231935 :                 free_agbno += resv->used;
     502      231935 :                 free_aglen -= resv->used;
     503             :         }
     504             : 
     505      232380 :         xrep_newbt_finish_autoreap(sc, resv);
     506             : 
     507      232548 :         if (free_aglen == 0)
     508             :                 return 0;
     509             : 
     510         422 :         trace_xrep_newbt_free_blocks(sc->mp, resv->pag->pag_agno, free_agbno,
     511         422 :                         free_aglen, xnr->oinfo.oi_owner);
     512             : 
     513         422 :         ASSERT(xnr->resv != XFS_AG_RESV_AGFL);
     514         422 :         ASSERT(xnr->resv != XFS_AG_RESV_IGNORE);
     515             : 
     516             :         /*
     517             :          * Use EFIs to free the reservations.  This reduces the chance
     518             :          * that we leak blocks if the system goes down.
     519             :          */
     520         422 :         fsbno = XFS_AGB_TO_FSB(sc->mp, resv->pag->pag_agno, free_agbno);
     521         422 :         error = __xfs_free_extent_later(sc->tp, fsbno, free_aglen, &xnr->oinfo,
     522             :                         xnr->resv, true);
     523         422 :         if (error)
     524           0 :                 return error;
     525             : 
     526             :         return 1;
     527             : }
     528             : 
     529             : /* Free all the accounting info and disk space we reserved for a new btree. */
     530             : STATIC int
     531     2842065 : xrep_newbt_free(
     532             :         struct xrep_newbt       *xnr,
     533             :         bool                    btree_committed)
     534             : {
     535     2842065 :         struct xfs_scrub        *sc = xnr->sc;
     536     2842065 :         struct xrep_newbt_resv  *resv, *n;
     537     2842065 :         unsigned int            freed = 0;
     538     2842065 :         int                     error = 0;
     539             : 
     540             :         /*
     541             :          * If the filesystem already went down, we can't free the blocks.  Skip
     542             :          * ahead to freeing the incore metadata because we can't fix anything.
     543             :          */
     544     5684130 :         if (xfs_is_shutdown(sc->mp))
     545           0 :                 goto junkit;
     546             : 
     547     3074582 :         list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
     548      232387 :                 int             ret;
     549             : 
     550      232387 :                 ret = xrep_newbt_free_extent(xnr, resv, btree_committed);
     551      232563 :                 list_del(&resv->list);
     552      232561 :                 xfs_perag_put(resv->pag);
     553      232605 :                 kfree(resv);
     554      232517 :                 if (ret < 0) {
     555           0 :                         error = ret;
     556           0 :                         goto junkit;
     557             :                 }
     558             : 
     559      232517 :                 freed += ret;
     560      232517 :                 if (freed >= XREP_REAP_MAX_NEWBT_EFIS) {
     561           0 :                         error = xrep_defer_finish(sc);
     562           0 :                         if (error)
     563           0 :                                 goto junkit;
     564             :                         freed = 0;
     565             :                 }
     566             :         }
     567             : 
     568     2842195 :         if (freed)
     569          18 :                 error = xrep_defer_finish(sc);
     570             : 
     571     2842177 : junkit:
     572             :         /*
     573             :          * If we still have reservations attached to @newbt, cleanup must have
     574             :          * failed and the filesystem is about to go down.  Clean up the incore
     575             :          * reservations.
     576             :          */
     577     2842117 :         list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
     578           0 :                 xrep_newbt_cancel_autoreap(resv);
     579           0 :                 list_del(&resv->list);
     580           0 :                 xfs_perag_put(resv->pag);
     581           0 :                 kfree(resv);
     582             :         }
     583             : 
     584     2842117 :         if (sc->ip) {
     585     2568677 :                 kmem_cache_free(xfs_ifork_cache, xnr->ifake.if_fork);
     586     2568677 :                 xnr->ifake.if_fork = NULL;
     587             :         }
     588             : 
     589     2842117 :         return error;
     590             : }
     591             : 
     592             : /*
     593             :  * Free all the accounting info and unused disk space allocations after
     594             :  * committing a new btree.
     595             :  */
     596             : int
     597     2761617 : xrep_newbt_commit(
     598             :         struct xrep_newbt       *xnr)
     599             : {
     600     2761617 :         return xrep_newbt_free(xnr, true);
     601             : }
     602             : 
     603             : /*
     604             :  * Free all the accounting info and all of the disk space we reserved for a new
     605             :  * btree that we're not going to commit.  We want to try to roll things back
     606             :  * cleanly for things like ENOSPC midway through allocation.
     607             :  */
     608             : void
     609       80428 : xrep_newbt_cancel(
     610             :         struct xrep_newbt       *xnr)
     611             : {
     612       80428 :         xrep_newbt_free(xnr, false);
     613       80369 : }
     614             : 
     615             : /* Feed one of the reserved btree blocks to the bulk loader. */
     616             : int
     617     1005688 : xrep_newbt_claim_block(
     618             :         struct xfs_btree_cur    *cur,
     619             :         struct xrep_newbt       *xnr,
     620             :         union xfs_btree_ptr     *ptr)
     621             : {
     622     1005688 :         struct xrep_newbt_resv  *resv;
     623     1005688 :         struct xfs_mount        *mp = cur->bc_mp;
     624     1005688 :         xfs_agblock_t           agbno;
     625             : 
     626             :         /*
     627             :          * The first item in the list should always have a free block unless
     628             :          * we're completely out.
     629             :          */
     630     1005688 :         resv = list_first_entry(&xnr->resv_list, struct xrep_newbt_resv, list);
     631     1005688 :         if (resv->used == resv->len)
     632             :                 return -ENOSPC;
     633             : 
     634             :         /*
     635             :          * Peel off a block from the start of the reservation.  We allocate
     636             :          * blocks in order to place blocks on disk in increasing record or key
     637             :          * order.  The block reservations tend to end up on the list in
     638             :          * decreasing order, which hopefully results in leaf blocks ending up
     639             :          * together.
     640             :          */
     641     1005688 :         agbno = resv->agbno + resv->used;
     642     1005688 :         resv->used++;
     643             : 
     644             :         /* If we used all the blocks in this reservation, move it to the end. */
     645     1005688 :         if (resv->used == resv->len)
     646      271912 :                 list_move_tail(&resv->list, &xnr->resv_list);
     647             : 
     648     1005786 :         trace_xrep_newbt_claim_block(mp, resv->pag->pag_agno, agbno, 1,
     649     1005786 :                         xnr->oinfo.oi_owner);
     650             : 
     651     1005720 :         if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
     652       67094 :                 ptr->l = cpu_to_be64(XFS_AGB_TO_FSB(mp, resv->pag->pag_agno,
     653             :                                                                 agbno));
     654             :         else
     655      938626 :                 ptr->s = cpu_to_be32(agbno);
     656             :         return 0;
     657             : }
     658             : 
     659             : /* How many reserved blocks are unused? */
     660             : unsigned int
     661       80428 : xrep_newbt_unused_blocks(
     662             :         struct xrep_newbt       *xnr)
     663             : {
     664       80428 :         struct xrep_newbt_resv  *resv;
     665       80428 :         unsigned int            unused = 0;
     666             : 
     667      120659 :         list_for_each_entry(resv, &xnr->resv_list, list)
     668       40231 :                 unused += resv->len - resv->used;
     669       80428 :         return unused;
     670             : }

Generated by: LCOV version 1.14