LCOV - code coverage report
Current view: top level - fs/xfs - xfs_icache.c (source / functions)
Test: fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023
Date: 2023-07-31 20:08:27
                 Hit     Total    Coverage
Lines:           836     920      90.9 %
Functions:       64      65      98.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
       4             :  * All Rights Reserved.
       5             :  */
       6             : #include "xfs.h"
       7             : #include "xfs_fs.h"
       8             : #include "xfs_shared.h"
       9             : #include "xfs_format.h"
      10             : #include "xfs_log_format.h"
      11             : #include "xfs_trans_resv.h"
      12             : #include "xfs_mount.h"
      13             : #include "xfs_inode.h"
      14             : #include "xfs_trans.h"
      15             : #include "xfs_trans_priv.h"
      16             : #include "xfs_inode_item.h"
      17             : #include "xfs_quota.h"
      18             : #include "xfs_trace.h"
      19             : #include "xfs_icache.h"
      20             : #include "xfs_bmap_util.h"
      21             : #include "xfs_dquot_item.h"
      22             : #include "xfs_dquot.h"
      23             : #include "xfs_reflink.h"
      24             : #include "xfs_ialloc.h"
      25             : #include "xfs_ag.h"
      26             : #include "xfs_log_priv.h"
      27             : #include "xfs_health.h"
      28             : #include "xfs_da_format.h"
      29             : #include "xfs_dir2.h"
      30             : #include "xfs_imeta.h"
      31             : 
      32             : #include <linux/iversion.h>
      33             : 
      34             : /* Radix tree tags for incore inode tree. */
      35             : 
       36             : /* Inode is to be reclaimed. */
      37             : #define XFS_ICI_RECLAIM_TAG     0
      38             : /* Inode has speculative preallocations (posteof or cow) to clean. */
      39             : #define XFS_ICI_BLOCKGC_TAG     1
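
Both tags are consumed by batched, RCU-protected gang lookups on the per-AG
incore inode radix tree. A minimal sketch of one batch, given a struct
xfs_perag *pag and assuming the kernel's radix_tree_gang_lookup_tag() API and
the XFS_LOOKUP_BATCH constant defined elsewhere in this file (the real loop,
with its locking and retry rules, is xfs_icwalk_ag()):

    /* Sketch: fetch one batch of inodes tagged for reclaim in this AG. */
    struct xfs_inode        *batch[XFS_LOOKUP_BATCH];
    unsigned long           first_index = 0;
    int                     nr_found;

    rcu_read_lock();
    nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
                    (void **)batch, first_index, XFS_LOOKUP_BATCH,
                    XFS_ICI_RECLAIM_TAG);
    rcu_read_unlock();
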
      40             : 
      41             : /*
      42             :  * The goal for walking incore inodes.  These can correspond with incore inode
      43             :  * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
      44             :  */
      45             : enum xfs_icwalk_goal {
      46             :         /* Goals directly associated with tagged inodes. */
      47             :         XFS_ICWALK_BLOCKGC      = XFS_ICI_BLOCKGC_TAG,
      48             :         XFS_ICWALK_RECLAIM      = XFS_ICI_RECLAIM_TAG,
      49             : };
      50             : 
      51             : static int xfs_icwalk(struct xfs_mount *mp,
      52             :                 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
      53             : static int xfs_icwalk_ag(struct xfs_perag *pag,
      54             :                 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
      55             : 
      56             : /*
      57             :  * Private inode cache walk flags for struct xfs_icwalk.  Must not
      58             :  * coincide with XFS_ICWALK_FLAGS_VALID.
      59             :  */
      60             : 
      61             : /* Stop scanning after icw_scan_limit inodes. */
      62             : #define XFS_ICWALK_FLAG_SCAN_LIMIT      (1U << 28)
      63             : 
      64             : #define XFS_ICWALK_FLAG_RECLAIM_SICK    (1U << 27)
      65             : #define XFS_ICWALK_FLAG_UNION           (1U << 26) /* union filter algorithm */
      66             : 
      67             : #define XFS_ICWALK_PRIVATE_FLAGS        (XFS_ICWALK_FLAG_SCAN_LIMIT | \
      68             :                                          XFS_ICWALK_FLAG_RECLAIM_SICK | \
      69             :                                          XFS_ICWALK_FLAG_UNION)
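
Since the private bits share the icw_flags word with the caller-visible
XFS_ICWALK_FLAGS_VALID bits declared in xfs_icache.h, any overlap would
silently corrupt walk filters. A hypothetical compile-time guard, shown only
to illustrate the invariant (not present in the source):

    static_assert((XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID) == 0,
                  "private icwalk flags overlap the public ones");
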
      70             : 
      71             : /*
      72             :  * Allocate and initialise an xfs_inode.
      73             :  */
      74             : struct xfs_inode *
      75   509835937 : xfs_inode_alloc(
      76             :         struct xfs_mount        *mp,
      77             :         xfs_ino_t               ino)
      78             : {
      79   509835937 :         struct xfs_inode        *ip;
      80             : 
      81             :         /*
      82             :          * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
      83             :          * and return NULL here on ENOMEM.
      84             :          */
      85   509835937 :         ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
      86             : 
      87   509864658 :         if (inode_init_always(mp->m_super, VFS_I(ip))) {
      88           0 :                 kmem_cache_free(xfs_inode_cache, ip);
      89           0 :                 return NULL;
      90             :         }
      91             : 
      92             :         /* VFS doesn't initialise i_mode or i_state! */
      93   509856215 :         VFS_I(ip)->i_mode = 0;
      94   509856215 :         VFS_I(ip)->i_state = 0;
      95   509856215 :         mapping_set_large_folios(VFS_I(ip)->i_mapping);
      96             : 
      97   509790763 :         XFS_STATS_INC(mp, vn_active);
      98   509790763 :         ASSERT(atomic_read(&ip->i_pincount) == 0);
      99   509790763 :         ASSERT(ip->i_ino == 0);
     100             : 
     101             :         /* initialise the xfs inode */
     102   509790763 :         ip->i_ino = ino;
     103   509790763 :         ip->i_mount = mp;
     104   509790763 :         memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
     105   509790763 :         ip->i_cowfp = NULL;
     106   509790763 :         memset(&ip->i_af, 0, sizeof(ip->i_af));
     107   509790763 :         ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
     108   509790763 :         memset(&ip->i_df, 0, sizeof(ip->i_df));
     109   509790763 :         ip->i_flags = 0;
     110   509790763 :         ip->i_delayed_blks = 0;
     111   509790763 :         ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
     112   509790763 :         ip->i_nblocks = 0;
     113   509790763 :         ip->i_forkoff = 0;
     114   509790763 :         ip->i_sick = 0;
     115   509790763 :         ip->i_checked = 0;
     116   509790763 :         INIT_WORK(&ip->i_ioend_work, xfs_end_io);
     117   509790763 :         INIT_LIST_HEAD(&ip->i_ioend_list);
     118   509790763 :         spin_lock_init(&ip->i_ioend_lock);
     119   509788447 :         ip->i_next_unlinked = NULLAGINO;
     120   509788447 :         ip->i_prev_unlinked = 0;
     121             : 
     122   509788447 :         return ip;
     123             : }
     124             : 
     125             : STATIC void
     126   506958733 : xfs_inode_free_callback(
     127             :         struct rcu_head         *head)
     128             : {
     129   506958733 :         struct inode            *inode = container_of(head, struct inode, i_rcu);
     130   506958733 :         struct xfs_inode        *ip = XFS_I(inode);
     131             : 
     132   506958733 :         switch (VFS_I(ip)->i_mode & S_IFMT) {
     133   290042439 :         case S_IFREG:
     134             :         case S_IFDIR:
     135             :         case S_IFLNK:
     136   290042439 :                 xfs_idestroy_fork(&ip->i_df);
     137   290042439 :                 break;
     138             :         }
     139             : 
     140   507222445 :         xfs_ifork_zap_attr(ip);
     141             : 
     142   508751246 :         if (ip->i_cowfp) {
     143   102860662 :                 xfs_idestroy_fork(ip->i_cowfp);
     144   102844614 :                 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
     145             :         }
     146   508308771 :         if (ip->i_itemp) {
     147    54855272 :                 ASSERT(!test_bit(XFS_LI_IN_AIL,
     148             :                                  &ip->i_itemp->ili_item.li_flags));
     149    54855272 :                 xfs_inode_item_destroy(ip);
     150    55058729 :                 ip->i_itemp = NULL;
     151             :         }
     152             : 
     153   508512228 :         kmem_cache_free(xfs_inode_cache, ip);
     154   508242907 : }
     155             : 
     156             : static void
     157   509885902 : __xfs_inode_free(
     158             :         struct xfs_inode        *ip)
     159             : {
     160             :         /* asserts to verify all state is correct here */
     161   509885902 :         ASSERT(atomic_read(&ip->i_pincount) == 0);
     162   509885902 :         ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
     163   509885902 :         XFS_STATS_DEC(ip->i_mount, vn_active);
     164             : 
     165   509885902 :         call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
     166   509885574 : }
     167             : 
     168             : void
     169      793169 : xfs_inode_free(
     170             :         struct xfs_inode        *ip)
     171             : {
     172     1586341 :         ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
     173             : 
     174             :         /*
     175             :          * Because we use RCU freeing we need to ensure the inode always
     176             :          * appears to be reclaimed with an invalid inode number when in the
     177             :          * free state. The ip->i_flags_lock provides the barrier against lookup
     178             :          * races.
     179             :          */
     180      793172 :         spin_lock(&ip->i_flags_lock);
     181      793174 :         ip->i_flags = XFS_IRECLAIM;
     182      793174 :         ip->i_ino = 0;
     183      793174 :         spin_unlock(&ip->i_flags_lock);
     184             : 
     185      793174 :         __xfs_inode_free(ip);
     186      793169 : }
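
The lookup side of this barrier is worth seeing next to the free side: a
concurrent lookup that found this inode under rcu_read_lock() must revalidate
it under i_flags_lock before using it. Condensed as a sketch from the checks
in xfs_iget_cache_hit() below, not a verbatim quote:

    spin_lock(&ip->i_flags_lock);
    if (ip->i_ino != ino || (ip->i_flags & XFS_IRECLAIM)) {
            /* Freed or being reclaimed: skip this inode and retry. */
            spin_unlock(&ip->i_flags_lock);
            return -EAGAIN;
    }
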
     187             : 
     188             : /*
     189             :  * Queue background inode reclaim work if there are reclaimable inodes and there
     190             :  * isn't reclaim work already scheduled or in progress.
     191             :  */
     192             : static void
     193     5626337 : xfs_reclaim_work_queue(
     194             :         struct xfs_mount        *mp)
     195             : {
     196             : 
     197     5626337 :         rcu_read_lock();
     198     5626261 :         if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
     199     5611473 :                 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
     200     5611473 :                         msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
     201             :         }
     202     5626020 :         rcu_read_unlock();
     203     5625780 : }
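
For scale: with the default fs.xfs.xfssyncd_centisecs sysctl value of 3000
(30 seconds), the delay above works out to 3000 / 6 * 10 = 5000 ms, so
background reclaim is rescheduled roughly every 5 seconds for as long as any
AG remains tagged with reclaimable inodes.
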
     204             : 
     205             : /*
     206             :  * Background scanning to trim preallocated space. This is queued based on the
     207             :  * 'speculative_prealloc_lifetime' tunable (5m by default).
     208             :  */
     209             : static inline void
     210      954285 : xfs_blockgc_queue(
     211             :         struct xfs_perag        *pag)
     212             : {
     213      954285 :         struct xfs_mount        *mp = pag->pag_mount;
     214             : 
     215     1908570 :         if (!xfs_is_blockgc_enabled(mp))
     216             :                 return;
     217             : 
     218      954285 :         rcu_read_lock();
     219      954281 :         if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
     220      938516 :                 queue_delayed_work(pag->pag_mount->m_blockgc_wq,
     221             :                                    &pag->pag_blockgc_work,
     222      938516 :                                    msecs_to_jiffies(xfs_blockgc_secs * 1000));
     223      954227 :         rcu_read_unlock();
     224             : }
     225             : 
     226             : /* Set a tag on both the AG incore inode tree and the AG radix tree. */
     227             : static void
     228  1078608068 : xfs_perag_set_inode_tag(
     229             :         struct xfs_perag        *pag,
     230             :         xfs_agino_t             agino,
     231             :         unsigned int            tag)
     232             : {
     233  1078608068 :         struct xfs_mount        *mp = pag->pag_mount;
     234  1078608068 :         bool                    was_tagged;
     235             : 
     236  1078608068 :         lockdep_assert_held(&pag->pag_ici_lock);
     237             : 
     238  1078608068 :         was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
     239  1078609130 :         radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
     240             : 
     241  1078576732 :         if (tag == XFS_ICI_RECLAIM_TAG)
     242  1075661946 :                 pag->pag_ici_reclaimable++;
     243             : 
     244  1078576732 :         if (was_tagged)
     245             :                 return;
     246             : 
     247             :         /* propagate the tag up into the perag radix tree */
     248     5366186 :         spin_lock(&mp->m_perag_lock);
     249     5367426 :         radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
     250     5367426 :         spin_unlock(&mp->m_perag_lock);
     251             : 
     252             :         /* start background work */
     253     5366622 :         switch (tag) {
     254     5066950 :         case XFS_ICI_RECLAIM_TAG:
     255     5066950 :                 xfs_reclaim_work_queue(mp);
     256     5066950 :                 break;
     257      299672 :         case XFS_ICI_BLOCKGC_TAG:
     258      299672 :                 xfs_blockgc_queue(pag);
     259      299672 :                 break;
     260             :         }
     261             : 
     262     5366645 :         trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
     263             : }
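
Propagating the tag into m_perag_tree is what lets the background workers
visit only the AGs that have work rather than scanning every AG. A rough
sketch of that consumer side, assuming the xfs_perag_get_tag() helper from
libxfs/xfs_ag.c (the actual walk lives in xfs_icwalk()):

    struct xfs_perag        *pag;
    xfs_agnumber_t          agno = 0;

    while ((pag = xfs_perag_get_tag(mp, agno, XFS_ICI_RECLAIM_TAG))) {
            agno = pag->pag_agno + 1;
            /* ... reclaim inodes from this AG ... */
            xfs_perag_put(pag);
    }
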
     264             : 
     265             : /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
     266             : static void
     267  1089346061 : xfs_perag_clear_inode_tag(
     268             :         struct xfs_perag        *pag,
     269             :         xfs_agino_t             agino,
     270             :         unsigned int            tag)
     271             : {
     272  1089346061 :         struct xfs_mount        *mp = pag->pag_mount;
     273             : 
     274  1089346061 :         lockdep_assert_held(&pag->pag_ici_lock);
     275             : 
     276             :         /*
     277             :          * Reclaim can signal (with a null agino) that it cleared its own tag
     278             :          * by removing the inode from the radix tree.
     279             :          */
     280  1089346061 :         if (agino != NULLAGINO)
     281   580253540 :                 radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
     282             :         else
     283   509092521 :                 ASSERT(tag == XFS_ICI_RECLAIM_TAG);
     284             : 
     285  1089348366 :         if (tag == XFS_ICI_RECLAIM_TAG)
     286  1075633386 :                 pag->pag_ici_reclaimable--;
     287             : 
     288  1089348366 :         if (radix_tree_tagged(&pag->pag_ici_root, tag))
     289             :                 return;
     290             : 
     291             :         /* clear the tag from the perag radix tree */
     292     9772930 :         spin_lock(&mp->m_perag_lock);
     293     9773393 :         radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
     294     9773393 :         spin_unlock(&mp->m_perag_lock);
     295             : 
     296     9773371 :         trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
     297             : }
     298             : 
     299             : /*
     300             :  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
     301             :  * part of the structure. This is made more complex by the fact we store
     302             :  * information about the on-disk values in the VFS inode and so we can't just
     303             :  * overwrite the values unconditionally. Hence we save the parameters we
     304             :  * need to retain across reinitialisation, and rewrite them into the VFS inode
     305             :  * after reinitialisation even if it fails.
     306             :  */
     307             : static int
     308   566001901 : xfs_reinit_inode(
     309             :         struct xfs_mount        *mp,
     310             :         struct inode            *inode)
     311             : {
     312   566001901 :         int                     error;
     313   566001901 :         uint32_t                nlink = inode->i_nlink;
     314   566001901 :         uint32_t                generation = inode->i_generation;
     315   566001901 :         uint64_t                version = inode_peek_iversion(inode);
     316   566001901 :         umode_t                 mode = inode->i_mode;
     317   566001901 :         dev_t                   dev = inode->i_rdev;
     318   566001901 :         kuid_t                  uid = inode->i_uid;
     319   566001901 :         kgid_t                  gid = inode->i_gid;
     320             : 
     321   566001901 :         error = inode_init_always(mp->m_super, inode);
     322             : 
     323   566575224 :         set_nlink(inode, nlink);
     324   566593138 :         inode->i_generation = generation;
     325   566593138 :         inode_set_iversion_queried(inode, version);
     326   566593138 :         inode->i_mode = mode;
     327   566593138 :         inode->i_rdev = dev;
     328   566593138 :         inode->i_uid = uid;
     329   566593138 :         inode->i_gid = gid;
     330   566593138 :         mapping_set_large_folios(inode->i_mapping);
     331   566556003 :         return error;
     332             : }
     333             : 
     334             : /*
     335             :  * Carefully nudge an inode whose VFS state has been torn down back into a
     336             :  * usable state.  Drops the i_flags_lock and the rcu read lock.
     337             :  */
     338             : static int
     339   566590485 : xfs_iget_recycle(
     340             :         struct xfs_perag        *pag,
     341             :         struct xfs_inode        *ip) __releases(&ip->i_flags_lock)
     342             : {
     343   566590485 :         struct xfs_mount        *mp = ip->i_mount;
     344   566590485 :         struct inode            *inode = VFS_I(ip);
     345   566590485 :         int                     error;
     346             : 
     347   566590485 :         trace_xfs_iget_recycle(ip);
     348             : 
     349   566590541 :         if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
     350             :                 return -EAGAIN;
     351             : 
     352             :         /*
     353             :          * We need to make it look like the inode is being reclaimed to prevent
     354             :          * the actual reclaim workers from stomping over us while we recycle
     355             :          * the inode.  We can't clear the radix tree tag yet as it requires
     356             :          * pag_ici_lock to be held exclusive.
     357             :          */
     358   566602494 :         ip->i_flags |= XFS_IRECLAIM;
     359             : 
     360   566602494 :         spin_unlock(&ip->i_flags_lock);
     361   566600144 :         rcu_read_unlock();
     362             : 
     363   566492145 :         ASSERT(!rwsem_is_locked(&inode->i_rwsem));
     364   566492145 :         error = xfs_reinit_inode(mp, inode);
     365   566530236 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     366   566591598 :         if (error) {
     367             :                 /*
     368             :                  * Re-initializing the inode failed, and we are in deep
     369             :                  * trouble.  Try to re-add it to the reclaim list.
     370             :                  */
     371           0 :                 rcu_read_lock();
     372           0 :                 spin_lock(&ip->i_flags_lock);
     373           0 :                 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
     374           0 :                 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
     375           0 :                 spin_unlock(&ip->i_flags_lock);
     376           0 :                 rcu_read_unlock();
     377             : 
     378           0 :                 trace_xfs_iget_recycle_fail(ip);
     379           0 :                 return error;
     380             :         }
     381             : 
     382   566591598 :         spin_lock(&pag->pag_ici_lock);
     383   566621407 :         spin_lock(&ip->i_flags_lock);
     384             : 
     385             :         /*
     386             :          * Clear the per-lifetime state in the inode as we are now effectively
     387             :          * a new inode and need to return to the initial state before reuse
     388             :          * occurs.
     389             :          */
     390   566622036 :         ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
     391   566622036 :         ip->i_flags |= XFS_INEW;
     392   566622036 :         xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
     393             :                         XFS_ICI_RECLAIM_TAG);
     394   566617642 :         inode->i_state = I_NEW;
     395   566617642 :         spin_unlock(&ip->i_flags_lock);
     396   566620484 :         spin_unlock(&pag->pag_ici_lock);
     397             : 
     398   566620484 :         return 0;
     399             : }
     400             : 
     401             : /*
      402             :  * If we are allocating a new inode, then check that what was returned is
      403             :  * actually a free, empty inode. If we are not allocating an inode,
      404             :  * then check that we didn't find a free inode.
     405             :  *
     406             :  * Returns:
     407             :  *      0               if the inode free state matches the lookup context
     408             :  *      -ENOENT         if the inode is free and we are not allocating
     409             :  *      -EFSCORRUPTED   if there is any state mismatch at all
     410             :  */
     411             : static int
     412 75937587329 : xfs_iget_check_free_state(
     413             :         struct xfs_inode        *ip,
     414             :         int                     flags)
     415             : {
     416 75937587329 :         if (flags & XFS_IGET_CREATE) {
     417             :                 /* should be a free inode */
     418    64359267 :                 if (VFS_I(ip)->i_mode != 0) {
     419           0 :                         xfs_warn(ip->i_mount,
     420             : "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
     421             :                                 ip->i_ino, VFS_I(ip)->i_mode);
     422           0 :                         xfs_agno_mark_sick(ip->i_mount,
     423           0 :                                         XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
     424             :                                         XFS_SICK_AG_INOBT);
     425           0 :                         return -EFSCORRUPTED;
     426             :                 }
     427             : 
     428    64359267 :                 if (ip->i_nblocks != 0) {
     429           0 :                         xfs_warn(ip->i_mount,
     430             : "Corruption detected! Free inode 0x%llx has blocks allocated!",
     431             :                                 ip->i_ino);
     432           0 :                         xfs_agno_mark_sick(ip->i_mount,
     433           0 :                                         XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
     434             :                                         XFS_SICK_AG_INOBT);
     435           0 :                         return -EFSCORRUPTED;
     436             :                 }
     437             :                 return 0;
     438             :         }
     439             : 
     440             :         /* should be an allocated inode */
     441 75873228062 :         if (VFS_I(ip)->i_mode == 0)
     442     2166487 :                 return -ENOENT;
     443             : 
     444             :         return 0;
     445             : }
     446             : 
     447             : /* Make all pending inactivation work start immediately. */
     448             : static bool
     449    14414572 : xfs_inodegc_queue_all(
     450             :         struct xfs_mount        *mp)
     451             : {
     452    14414572 :         struct xfs_inodegc      *gc;
     453    14414572 :         int                     cpu;
     454    14414572 :         bool                    ret = false;
     455             : 
     456    43222279 :         for_each_online_cpu(cpu) {
     457    28807286 :                 gc = per_cpu_ptr(mp->m_inodegc, cpu);
     458    28807286 :                 if (!llist_empty(&gc->list)) {
     459      877940 :                         mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
     460      877940 :                         ret = true;
     461             :                 }
     462             :         }
     463             : 
     464    14403065 :         return ret;
     465             : }
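
For context, the producer that fills gc->list does not appear in this
section. A condensed sketch of what xfs_inodegc_queue() (later in this file)
does, eliding its batching and throttling heuristics; i_gclist is the
llist_node embedded in struct xfs_inode:

    struct xfs_inodegc      *gc = this_cpu_ptr(mp->m_inodegc);

    /* Push the inode onto this CPU's lockless list and kick its worker. */
    llist_add(&ip->i_gclist, &gc->list);
    mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
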
     466             : 
     467             : /* Wait for all queued work and collect errors */
     468             : static int
     469     5055050 : xfs_inodegc_wait_all(
     470             :         struct xfs_mount        *mp)
     471             : {
     472     5055050 :         int                     cpu;
     473     5055050 :         int                     error = 0;
     474             : 
     475     5055050 :         flush_workqueue(mp->m_inodegc_wq);
     476    20221793 :         for_each_online_cpu(cpu) {
     477    10110717 :                 struct xfs_inodegc      *gc;
     478             : 
     479    10110717 :                 gc = per_cpu_ptr(mp->m_inodegc, cpu);
     480    10110717 :                 if (gc->error && !error)
     481        1707 :                         error = gc->error;
     482    10110717 :                 gc->error = 0;
     483             :         }
     484             : 
     485     5056039 :         return error;
     486             : }
     487             : 
     488             : /*
      489             :  * Check the validity of the inode we just found in the cache
     490             :  */
     491             : static int
     492 75249629947 : xfs_iget_cache_hit(
     493             :         struct xfs_perag        *pag,
     494             :         struct xfs_inode        *ip,
     495             :         xfs_ino_t               ino,
     496             :         int                     flags,
     497             :         int                     lock_flags) __releases(RCU)
     498             : {
     499 75249629947 :         struct inode            *inode = VFS_I(ip);
     500 75249629947 :         struct xfs_mount        *mp = ip->i_mount;
     501 75249629947 :         int                     error;
     502             : 
     503             :         /*
     504             :          * check for re-use of an inode within an RCU grace period due to the
     505             :          * radix tree nodes not being updated yet. We monitor for this by
     506             :          * setting the inode number to zero before freeing the inode structure.
     507             :          * If the inode has been reallocated and set up, then the inode number
     508             :          * will not match, so check for that, too.
     509             :          */
     510 75249629947 :         spin_lock(&ip->i_flags_lock);
     511 75504923126 :         if (ip->i_ino != ino)
     512         483 :                 goto out_skip;
     513             : 
     514             :         /*
     515             :          * If we are racing with another cache hit that is currently
     516             :          * instantiating this inode or currently recycling it out of
     517             :          * reclaimable state, wait for the initialisation to complete
     518             :          * before continuing.
     519             :          *
     520             :          * If we're racing with the inactivation worker we also want to wait.
     521             :          * If we're creating a new file, it's possible that the worker
     522             :          * previously marked the inode as free on disk but hasn't finished
     523             :          * updating the incore state yet.  The AGI buffer will be dirty and
     524             :          * locked to the icreate transaction, so a synchronous push of the
     525             :          * inodegc workers would result in deadlock.  For a regular iget, the
     526             :          * worker is running already, so we might as well wait.
     527             :          *
     528             :          * XXX(hch): eventually we should do something equivalent to
     529             :          *           wait_on_inode to wait for these flags to be cleared
     530             :          *           instead of polling for it.
     531             :          */
     532 75504922643 :         if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
     533      246532 :                 goto out_skip;
     534             : 
     535 75504676111 :         if (ip->i_flags & XFS_NEED_INACTIVE) {
     536             :                 /* Unlinked inodes cannot be re-grabbed. */
     537     1223008 :                 if (VFS_I(ip)->i_nlink == 0) {
     538     1181081 :                         error = -ENOENT;
     539     1181081 :                         goto out_error;
     540             :                 }
     541       41927 :                 goto out_inodegc_flush;
     542             :         }
     543             : 
     544             :         /*
     545             :          * Check the inode free state is valid. This also detects lookup
     546             :          * racing with unlinks.
     547             :          */
     548 75503453103 :         error = xfs_iget_check_free_state(ip, flags);
     549 75443297507 :         if (error)
     550     2166481 :                 goto out_error;
     551             : 
     552             :         /* Skip inodes that have no vfs state. */
     553 75441131026 :         if ((flags & XFS_IGET_INCORE) &&
     554           0 :             (ip->i_flags & XFS_IRECLAIMABLE))
     555           0 :                 goto out_skip;
     556             : 
     557             :         /* The inode fits the selection criteria; process it. */
     558 75441131026 :         if (ip->i_flags & XFS_IRECLAIMABLE) {
     559             :                 /* Drops i_flags_lock and RCU read lock. */
     560   566591273 :                 error = xfs_iget_recycle(pag, ip);
     561   566596195 :                 if (error == -EAGAIN)
     562           3 :                         goto out_skip;
     563   566596192 :                 if (error)
     564             :                         return error;
     565             :         } else {
     566             :                 /* If the VFS inode is being torn down, pause and try again. */
     567 74874539753 :                 if (!igrab(inode))
     568      167523 :                         goto out_skip;
     569             : 
     570             :                 /* We've got a live one. */
     571 74935722757 :                 spin_unlock(&ip->i_flags_lock);
     572 75003332008 :                 rcu_read_unlock();
     573 74895947582 :                 trace_xfs_iget_hit(ip);
     574             :         }
     575             : 
     576 75348648738 :         if (lock_flags != 0)
     577 68626264449 :                 xfs_ilock(ip, lock_flags);
     578             : 
     579 75311719751 :         if (!(flags & XFS_IGET_INCORE))
     580 75434709205 :                 xfs_iflags_clear(ip, XFS_ISTALE);
     581 75458257560 :         XFS_STATS_INC(mp, xs_ig_found);
     582             : 
     583 75458257560 :         return 0;
     584             : 
     585      414541 : out_skip:
     586      414541 :         trace_xfs_iget_skip(ip);
     587      414541 :         XFS_STATS_INC(mp, xs_ig_frecycle);
     588      414541 :         error = -EAGAIN;
     589     3762103 : out_error:
     590     3762103 :         spin_unlock(&ip->i_flags_lock);
     591     3762097 :         rcu_read_unlock();
     592     3762097 :         return error;
     593             : 
     594             : out_inodegc_flush:
     595       41927 :         spin_unlock(&ip->i_flags_lock);
     596       41927 :         rcu_read_unlock();
     597             :         /*
     598             :          * Do not wait for the workers, because the caller could hold an AGI
     599             :          * buffer lock.  We're just going to sleep in a loop anyway.
     600             :          */
     601       83854 :         if (xfs_is_inodegc_enabled(mp))
     602       41908 :                 xfs_inodegc_queue_all(mp);
     603             :         return -EAGAIN;
     604             : }
     605             : 
     606             : static int
     607   509843350 : xfs_iget_cache_miss(
     608             :         struct xfs_mount        *mp,
     609             :         struct xfs_perag        *pag,
     610             :         xfs_trans_t             *tp,
     611             :         xfs_ino_t               ino,
     612             :         struct xfs_inode        **ipp,
     613             :         int                     flags,
     614             :         int                     lock_flags)
     615             : {
     616   509843350 :         struct xfs_inode        *ip;
     617   509843350 :         int                     error;
     618   509843350 :         xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);
     619   509843350 :         int                     iflags;
     620             : 
     621   509843350 :         ip = xfs_inode_alloc(mp, ino);
     622   509844046 :         if (!ip)
     623             :                 return -ENOMEM;
     624             : 
     625   509844046 :         error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
     626   509858883 :         if (error)
     627       67993 :                 goto out_destroy;
     628             : 
     629             :         /*
     630             :          * For version 5 superblocks, if we are initialising a new inode and we
     631             :          * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
     632             :          * simply build the new inode core with a random generation number.
     633             :          *
     634             :          * For version 4 (and older) superblocks, log recovery is dependent on
     635             :          * the i_flushiter field being initialised from the current on-disk
     636             :          * value and hence we must also read the inode off disk even when
     637             :          * initializing new inodes.
     638             :          */
     639   509790890 :         if (xfs_has_v3inodes(mp) &&
     640   509790472 :             (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
     641    30450933 :                 VFS_I(ip)->i_generation = get_random_u32();
     642             :         } else {
     643   479339957 :                 struct xfs_buf          *bp;
     644             : 
     645   479339957 :                 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
     646   479354723 :                 if (error)
     647        4413 :                         goto out_destroy;
     648             : 
     649   479355787 :                 error = xfs_inode_from_disk(ip,
     650   479350512 :                                 xfs_buf_offset(bp, ip->i_imap.im_boffset));
     651   479345275 :                 if (!error)
     652   479345073 :                         xfs_buf_set_ref(bp, XFS_INO_REF);
     653             :                 else
     654         202 :                         xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
     655   479349280 :                 xfs_trans_brelse(tp, bp);
     656             : 
     657   479354234 :                 if (error)
     658         202 :                         goto out_destroy;
     659             :         }
     660             : 
     661   509803928 :         trace_xfs_iget_miss(ip);
     662             : 
     663             :         /*
     664             :          * Check the inode free state is valid. This also detects lookup
     665             :          * racing with unlinks.
     666             :          */
     667   509787069 :         error = xfs_iget_check_free_state(ip, flags);
     668   509801609 :         if (error)
     669           0 :                 goto out_destroy;
     670             : 
     671             :         /*
     672             :          * Preload the radix tree so we can insert safely under the
     673             :          * write spinlock. Note that we cannot sleep inside the preload
     674             :          * region. Since we can be called from transaction context, don't
     675             :          * recurse into the file system.
     676             :          */
     677   509801609 :         if (radix_tree_preload(GFP_NOFS)) {
     678           0 :                 error = -EAGAIN;
     679           0 :                 goto out_destroy;
     680             :         }
     681             : 
     682             :         /*
     683             :          * Because the inode hasn't been added to the radix-tree yet it can't
     684             :          * be found by another thread, so we can do the non-sleeping lock here.
     685             :          */
     686   509805825 :         if (lock_flags) {
     687   496642449 :                 if (!xfs_ilock_nowait(ip, lock_flags))
     688           0 :                         BUG();
     689             :         }
     690             : 
     691             :         /*
     692             :          * These values must be set before inserting the inode into the radix
     693             :          * tree as the moment it is inserted a concurrent lookup (allowed by the
     694             :          * RCU locking mechanism) can find it and that lookup must see that this
     695             :          * is an inode currently under construction (i.e. that XFS_INEW is set).
     696             :          * The ip->i_flags_lock that protects the XFS_INEW flag forms the
     697             :          * memory barrier that ensures this detection works correctly at lookup
     698             :          * time.
     699             :          */
     700   509807843 :         iflags = XFS_INEW;
     701   509807843 :         if (flags & XFS_IGET_DONTCACHE)
     702   466227194 :                 d_mark_dontcache(VFS_I(ip));
     703   509808448 :         ip->i_udquot = NULL;
     704   509808448 :         ip->i_gdquot = NULL;
     705   509808448 :         ip->i_pdquot = NULL;
     706   509808448 :         xfs_iflags_set(ip, iflags);
     707             : 
     708             :         /* insert the new inode */
     709   509809483 :         spin_lock(&pag->pag_ici_lock);
     710   509810350 :         error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
     711   509808763 :         if (unlikely(error)) {
     712      720768 :                 WARN_ON(error != -EEXIST);
     713      720768 :                 XFS_STATS_INC(mp, xs_ig_dup);
     714      720768 :                 error = -EAGAIN;
     715      720768 :                 goto out_preload_end;
     716             :         }
     717   509087995 :         spin_unlock(&pag->pag_ici_lock);
     718   509082296 :         radix_tree_preload_end();
     719             : 
     720   509086839 :         *ipp = ip;
     721   509086839 :         return 0;
     722             : 
     723             : out_preload_end:
     724      720768 :         spin_unlock(&pag->pag_ici_lock);
     725      720767 :         radix_tree_preload_end();
     726      720764 :         if (lock_flags)
     727      720108 :                 xfs_iunlock(ip, lock_flags);
     728         656 : out_destroy:
     729      793174 :         __destroy_inode(VFS_I(ip));
     730      793172 :         xfs_inode_free(ip);
     731      793172 :         return error;
     732             : }
     733             : 
     734             : /*
     735             :  * Look up an inode by number in the given file system.  The inode is looked up
     736             :  * in the cache held in each AG.  If the inode is found in the cache, initialise
     737             :  * the vfs inode if necessary.
     738             :  *
     739             :  * If it is not in core, read it in from the file system's device, add it to the
     740             :  * cache and initialise the vfs inode.
     741             :  *
     742             :  * The inode is locked according to the value of the lock_flags parameter.
     743             :  * Inode lookup is only done during metadata operations and not as part of the
     744             :  * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
     745             :  */
     746             : int
     747 75508942380 : xfs_iget(
     748             :         struct xfs_mount        *mp,
     749             :         struct xfs_trans        *tp,
     750             :         xfs_ino_t               ino,
     751             :         uint                    flags,
     752             :         uint                    lock_flags,
     753             :         struct xfs_inode        **ipp)
     754             : {
     755 75508942380 :         struct xfs_inode        *ip;
     756 75508942380 :         struct xfs_perag        *pag;
     757 75508942380 :         xfs_agino_t             agino;
     758 75508942380 :         int                     error;
     759             : 
     760 75508942380 :         ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
     761             : 
     762             :         /* reject inode numbers outside existing AGs */
     763 75508942380 :         if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
     764             :                 return -EINVAL;
     765             : 
     766 75508109737 :         XFS_STATS_INC(mp, xs_ig_attempts);
     767             : 
     768             :         /* get the perag structure and ensure that it's inode capable */
     769 75508109737 :         pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
     770 75736324075 :         agino = XFS_INO_TO_AGINO(mp, ino);
     771             : 
     772 75737480838 : again:
     773 75737480838 :         error = 0;
     774 75737480838 :         rcu_read_lock();
     775 75997237068 :         ip = radix_tree_lookup(&pag->pag_ici_root, agino);
     776             : 
     777 75687303508 :         if (ip) {
     778 75177463330 :                 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
     779 75311430637 :                 if (error)
     780     3804011 :                         goto out_error_or_again;
     781             :         } else {
     782   509840178 :                 rcu_read_unlock();
     783   509852798 :                 if (flags & XFS_IGET_INCORE) {
     784           0 :                         error = -ENODATA;
     785           0 :                         goto out_error_or_again;
     786             :                 }
     787   509852798 :                 XFS_STATS_INC(mp, xs_ig_missed);
     788             : 
     789   509852798 :                 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
     790             :                                                         flags, lock_flags);
     791   509875297 :                 if (error)
     792      793162 :                         goto out_error_or_again;
     793             :         }
     794 75816708761 :         xfs_perag_put(pag);
     795             : 
     796 76002559562 :         *ipp = ip;
     797             : 
     798             :         /*
     799             :          * If we have a real type for an on-disk inode, we can setup the inode
     800             :          * now.  If it's a new inode being created, xfs_init_new_inode will
     801             :          * handle it.
     802             :          */
     803 >15191*10^7 :         if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
     804  1009657665 :                 xfs_setup_existing_inode(ip);
     805             :         return 0;
     806             : 
     807     4597173 : out_error_or_again:
     808     4597173 :         if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
     809             :             error == -EAGAIN) {
     810     1157066 :                 delay(1);
     811     1156763 :                 goto again;
     812             :         }
     813     3440107 :         xfs_perag_put(pag);
     814     3440107 :         return error;
     815             : }
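
An illustrative caller, hypothetical rather than quoted from a real call
site: look up an allocated inode with the ILOCK held, per the locking rules
in the comment above, then drop the lock and the reference when done:

    struct xfs_inode        *ip;
    int                     error;

    error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
    if (error)
            return error;
    /* ... work with the locked inode ... */
    xfs_iunlock(ip, XFS_ILOCK_EXCL);
    xfs_irele(ip);
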
     816             : 
     817             : /* Get a metadata inode.  The ftype must match exactly. */
     818             : int
     819      385920 : xfs_imeta_iget(
     820             :         struct xfs_mount        *mp,
     821             :         xfs_ino_t               ino,
     822             :         unsigned char           ftype,
     823             :         struct xfs_inode        **ipp)
     824             : {
     825      385920 :         struct xfs_inode        *ip;
     826      385920 :         int                     error;
     827             : 
     828      385920 :         ASSERT(ftype != XFS_DIR3_FT_UNKNOWN);
     829             : 
     830      385920 :         error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
     831      385920 :         if (error == -EFSCORRUPTED)
     832           4 :                 goto whine;
     833      385916 :         if (error)
     834             :                 return error;
     835             : 
     836      385916 :         if (VFS_I(ip)->i_nlink == 0)
     837           0 :                 goto bad_rele;
     838      385916 :         if (xfs_mode_to_ftype(VFS_I(ip)->i_mode) != ftype)
     839           0 :                 goto bad_rele;
     840      385916 :         if (xfs_has_metadir(mp) && !xfs_is_metadir_inode(ip))
     841           0 :                 goto bad_rele;
     842             : 
     843      385916 :         *ipp = ip;
     844      385916 :         return 0;
     845           0 : bad_rele:
     846           0 :         xfs_irele(ip);
     847           4 : whine:
     848           4 :         xfs_err(mp, "metadata inode 0x%llx is corrupt", ino);
     849           4 :         xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR);
     850           4 :         return -EFSCORRUPTED;
     851             : }
     852             : 
     853             : /*
     854             :  * Grab the inode for reclaim exclusively.
     855             :  *
     856             :  * We have found this inode via a lookup under RCU, so the inode may have
     857             :  * already been freed, or it may be in the process of being recycled by
     858             :  * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
     859             :  * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
     860             :  * will not be set. Hence we need to check for both these flag conditions to
     861             :  * avoid inodes that are no longer reclaim candidates.
     862             :  *
     863             :  * Note: checking for other state flags here, under the i_flags_lock or not, is
     864             :  * racy and should be avoided. Those races should be resolved only after we have
     865             :  * ensured that we are able to reclaim this inode and the world can see that we
     866             :  * are going to reclaim it.
     867             :  *
     868             :  * Return true if we grabbed it, false otherwise.
     869             :  */
     870             : static bool
     871   529344291 : xfs_reclaim_igrab(
     872             :         struct xfs_inode        *ip,
     873             :         struct xfs_icwalk       *icw)
     874             : {
     875   529344291 :         ASSERT(rcu_read_lock_held());
     876             : 
     877   529344291 :         spin_lock(&ip->i_flags_lock);
     878   529353772 :         if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
     879             :             __xfs_iflags_test(ip, XFS_IRECLAIM)) {
     880             :                 /* not a reclaim candidate. */
     881       80174 :                 spin_unlock(&ip->i_flags_lock);
     882       80174 :                 return false;
     883             :         }
     884             : 
     885             :         /* Don't reclaim a sick inode unless the caller asked for it. */
     886   529273598 :         if (ip->i_sick &&
     887          32 :             (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
     888           0 :                 spin_unlock(&ip->i_flags_lock);
     889           0 :                 return false;
     890             :         }
     891             : 
     892   529273598 :         __xfs_iflags_set(ip, XFS_IRECLAIM);
     893   529273598 :         spin_unlock(&ip->i_flags_lock);
     894   529273598 :         return true;
     895             : }
     896             : 
     897             : /*
     898             :  * Inode reclaim is non-blocking, so the default action if progress cannot be
     899             :  * made is to "requeue" the inode for reclaim by unlocking it and clearing the
     900             :  * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
      901             :  * blocking anymore and hence we can wait on the inode until we are able to
      902             :  * reclaim it.
     903             :  *
     904             :  * We do no IO here - if callers require inodes to be cleaned they must push the
     905             :  * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
     906             :  * done in the background in a non-blocking manner, and enables memory reclaim
     907             :  * to make progress without blocking.
     908             :  */
     909             : static void
     910   529278523 : xfs_reclaim_inode(
     911             :         struct xfs_inode        *ip,
     912             :         struct xfs_perag        *pag)
     913             : {
     914   529278523 :         xfs_ino_t               ino = ip->i_ino; /* for radix_tree_delete */
     915             : 
     916   529278523 :         if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
     917           5 :                 goto out;
     918   529279984 :         if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
     919    12446997 :                 goto out_iunlock;
     920             : 
     921             :         /*
     922             :          * Check for log shutdown because aborting the inode can move the log
     923             :          * tail and corrupt in memory state. This is fine if the log is shut
     924             :          * down, but if the log is still active and only the mount is shut down
     925             :          * then the in-memory log tail movement caused by the abort can be
     926             :          * incorrectly propagated to disk.
     927             :          */
     928  1033659418 :         if (xlog_is_shutdown(ip->i_mount->m_log)) {
     929   305761830 :                 xfs_iunpin_wait(ip);
     930   305761830 :                 xfs_iflush_shutdown_abort(ip);
     931   305761830 :                 goto reclaim;
     932             :         }
     933   211067879 :         if (xfs_ipincount(ip))
     934     1531340 :                 goto out_clear_flush;
     935   209536539 :         if (!xfs_inode_clean(ip))
     936     6212954 :                 goto out_clear_flush;
     937             : 
     938   203323585 :         xfs_iflags_clear(ip, XFS_IFLUSHING);
     939   509091974 : reclaim:
     940   509091974 :         trace_xfs_inode_reclaiming(ip);
     941             : 
     942             :         /*
     943             :          * Because we use RCU freeing we need to ensure the inode always appears
     944             :          * to be reclaimed with an invalid inode number when in the free state.
     945             :          * We do this as early as possible under the ILOCK so that
     946             :          * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
     947             :          * detect races with us here. By doing this, we guarantee that once
     948             :          * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
     949             :          * it will see either a valid inode that will serialise correctly, or it
     950             :          * will see an invalid inode that it can skip.
     951             :          */
     952   509091941 :         spin_lock(&ip->i_flags_lock);
     953   509092515 :         ip->i_flags = XFS_IRECLAIM;
     954   509092515 :         ip->i_ino = 0;
     955   509092515 :         ip->i_sick = 0;
     956   509092515 :         ip->i_checked = 0;
     957   509092515 :         spin_unlock(&ip->i_flags_lock);
     958             : 
     959   509092394 :         ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
     960   509092394 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     961             : 
     962   509079841 :         XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
     963             :         /*
     964             :          * Remove the inode from the per-AG radix tree.
     965             :          *
     966             :          * Because radix_tree_delete won't complain even if the item was never
      967             :          * added to the tree, assert that it's been there before to catch
      968             :          * problems with the inode lifetime early on.
     969             :          */
     970   509079841 :         spin_lock(&pag->pag_ici_lock);
     971  1018185750 :         if (!xfs_is_shutdown(pag->pag_mount)) {
     972             :                 /* had better not be on any unlinked list! */
     973   203330964 :                 ASSERT(!xfs_inode_on_unlinked_list(ip));
     974   203330964 :                 if (xfs_inode_on_unlinked_list(ip))
     975           0 :                         xfs_emerg(pag->pag_mount, "IUNLINK ino 0x%llx nlink %u mode 0o%o prevun 0x%x nextun 0x%x", ino, VFS_I(ip)->i_nlink, VFS_I(ip)->i_mode, ip->i_prev_unlinked, ip->i_next_unlinked);
     976             :         }
     977   509092662 :         if (!radix_tree_delete(&pag->pag_ici_root,
     978   509092875 :                                 XFS_INO_TO_AGINO(ip->i_mount, ino)))
     979           0 :                 ASSERT(0);
     980   509092662 :         xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
     981   509092755 :         spin_unlock(&pag->pag_ici_lock);
     982             : 
     983             :         /*
     984             :          * Here we do an (almost) spurious inode lock in order to coordinate
     985             :          * with inode cache radix tree lookups.  This is because the lookup
     986             :          * can reference the inodes in the cache without taking references.
     987             :          *
     988             :          * We make that OK here by ensuring that we wait until the inode is
     989             :          * unlocked after the lookup before we go ahead and free it.
     990             :          */
     991   509092898 :         xfs_ilock(ip, XFS_ILOCK_EXCL);
     992   509092838 :         ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
     993   509092838 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     994   564257913 :         ASSERT(xfs_inode_clean(ip));
     995             : 
     996   509092901 :         __xfs_inode_free(ip);
     997   509092901 :         return;
     998             : 
     999     7744294 : out_clear_flush:
    1000     7744294 :         xfs_iflags_clear(ip, XFS_IFLUSHING);
    1001    20191305 : out_iunlock:
    1002    20191305 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1003    20191193 : out:
    1004    20191193 :         xfs_iflags_clear(ip, XFS_IRECLAIM);
    1005             : }
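                      :
                      : /*
                      :  * Editor's sketch: the lookup-side counterpart to the RCU coordination
                      :  * above.  This is a simplified, hypothetical fragment (not the actual
                      :  * cache-hit code) showing why clearing ip->i_ino under i_flags_lock
                      :  * matters: an RCU-protected lookup must revalidate the inode number
                      :  * under that same lock so a freed or reclaiming inode is skipped.
                      :  * 'ino' and 'agino' are assumed locals of the hypothetical caller.
                      :  */
                      : #if 0   /* illustrative only */
                      :         rcu_read_lock();
                      :         ip = radix_tree_lookup(&pag->pag_ici_root, agino);
                      :         if (ip) {
                      :                 spin_lock(&ip->i_flags_lock);
                      :                 if (ip->i_ino != ino || (ip->i_flags & XFS_IRECLAIM)) {
                      :                         /* freed or being reclaimed: treat as a miss */
                      :                         ip = NULL;
                      :                 }
                      :                 spin_unlock(&ip->i_flags_lock);
                      :         }
                      :         rcu_read_unlock();
                      : #endif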
    1006             : 
    1007             : /* Reclaim sick inodes if we're unmounting or the fs went down. */
    1008             : static inline bool
    1009      545189 : xfs_want_reclaim_sick(
    1010             :         struct xfs_mount        *mp)
    1011             : {
    1012     1611236 :         return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
    1013             :                xfs_is_shutdown(mp);
    1014             : }
    1015             : 
    1016             : void
    1017       24331 : xfs_reclaim_inodes(
    1018             :         struct xfs_mount        *mp)
    1019             : {
    1020       24331 :         struct xfs_icwalk       icw = {
    1021             :                 .icw_flags      = 0,
    1022             :         };
    1023             : 
    1024       24331 :         if (xfs_want_reclaim_sick(mp))
    1025       24331 :                 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
    1026             : 
    1027       64692 :         while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
    1028       40361 :                 xfs_ail_push_all_sync(mp->m_ail);
    1029       40361 :                 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
    1030             :         }
    1031       24331 : }
    1032             : 
    1033             : /*
    1034             :  * The shrinker infrastructure determines how many inodes we should scan for
    1035             :  * reclaim. We want as many clean inodes ready to reclaim as possible, so we
    1036             :  * push the AIL here. We also want to proactively free up memory if we can to
     1037             :  * minimise the amount of work memory reclaim has to do, so we kick the
    1038             :  * background reclaim if it isn't already scheduled.
    1039             :  */
    1040             : long
    1041      520858 : xfs_reclaim_inodes_nr(
    1042             :         struct xfs_mount        *mp,
    1043             :         unsigned long           nr_to_scan)
    1044             : {
    1045      520858 :         struct xfs_icwalk       icw = {
    1046             :                 .icw_flags      = XFS_ICWALK_FLAG_SCAN_LIMIT,
    1047      520858 :                 .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
    1048             :         };
    1049             : 
    1050      520858 :         if (xfs_want_reclaim_sick(mp))
    1051           2 :                 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
    1052             : 
    1053             :         /* kick background reclaimer and push the AIL */
    1054      520858 :         xfs_reclaim_work_queue(mp);
    1055      520858 :         xfs_ail_push_all(mp->m_ail);
    1056             : 
    1057      520858 :         xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
    1058      520858 :         return 0;
    1059             : }
    1060             : 
    1061             : /*
    1062             :  * Return the number of reclaimable inodes in the filesystem for
    1063             :  * the shrinker to determine how much to reclaim.
    1064             :  */
    1065             : long
    1066     1035540 : xfs_reclaim_inodes_count(
    1067             :         struct xfs_mount        *mp)
    1068             : {
    1069     1035540 :         struct xfs_perag        *pag;
    1070     1035540 :         xfs_agnumber_t          ag = 0;
    1071     1035540 :         long                    reclaimable = 0;
    1072             : 
    1073     3474045 :         while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
    1074     2438504 :                 ag = pag->pag_agno + 1;
    1075     2438504 :                 reclaimable += pag->pag_ici_reclaimable;
    1076     2438504 :                 xfs_perag_put(pag);
    1077             :         }
    1078     1035541 :         return reclaimable;
    1079             : }
    1080             : 
    1081             : STATIC bool
    1082      110851 : xfs_icwalk_match_id(
    1083             :         struct xfs_inode        *ip,
    1084             :         struct xfs_icwalk       *icw)
    1085             : {
    1086      110851 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
    1087             :             !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
    1088             :                 return false;
    1089             : 
    1090      110851 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
    1091             :             !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
    1092             :                 return false;
    1093             : 
    1094      110851 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
    1095           0 :             ip->i_projid != icw->icw_prid)
    1096           0 :                 return false;
    1097             : 
    1098             :         return true;
    1099             : }
    1100             : 
    1101             : /*
    1102             :  * A union-based inode filtering algorithm. Process the inode if any of the
    1103             :  * criteria match. This is for global/internal scans only.
    1104             :  */
    1105             : STATIC bool
    1106        1508 : xfs_icwalk_match_id_union(
    1107             :         struct xfs_inode        *ip,
    1108             :         struct xfs_icwalk       *icw)
    1109             : {
    1110        1508 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
    1111             :             uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
    1112             :                 return true;
    1113             : 
    1114         513 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
    1115             :             gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
    1116             :                 return true;
    1117             : 
    1118           0 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
    1119           0 :             ip->i_projid == icw->icw_prid)
    1120           0 :                 return true;
    1121             : 
    1122             :         return false;
    1123             : }
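                      :
                      : /*
                      :  * Editor's sketch (a hypothetical fragment, not code from this file):
                      :  * how a caller selects between the two matching modes above.  With the
                      :  * default intersection matching, every flagged criterion must match;
                      :  * with XFS_ICWALK_FLAG_UNION, any single criterion suffices.
                      :  */
                      : #if 0   /* illustrative only */
                      :         struct xfs_icwalk       icw = {
                      :                 /* union semantics: match inodes owned by the uid OR the gid */
                      :                 .icw_flags      = XFS_ICWALK_FLAG_UNION |
                      :                                   XFS_ICWALK_FLAG_UID |
                      :                                   XFS_ICWALK_FLAG_GID,
                      :                 .icw_uid        = GLOBAL_ROOT_UID,
                      :                 .icw_gid        = GLOBAL_ROOT_GID,
                      :         };
                      :         /* without XFS_ICWALK_FLAG_UNION, both uid AND gid must match */
                      : #endif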
    1124             : 
    1125             : /*
    1126             :  * Is this inode @ip eligible for eof/cow block reclamation, given some
    1127             :  * filtering parameters @icw?  The inode is eligible if @icw is null or
    1128             :  * if the predicate functions match.
    1129             :  */
    1130             : static bool
    1131      814159 : xfs_icwalk_match(
    1132             :         struct xfs_inode        *ip,
    1133             :         struct xfs_icwalk       *icw)
    1134             : {
    1135      814159 :         bool                    match;
    1136             : 
    1137      814159 :         if (!icw)
    1138             :                 return true;
    1139             : 
    1140      112377 :         if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
    1141        1508 :                 match = xfs_icwalk_match_id_union(ip, icw);
    1142             :         else
    1143      110869 :                 match = xfs_icwalk_match_id(ip, icw);
    1144      112377 :         if (!match)
    1145             :                 return false;
    1146             : 
    1147             :         /* skip the inode if the file size is too small */
    1148      112388 :         if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
    1149           0 :             XFS_ISIZE(ip) < icw->icw_min_file_size)
    1150           0 :                 return false;
    1151             : 
    1152             :         return true;
    1153             : }
    1154             : 
    1155             : /*
    1156             :  * This is a fast pass over the inode cache to try to get reclaim moving on as
    1157             :  * many inodes as possible in a short period of time. It kicks itself every few
    1158             :  * seconds, as well as being kicked by the inode cache shrinker when memory
    1159             :  * goes low.
    1160             :  */
    1161             : void
    1162       37888 : xfs_reclaim_worker(
    1163             :         struct work_struct *work)
    1164             : {
    1165       37888 :         struct xfs_mount *mp = container_of(to_delayed_work(work),
    1166             :                                         struct xfs_mount, m_reclaim_work);
    1167             : 
    1168       37888 :         xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
    1169       37888 :         xfs_reclaim_work_queue(mp);
    1170       37888 : }
    1171             : 
    1172             : STATIC int
    1173     1381457 : xfs_inode_free_eofblocks(
    1174             :         struct xfs_inode        *ip,
    1175             :         struct xfs_icwalk       *icw,
    1176             :         unsigned int            *lockflags)
    1177             : {
    1178     1381457 :         bool                    wait;
    1179             : 
    1180     1381457 :         wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
    1181             : 
    1182     2762761 :         if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
    1183             :                 return 0;
    1184             : 
    1185             :         /*
     1186             :          * If the mapping is dirty, the operation can block and wait for some
     1187             :          * time. Unless we are doing a waiting (sync) scan, skip it.
    1188             :          */
    1189     1205517 :         if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
    1190             :                 return 0;
    1191             : 
    1192      663077 :         if (!xfs_icwalk_match(ip, icw))
    1193             :                 return 0;
    1194             : 
    1195             :         /*
    1196             :          * If the caller is waiting, return -EAGAIN to keep the background
    1197             :          * scanner moving and revisit the inode in a subsequent pass.
    1198             :          */
    1199      663077 :         if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
    1200      106413 :                 if (wait)
    1201             :                         return -EAGAIN;
    1202       33910 :                 return 0;
    1203             :         }
    1204      556840 :         *lockflags |= XFS_IOLOCK_EXCL;
    1205             : 
    1206      556840 :         if (xfs_can_free_eofblocks(ip, false))
    1207      186403 :                 return xfs_free_eofblocks(ip);
    1208             : 
    1209             :         /* inode could be preallocated or append-only */
    1210      370437 :         trace_xfs_inode_free_eofblocks_invalid(ip);
    1211      370432 :         xfs_inode_clear_eofblocks_tag(ip);
    1212      370432 :         return 0;
    1213             : }
    1214             : 
    1215             : static void
    1216     4197407 : xfs_blockgc_set_iflag(
    1217             :         struct xfs_inode        *ip,
    1218             :         unsigned long           iflag)
    1219             : {
    1220     4197407 :         struct xfs_mount        *mp = ip->i_mount;
    1221     4197407 :         struct xfs_perag        *pag;
    1222             : 
    1223     4197407 :         ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
    1224             : 
    1225             :         /*
    1226             :          * Don't bother locking the AG and looking up in the radix trees
    1227             :          * if we already know that we have the tag set.
    1228             :          */
    1229     4197407 :         if (ip->i_flags & iflag)
    1230             :                 return;
    1231     2919447 :         spin_lock(&ip->i_flags_lock);
    1232     2919434 :         ip->i_flags |= iflag;
    1233     2919434 :         spin_unlock(&ip->i_flags_lock);
    1234             : 
    1235     2919424 :         pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    1236     2919421 :         spin_lock(&pag->pag_ici_lock);
    1237             : 
    1238     2919438 :         xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
    1239             :                         XFS_ICI_BLOCKGC_TAG);
    1240             : 
    1241     2919443 :         spin_unlock(&pag->pag_ici_lock);
    1242     2919445 :         xfs_perag_put(pag);
    1243             : }
    1244             : 
    1245             : void
    1246     3409799 : xfs_inode_set_eofblocks_tag(
    1247             :         xfs_inode_t     *ip)
    1248             : {
    1249     3409799 :         trace_xfs_inode_set_eofblocks_tag(ip);
    1250     3409769 :         return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
    1251             : }
    1252             : 
    1253             : static void
    1254    15481560 : xfs_blockgc_clear_iflag(
    1255             :         struct xfs_inode        *ip,
    1256             :         unsigned long           iflag)
    1257             : {
    1258    15481560 :         struct xfs_mount        *mp = ip->i_mount;
    1259    15481560 :         struct xfs_perag        *pag;
    1260    15481560 :         bool                    clear_tag;
    1261             : 
    1262    15481560 :         ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
    1263             : 
    1264    15481560 :         spin_lock(&ip->i_flags_lock);
    1265    15481784 :         ip->i_flags &= ~iflag;
    1266    15481784 :         clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
    1267    15481784 :         spin_unlock(&ip->i_flags_lock);
    1268             : 
    1269    15480746 :         if (!clear_tag)
    1270             :                 return;
    1271             : 
    1272    13634851 :         pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    1273    13634791 :         spin_lock(&pag->pag_ici_lock);
    1274             : 
    1275    13634986 :         xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
    1276             :                         XFS_ICI_BLOCKGC_TAG);
    1277             : 
    1278    13635776 :         spin_unlock(&pag->pag_ici_lock);
    1279    13635560 :         xfs_perag_put(pag);
    1280             : }
    1281             : 
    1282             : void
    1283     9610828 : xfs_inode_clear_eofblocks_tag(
    1284             :         xfs_inode_t     *ip)
    1285             : {
    1286     9610828 :         trace_xfs_inode_clear_eofblocks_tag(ip);
    1287     9610902 :         return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
    1288             : }
    1289             : 
    1290             : /*
    1291             :  * Set ourselves up to free CoW blocks from this file.  If it's already clean
    1292             :  * then we can bail out quickly, but otherwise we must back off if the file
    1293             :  * is undergoing some kind of write.
    1294             :  */
    1295             : static bool
    1296      328142 : xfs_prep_free_cowblocks(
    1297             :         struct xfs_inode        *ip)
    1298             : {
    1299             :         /*
    1300             :          * Just clear the tag if we have an empty cow fork or none at all. It's
    1301             :          * possible the inode was fully unshared since it was originally tagged.
    1302             :          */
    1303      656284 :         if (!xfs_inode_has_cow_data(ip)) {
    1304       20028 :                 trace_xfs_inode_free_cowblocks_invalid(ip);
    1305       20028 :                 xfs_inode_clear_cowblocks_tag(ip);
    1306       20028 :                 return false;
    1307             :         }
    1308             : 
    1309             :         /*
     1310             :          * If the mapping is dirty or under writeback, we cannot touch the
     1311             :          * CoW fork.  Leave it alone if we're in the midst of a direct I/O.
    1312             :          */
    1313      308114 :         if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
    1314      303093 :             mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
    1315      301323 :             mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
    1316             :             atomic_read(&VFS_I(ip)->i_dio_count))
    1317        6794 :                 return false;
    1318             : 
    1319             :         return true;
    1320             : }
    1321             : 
    1322             : /*
    1323             :  * Automatic CoW Reservation Freeing
    1324             :  *
    1325             :  * These functions automatically garbage collect leftover CoW reservations
    1326             :  * that were made on behalf of a cowextsize hint when we start to run out
    1327             :  * of quota or when the reservations sit around for too long.  If the file
    1328             :  * has dirty pages or is undergoing writeback, its CoW reservations will
    1329             :  * be retained.
    1330             :  *
    1331             :  * The actual garbage collection piggybacks off the same code that runs
    1332             :  * the speculative EOF preallocation garbage collector.
    1333             :  */
    1334             : STATIC int
    1335     1308735 : xfs_inode_free_cowblocks(
    1336             :         struct xfs_inode        *ip,
    1337             :         struct xfs_icwalk       *icw,
    1338             :         unsigned int            *lockflags)
    1339             : {
    1340     1308735 :         bool                    wait;
    1341     1308735 :         int                     ret = 0;
    1342             : 
    1343     1308735 :         wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
    1344             : 
    1345     2617767 :         if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
    1346             :                 return 0;
    1347             : 
    1348      177947 :         if (!xfs_prep_free_cowblocks(ip))
    1349             :                 return 0;
    1350             : 
    1351      151111 :         if (!xfs_icwalk_match(ip, icw))
    1352             :                 return 0;
    1353             : 
    1354             :         /*
    1355             :          * If the caller is waiting, return -EAGAIN to keep the background
    1356             :          * scanner moving and revisit the inode in a subsequent pass.
    1357             :          */
    1358      300224 :         if (!(*lockflags & XFS_IOLOCK_EXCL) &&
    1359      149084 :             !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
    1360         901 :                 if (wait)
    1361             :                         return -EAGAIN;
    1362         406 :                 return 0;
    1363             :         }
    1364      150239 :         *lockflags |= XFS_IOLOCK_EXCL;
    1365             : 
    1366      150239 :         if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
    1367           0 :                 if (wait)
    1368             :                         return -EAGAIN;
    1369           0 :                 return 0;
    1370             :         }
    1371      150222 :         *lockflags |= XFS_MMAPLOCK_EXCL;
    1372             : 
    1373             :         /*
    1374             :          * Check again, nobody else should be able to dirty blocks or change
    1375             :          * the reflink iflag now that we have the first two locks held.
    1376             :          */
    1377      150222 :         if (xfs_prep_free_cowblocks(ip))
    1378      150222 :                 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
    1379             :         return ret;
    1380             : }
    1381             : 
    1382             : void
    1383      787606 : xfs_inode_set_cowblocks_tag(
    1384             :         xfs_inode_t     *ip)
    1385             : {
    1386      787606 :         trace_xfs_inode_set_cowblocks_tag(ip);
    1387      787606 :         return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
    1388             : }
    1389             : 
    1390             : void
    1391     5870686 : xfs_inode_clear_cowblocks_tag(
    1392             :         xfs_inode_t     *ip)
    1393             : {
    1394     5870686 :         trace_xfs_inode_clear_cowblocks_tag(ip);
    1395     5870702 :         return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
    1396             : }
    1397             : 
    1398             : /* Disable post-EOF and CoW block auto-reclamation. */
    1399             : void
    1400       72995 : xfs_blockgc_stop(
    1401             :         struct xfs_mount        *mp)
    1402             : {
    1403       72995 :         struct xfs_perag        *pag;
    1404       72995 :         xfs_agnumber_t          agno;
    1405             : 
    1406       72995 :         if (!xfs_clear_blockgc_enabled(mp))
    1407          14 :                 return;
    1408             : 
    1409      402101 :         for_each_perag(mp, agno, pag)
    1410      329120 :                 cancel_delayed_work_sync(&pag->pag_blockgc_work);
    1411       72981 :         trace_xfs_blockgc_stop(mp, __return_address);
    1412             : }
    1413             : 
    1414             : /* Enable post-EOF and CoW block auto-reclamation. */
    1415             : void
    1416       73013 : xfs_blockgc_start(
    1417             :         struct xfs_mount        *mp)
    1418             : {
    1419       73013 :         struct xfs_perag        *pag;
    1420       73013 :         xfs_agnumber_t          agno;
    1421             : 
    1422       73013 :         if (xfs_set_blockgc_enabled(mp))
    1423             :                 return;
    1424             : 
    1425       73009 :         trace_xfs_blockgc_start(mp, __return_address);
    1426      241127 :         for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
    1427      168118 :                 xfs_blockgc_queue(pag);
    1428             : }
    1429             : 
    1430             : /* Don't try to run block gc on an inode that's in any of these states. */
    1431             : #define XFS_BLOCKGC_NOGRAB_IFLAGS       (XFS_INEW | \
    1432             :                                          XFS_NEED_INACTIVE | \
    1433             :                                          XFS_INACTIVATING | \
    1434             :                                          XFS_IRECLAIMABLE | \
    1435             :                                          XFS_IRECLAIM)
    1436             : /*
    1437             :  * Decide if the given @ip is eligible for garbage collection of speculative
    1438             :  * preallocations, and grab it if so.  Returns true if it's ready to go or
    1439             :  * false if we should just ignore it.
    1440             :  */
    1441             : static bool
    1442     1513800 : xfs_blockgc_igrab(
    1443             :         struct xfs_inode        *ip)
    1444             : {
    1445     1513800 :         struct inode            *inode = VFS_I(ip);
    1446             : 
    1447     1513800 :         ASSERT(rcu_read_lock_held());
    1448             : 
    1449             :         /* Check for stale RCU freed inode */
    1450     1513800 :         spin_lock(&ip->i_flags_lock);
    1451     1513973 :         if (!ip->i_ino)
    1452           0 :                 goto out_unlock_noent;
    1453             : 
    1454     1513973 :         if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
    1455      132749 :                 goto out_unlock_noent;
    1456     1381224 :         spin_unlock(&ip->i_flags_lock);
    1457             : 
    1458             :         /* nothing to sync during shutdown */
    1459     2762438 :         if (xfs_is_shutdown(ip->i_mount))
    1460             :                 return false;
    1461             : 
     1462             :         /* If we can't grab the inode, it must be on its way to reclaim. */
    1463     1381198 :         if (!igrab(inode))
    1464         272 :                 return false;
    1465             : 
    1466             :         /* inode is valid */
    1467             :         return true;
    1468             : 
    1469      132749 : out_unlock_noent:
    1470      132749 :         spin_unlock(&ip->i_flags_lock);
    1471      132749 :         return false;
    1472             : }
    1473             : 
    1474             : /* Scan one incore inode for block preallocations that we can remove. */
    1475             : static int
    1476     1381458 : xfs_blockgc_scan_inode(
    1477             :         struct xfs_inode        *ip,
    1478             :         struct xfs_icwalk       *icw)
    1479             : {
    1480     1381458 :         unsigned int            lockflags = 0;
    1481     1381458 :         int                     error;
    1482             : 
    1483     1381458 :         error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
    1484     1381283 :         if (error)
    1485       72482 :                 goto unlock;
    1486             : 
    1487     1308801 :         error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
    1488     1381458 : unlock:
    1489     1381458 :         if (lockflags)
    1490      705079 :                 xfs_iunlock(ip, lockflags);
    1491     1381395 :         xfs_irele(ip);
    1492     1381467 :         return error;
    1493             : }
    1494             : 
    1495             : /* Background worker that trims preallocated space. */
    1496             : void
    1497      486516 : xfs_blockgc_worker(
    1498             :         struct work_struct      *work)
    1499             : {
    1500      486516 :         struct xfs_perag        *pag = container_of(to_delayed_work(work),
    1501             :                                         struct xfs_perag, pag_blockgc_work);
    1502      486516 :         struct xfs_mount        *mp = pag->pag_mount;
    1503      486516 :         int                     error;
    1504             : 
    1505      486516 :         trace_xfs_blockgc_worker(mp, __return_address);
    1506             : 
    1507      486520 :         error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
    1508      486486 :         if (error)
    1509           0 :                 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
    1510             :                                 pag->pag_agno, error);
    1511      486486 :         xfs_blockgc_queue(pag);
    1512      486423 : }
    1513             : 
    1514             : /*
    1515             :  * Try to free space in the filesystem by purging inactive inodes, eofblocks
    1516             :  * and cowblocks.
    1517             :  */
    1518             : int
    1519      436571 : xfs_blockgc_free_space(
    1520             :         struct xfs_mount        *mp,
    1521             :         struct xfs_icwalk       *icw)
    1522             : {
    1523      436571 :         int                     error;
    1524             : 
    1525      436571 :         trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
    1526             : 
    1527      436482 :         error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
    1528      436432 :         if (error)
    1529             :                 return error;
    1530             : 
    1531      436394 :         return xfs_inodegc_flush(mp);
    1532             : }
    1533             : 
    1534             : /*
    1535             :  * Reclaim all the free space that we can by scheduling the background blockgc
    1536             :  * and inodegc workers immediately and waiting for them all to clear.
    1537             :  */
    1538             : int
    1539     4302302 : xfs_blockgc_flush_all(
    1540             :         struct xfs_mount        *mp)
    1541             : {
    1542     4302302 :         struct xfs_perag        *pag;
    1543     4302302 :         xfs_agnumber_t          agno;
    1544             : 
    1545     4302302 :         trace_xfs_blockgc_flush_all(mp, __return_address);
    1546             : 
    1547             :         /*
    1548             :          * For each blockgc worker, move its queue time up to now.  If it
    1549             :          * wasn't queued, it will not be requeued.  Then flush whatever's
    1550             :          * left.
    1551             :          */
    1552     4739391 :         for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
    1553      437065 :                 mod_delayed_work(pag->pag_mount->m_blockgc_wq,
    1554             :                                 &pag->pag_blockgc_work, 0);
    1555             : 
    1556     4734903 :         for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
    1557      432784 :                 flush_delayed_work(&pag->pag_blockgc_work);
    1558             : 
    1559     4301975 :         return xfs_inodegc_flush(mp);
    1560             : }
    1561             : 
    1562             : /*
    1563             :  * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
    1564             :  * quota caused an allocation failure, so we make a best effort by including
    1565             :  * each quota under low free space conditions (less than 1% free space) in the
    1566             :  * scan.
    1567             :  *
    1568             :  * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
    1569             :  * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
    1570             :  * MMAPLOCK.
    1571             :  */
    1572             : int
    1573        6832 : xfs_blockgc_free_dquots(
    1574             :         struct xfs_mount        *mp,
    1575             :         struct xfs_dquot        *udqp,
    1576             :         struct xfs_dquot        *gdqp,
    1577             :         struct xfs_dquot        *pdqp,
    1578             :         unsigned int            iwalk_flags)
    1579             : {
    1580        6832 :         struct xfs_icwalk       icw = {0};
    1581        6832 :         bool                    do_work = false;
    1582             : 
    1583        6832 :         if (!udqp && !gdqp && !pdqp)
    1584             :                 return 0;
    1585             : 
    1586             :         /*
    1587             :          * Run a scan to free blocks using the union filter to cover all
    1588             :          * applicable quotas in a single scan.
    1589             :          */
    1590        6832 :         icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
    1591             : 
    1592        6832 :         if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
    1593        4657 :                 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
    1594        4657 :                 icw.icw_flags |= XFS_ICWALK_FLAG_UID;
    1595        4657 :                 do_work = true;
    1596             :         }
    1597             : 
     1598        6832 :         if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
    1599        6588 :                 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
    1600        6588 :                 icw.icw_flags |= XFS_ICWALK_FLAG_GID;
    1601        6588 :                 do_work = true;
    1602             :         }
    1603             : 
    1604        6832 :         if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
    1605        6612 :                 icw.icw_prid = pdqp->q_id;
    1606        6612 :                 icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
    1607        6612 :                 do_work = true;
    1608             :         }
    1609             : 
    1610        6832 :         if (!do_work)
    1611             :                 return 0;
    1612             : 
    1613        6694 :         return xfs_blockgc_free_space(mp, &icw);
    1614             : }
    1615             : 
    1616             : /* Run cow/eofblocks scans on the quotas attached to the inode. */
    1617             : int
    1618        4514 : xfs_blockgc_free_quota(
    1619             :         struct xfs_inode        *ip,
    1620             :         unsigned int            iwalk_flags)
    1621             : {
    1622        4514 :         return xfs_blockgc_free_dquots(ip->i_mount,
    1623             :                         xfs_inode_dquot(ip, XFS_DQTYPE_USER),
    1624             :                         xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
    1625             :                         xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
    1626             : }
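                      :
                      : /*
                      :  * Editor's sketch: the typical caller pattern for the scans above,
                      :  * loosely modelled on the reservation retry loop in
                      :  * xfs_trans_alloc_inode().  This is a simplified, hypothetical
                      :  * fragment ('mp', 'resv', 'dblocks', 'tp' and 'ip' are assumed to be
                      :  * set up by the caller): when a quota reservation fails, run one
                      :  * blockgc pass against the inode's dquots and retry before giving up.
                      :  */
                      : #if 0   /* illustrative only */
                      :         bool            retried = false;
                      :         int             error;
                      : retry:
                      :         error = xfs_trans_alloc(mp, resv, dblocks, 0, 0, &tp);
                      :         if (error)
                      :                 return error;
                      :         error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, 0, false);
                      :         if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
                      :                 xfs_trans_cancel(tp);
                      :                 xfs_blockgc_free_quota(ip, 0);
                      :                 retried = true;
                      :                 goto retry;
                      :         }
                      : #endif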
    1627             : 
    1628             : /* XFS Inode Cache Walking Code */
    1629             : 
    1630             : /*
    1631             :  * The inode lookup is done in batches to keep the amount of lock traffic and
    1632             :  * radix tree lookups to a minimum. The batch size is a trade off between
    1633             :  * lookup reduction and stack usage. This is in the reclaim path, so we can't
    1634             :  * be too greedy.
    1635             :  */
    1636             : #define XFS_LOOKUP_BATCH        32
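                      :
                      : /*
                      :  * Editor's note: a worked example of the stack cost mentioned above.
                      :  * On a 64-bit kernel, the on-stack batch array in xfs_icwalk_ag()
                      :  * below costs XFS_LOOKUP_BATCH * sizeof(struct xfs_inode *) =
                      :  * 32 * 8 = 256 bytes, which is the trade-off being made against
                      :  * per-lookup lock traffic.
                      :  */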
    1637             : 
    1638             : 
    1639             : /*
    1640             :  * Decide if we want to grab this inode in anticipation of doing work towards
    1641             :  * the goal.
    1642             :  */
    1643             : static inline bool
    1644   530857520 : xfs_icwalk_igrab(
    1645             :         enum xfs_icwalk_goal    goal,
    1646             :         struct xfs_inode        *ip,
    1647             :         struct xfs_icwalk       *icw)
    1648             : {
    1649   530857520 :         switch (goal) {
    1650     1513795 :         case XFS_ICWALK_BLOCKGC:
    1651     1513795 :                 return xfs_blockgc_igrab(ip);
    1652   529343725 :         case XFS_ICWALK_RECLAIM:
    1653   529343725 :                 return xfs_reclaim_igrab(ip, icw);
    1654             :         default:
    1655             :                 return false;
    1656             :         }
    1657             : }
    1658             : 
    1659             : /*
    1660             :  * Process an inode.  Each processing function must handle any state changes
    1661             :  * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
    1662             :  */
    1663             : static inline int
    1664   530659790 : xfs_icwalk_process_inode(
    1665             :         enum xfs_icwalk_goal    goal,
    1666             :         struct xfs_inode        *ip,
    1667             :         struct xfs_perag        *pag,
    1668             :         struct xfs_icwalk       *icw)
    1669             : {
    1670   530659790 :         int                     error = 0;
    1671             : 
    1672   530659790 :         switch (goal) {
    1673     1381512 :         case XFS_ICWALK_BLOCKGC:
    1674     1381512 :                 error = xfs_blockgc_scan_inode(ip, icw);
    1675     1381512 :                 break;
    1676   529278278 :         case XFS_ICWALK_RECLAIM:
    1677   529278278 :                 xfs_reclaim_inode(ip, pag);
    1678   529278278 :                 break;
    1679             :         }
    1680   530661593 :         return error;
    1681             : }
    1682             : 
    1683             : /*
    1684             :  * For a given per-AG structure @pag and a goal, grab qualifying inodes and
    1685             :  * process them in some manner.
    1686             :  */
    1687             : static int
    1688     2680392 : xfs_icwalk_ag(
    1689             :         struct xfs_perag        *pag,
    1690             :         enum xfs_icwalk_goal    goal,
    1691             :         struct xfs_icwalk       *icw)
    1692             : {
    1693     2680392 :         struct xfs_mount        *mp = pag->pag_mount;
    1694     2680392 :         uint32_t                first_index;
    1695     2680392 :         int                     last_error = 0;
    1696     2746684 :         int                     skipped;
    1697     2746684 :         bool                    done;
    1698     2746684 :         int                     nr_found;
    1699             : 
    1700     2746684 : restart:
    1701     2746684 :         done = false;
    1702     2746684 :         skipped = 0;
    1703     2746684 :         if (goal == XFS_ICWALK_RECLAIM)
    1704     2110836 :                 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
    1705             :         else
    1706             :                 first_index = 0;
    1707             :         nr_found = 0;
    1708    18692964 :         do {
    1709    18692964 :                 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
    1710    18692964 :                 int             error = 0;
    1711    18692964 :                 int             i;
    1712             : 
    1713    18692964 :                 rcu_read_lock();
    1714             : 
    1715    18692267 :                 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
    1716             :                                 (void **) batch, first_index,
    1717             :                                 XFS_LOOKUP_BATCH, goal);
    1718    18692683 :                 if (!nr_found) {
    1719     1154065 :                         done = true;
    1720     1154065 :                         rcu_read_unlock();
    1721     2746744 :                         break;
    1722             :                 }
    1723             : 
    1724             :                 /*
     1725             :                  * Grab the inodes before we drop the lock. If we found
     1726             :                  * nothing, nr_found == 0 and the loop will be skipped.
    1727             :                  */
    1728   548396028 :                 for (i = 0; i < nr_found; i++) {
    1729   530857051 :                         struct xfs_inode *ip = batch[i];
    1730             : 
    1731   530857051 :                         if (done || !xfs_icwalk_igrab(goal, ip, icw))
    1732      213004 :                                 batch[i] = NULL;
    1733             : 
    1734             :                         /*
    1735             :                          * Update the index for the next lookup. Catch
    1736             :                          * overflows into the next AG range which can occur if
    1737             :                          * we have inodes in the last block of the AG and we
    1738             :                          * are currently pointing to the last inode.
    1739             :                          *
    1740             :                          * Because we may see inodes that are from the wrong AG
    1741             :                          * due to RCU freeing and reallocation, only update the
     1742             :                          * index if it lies in this AG. It was a race that led
    1743             :                          * us to see this inode, so another lookup from the
    1744             :                          * same index will not find it again.
    1745             :                          */
    1746   530857410 :                         if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
    1747        4746 :                                 continue;
    1748   530852664 :                         first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
    1749   530852664 :                         if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
    1750           0 :                                 done = true;
    1751             :                 }
    1752             : 
    1753             :                 /* unlock now we've grabbed the inodes. */
    1754    17538977 :                 rcu_read_unlock();
    1755             : 
    1756   565951823 :                 for (i = 0; i < nr_found; i++) {
    1757   530873923 :                         if (!batch[i])
    1758      213201 :                                 continue;
    1759   530660722 :                         error = xfs_icwalk_process_inode(goal, batch[i], pag,
    1760             :                                         icw);
    1761   530660679 :                         if (error == -EAGAIN) {
    1762       72968 :                                 skipped++;
    1763       72968 :                                 continue;
    1764             :                         }
    1765   530587711 :                         if (error && last_error != -EFSCORRUPTED)
    1766           0 :                                 last_error = error;
    1767             :                 }
    1768             : 
    1769             :                 /* bail out if the filesystem is corrupted.  */
    1770    17538923 :                 if (error == -EFSCORRUPTED)
    1771             :                         break;
    1772             : 
    1773    17538923 :                 cond_resched();
    1774             : 
    1775    17538992 :                 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
    1776     5596250 :                         icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
    1777     5596250 :                         if (icw->icw_scan_limit <= 0)
    1778             :                                 break;
    1779             :                 }
    1780    15946280 :         } while (nr_found && !done);
    1781             : 
    1782     2746744 :         if (goal == XFS_ICWALK_RECLAIM) {
    1783     2110836 :                 if (done)
    1784      518124 :                         first_index = 0;
    1785     2110836 :                 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
    1786             :         }
    1787             : 
    1788     2746744 :         if (skipped) {
    1789       66366 :                 delay(1);
    1790       66292 :                 goto restart;
    1791             :         }
    1792     2680378 :         return last_error;
    1793             : }
    1794             : 
    1795             : /* Walk all incore inodes to achieve a given goal. */
    1796             : static int
    1797     1035585 : xfs_icwalk(
    1798             :         struct xfs_mount        *mp,
    1799             :         enum xfs_icwalk_goal    goal,
    1800             :         struct xfs_icwalk       *icw)
    1801             : {
    1802     1035585 :         struct xfs_perag        *pag;
    1803     1035585 :         int                     error = 0;
    1804     1035585 :         int                     last_error = 0;
    1805     1035585 :         xfs_agnumber_t          agno;
    1806             : 
    1807     3229513 :         for_each_perag_tag(mp, agno, pag, goal) {
    1808     2193956 :                 error = xfs_icwalk_ag(pag, goal, icw);
    1809     2193928 :                 if (error) {
    1810           0 :                         last_error = error;
    1811           0 :                         if (error == -EFSCORRUPTED) {
    1812           0 :                                 xfs_perag_rele(pag);
    1813           0 :                                 break;
    1814             :                         }
    1815             :                 }
    1816             :         }
    1817     1035583 :         return last_error;
    1818             :         BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
    1819             : }
    1820             : 
    1821             : #ifdef DEBUG
    1822             : static void
    1823           0 : xfs_check_delalloc(
    1824             :         struct xfs_inode        *ip,
    1825             :         int                     whichfork)
    1826             : {
    1827           0 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
    1828           0 :         struct xfs_bmbt_irec    got;
    1829           0 :         struct xfs_iext_cursor  icur;
    1830             : 
    1831           0 :         if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
    1832           0 :                 return;
    1833           0 :         do {
    1834           0 :                 if (isnullstartblock(got.br_startblock)) {
    1835           0 :                         xfs_warn(ip->i_mount,
    1836             :         "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
    1837             :                                 ip->i_ino,
    1838             :                                 whichfork == XFS_DATA_FORK ? "data" : "cow",
    1839             :                                 got.br_startoff, got.br_blockcount);
    1840             :                 }
    1841           0 :         } while (xfs_iext_next_extent(ifp, &icur, &got));
    1842             : }
    1843             : #else
    1844             : #define xfs_check_delalloc(ip, whichfork)       do { } while (0)
    1845             : #endif
    1846             : 
    1847             : /* Schedule the inode for reclaim. */
    1848             : static void
    1849  1075637417 : xfs_inodegc_set_reclaimable(
    1850             :         struct xfs_inode        *ip)
    1851             : {
    1852  1075637417 :         struct xfs_mount        *mp = ip->i_mount;
    1853  1075637417 :         struct xfs_perag        *pag;
    1854             : 
    1855  2151274834 :         if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
    1856           0 :                 xfs_check_delalloc(ip, XFS_DATA_FORK);
    1857           0 :                 xfs_check_delalloc(ip, XFS_COW_FORK);
    1858           0 :                 ASSERT(0);
    1859             :         }
    1860             : 
    1861  1075637417 :         pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    1862  1075597272 :         spin_lock(&pag->pag_ici_lock);
    1863  1075706332 :         spin_lock(&ip->i_flags_lock);
    1864             : 
    1865  2151422762 :         if (!xfs_is_shutdown(pag->pag_mount)) {
    1866             :                 /* had better not be on any unlinked list! */
    1867   770862489 :                 ASSERT(!xfs_inode_on_unlinked_list(ip));
    1868   770862489 :                 if (xfs_inode_on_unlinked_list(ip))
    1869           1 :                         xfs_emerg(pag->pag_mount, "IUNLINK mark reclaim ino 0x%llx nlink %u mode 0o%o prevun 0x%x nextun 0x%x", ip->i_ino, VFS_I(ip)->i_nlink, VFS_I(ip)->i_mode, ip->i_prev_unlinked, ip->i_next_unlinked);
    1870             :         }
    1871             : 
    1872  1075711381 :         trace_xfs_inode_set_reclaimable(ip);
    1873  1075687775 :         ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
    1874  1075687775 :         ip->i_flags |= XFS_IRECLAIMABLE;
    1875  1075687775 :         xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
    1876             :                         XFS_ICI_RECLAIM_TAG);
    1877             : 
    1878  1075657875 :         spin_unlock(&ip->i_flags_lock);
    1879  1075690283 :         spin_unlock(&pag->pag_ici_lock);
    1880  1075693517 :         xfs_perag_put(pag);
    1881  1075648687 : }
    1882             : 
    1883             : /*
    1884             :  * Free all speculative preallocations and possibly even the inode itself.
    1885             :  * This is the last chance to make changes to an otherwise unreferenced file
    1886             :  * before incore reclamation happens.
    1887             :  */
    1888             : static int
    1889    38271508 : xfs_inodegc_inactivate(
    1890             :         struct xfs_inode        *ip)
    1891             : {
    1892    38271508 :         int                     error;
    1893             : 
    1894    38271508 :         trace_xfs_inode_inactivating(ip);
    1895    38262732 :         error = xfs_inactive(ip);
    1896    38275198 :         xfs_inodegc_set_reclaimable(ip);
    1897    38270182 :         return error;
    1899             : }
    1900             : 
    1901             : void
    1902     4613622 : xfs_inodegc_worker(
    1903             :         struct work_struct      *work)
    1904             : {
    1905     4613622 :         struct xfs_inodegc      *gc = container_of(to_delayed_work(work),
    1906             :                                                 struct xfs_inodegc, work);
    1907     4613622 :         struct llist_node       *node = llist_del_all(&gc->list);
    1908     4612385 :         struct xfs_inode        *ip, *n;
    1909     4612385 :         unsigned int            nofs_flag;
    1910             : 
    1911     4612385 :         ASSERT(gc->cpu == smp_processor_id());
    1912             : 
    1913     4612240 :         WRITE_ONCE(gc->items, 0);
    1914             : 
    1915     4612240 :         if (!node)
    1916             :                 return;
    1917             : 
    1918             :         /*
    1919             :          * We can allocate memory here while doing writeback on behalf of
     1920             :          * memory reclaim.  To avoid memory allocation deadlocks, set the
    1921             :          * task-wide nofs context for the following operations.
    1922             :          */
    1923     4609079 :         nofs_flag = memalloc_nofs_save();
    1924             : 
    1925     4609079 :         ip = llist_entry(node, struct xfs_inode, i_gclist);
    1926     4609079 :         trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
    1927             : 
    1928     4601039 :         WRITE_ONCE(gc->shrinker_hits, 0);
    1929    42871055 :         llist_for_each_entry_safe(ip, n, node, i_gclist) {
    1930    38258045 :                 int     error;
    1931             : 
    1932    38258045 :                 xfs_iflags_set(ip, XFS_INACTIVATING);
    1933    38265501 :                 error = xfs_inodegc_inactivate(ip);
    1934    38270016 :                 if (error && !gc->error)
    1935        1757 :                         gc->error = error;
    1936             :         }
    1937             : 
    1938     4613010 :         memalloc_nofs_restore(nofs_flag);
    1939             : }
    1940             : 
    1941             : /*
    1942             :  * Expedite all pending inodegc work to run immediately. This does not wait for
    1943             :  * completion of the work.
    1944             :  */
    1945             : void
    1946    14184053 : xfs_inodegc_push(
    1947             :         struct xfs_mount        *mp)
    1948             : {
    1949    28368106 :         if (!xfs_is_inodegc_enabled(mp))
    1950             :                 return;
    1951    14164346 :         trace_xfs_inodegc_push(mp, __return_address);
    1952    14151600 :         xfs_inodegc_queue_all(mp);
    1953             : }
    1954             : 
    1955             : /*
    1956             :  * Force all currently queued inode inactivation work to run immediately and
    1957             :  * wait for the work to finish.
    1958             :  */
    1959             : int
    1960     5055021 : xfs_inodegc_flush(
    1961             :         struct xfs_mount        *mp)
    1962             : {
    1963     5055021 :         xfs_inodegc_push(mp);
    1964     5054949 :         trace_xfs_inodegc_flush(mp, __return_address);
    1965     5054962 :         return xfs_inodegc_wait_all(mp);
    1966             : }
    1967             : 
    1968             : /*
    1969             :  * Flush all the pending work and then disable the inode inactivation background
    1970             :  * workers and wait for them to stop.  Caller must hold sb->s_umount to
    1971             :  * coordinate changes in the inodegc_enabled state.
    1972             :  */
    1973             : void
    1974       73029 : xfs_inodegc_stop(
    1975             :         struct xfs_mount        *mp)
    1976             : {
    1977       73029 :         bool                    rerun;
    1978             : 
    1979       73029 :         if (!xfs_clear_inodegc_enabled(mp))
    1980             :                 return;
    1981             : 
    1982             :         /*
    1983             :          * Drain all pending inodegc work, including inodes that could be
    1984             :          * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
    1985             :          * threads that sample the inodegc state just prior to us clearing it.
    1986             :          * The inodegc flag state prevents new threads from queuing more
    1987             :          * inodes, so we queue pending work items and flush the workqueue until
    1988             :          * all inodegc lists are empty.  IOWs, we cannot use drain_workqueue
    1989             :          * here because it does not allow other unserialized mechanisms to
    1990             :          * reschedule inodegc work while this draining is in progress.
    1991             :          */
    1992       73015 :         xfs_inodegc_queue_all(mp);
    1993       73015 :         do {
    1994       73015 :                 flush_workqueue(mp->m_inodegc_wq);
    1995       73015 :                 rerun = xfs_inodegc_queue_all(mp);
    1996       73015 :         } while (rerun);
    1997             : 
    1998       73015 :         trace_xfs_inodegc_stop(mp, __return_address);
    1999             : }
    2000             : 
    2001             : /*
    2002             :  * Enable the inode inactivation background workers and schedule deferred inode
    2003             :  * inactivation work if there is any.  Caller must hold sb->s_umount to
    2004             :  * coordinate changes in the inodegc_enabled state.
    2005             :  */
    2006             : void
    2007       73013 : xfs_inodegc_start(
    2008             :         struct xfs_mount        *mp)
    2009             : {
    2010       73013 :         if (xfs_set_inodegc_enabled(mp))
    2011             :                 return;
    2012             : 
    2013       73009 :         trace_xfs_inodegc_start(mp, __return_address);
    2014       73009 :         xfs_inodegc_queue_all(mp);
    2015             : }
    2016             : 
    2017             : #ifdef CONFIG_XFS_RT
    2018             : static inline bool
    2019    32071125 : xfs_inodegc_want_queue_rt_file(
    2020             :         struct xfs_inode        *ip)
    2021             : {
    2022    32071125 :         struct xfs_mount        *mp = ip->i_mount;
    2023             : 
    2024    32071125 :         if (!XFS_IS_REALTIME_INODE(ip))
    2025             :                 return false;
    2026             : 
    2027     1842611 :         if (__percpu_counter_compare(&mp->m_frextents,
    2028     1842614 :                                 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
    2029             :                                 XFS_FDBLOCKS_BATCH) < 0)
    2030           0 :                 return true;
    2031             : 
    2032             :         return false;
    2033             : }
    2034             : #else
    2035             : # define xfs_inodegc_want_queue_rt_file(ip)     (false)
    2036             : #endif /* CONFIG_XFS_RT */
    2037             : 
    2038             : /*
    2039             :  * Schedule the inactivation worker when:
    2040             :  *
    2041             :  *  - We've accumulated more than one inode cluster buffer's worth of inodes.
    2042             :  *  - There is less than 5% free space left.
    2043             :  *  - Any of the quotas for this inode are near an enforcement limit.
    2044             :  */
    2045             : static inline bool
    2046    38274917 : xfs_inodegc_want_queue_work(
    2047             :         struct xfs_inode        *ip,
    2048             :         unsigned int            items)
    2049             : {
    2050    38274917 :         struct xfs_mount        *mp = ip->i_mount;
    2051             : 
    2052    38274917 :         if (items > mp->m_ino_geo.inodes_per_cluster)
    2053             :                 return true;
    2054             : 
    2055    32209619 :         if (__percpu_counter_compare(&mp->m_fdblocks,
    2056    32209768 :                                 mp->m_low_space[XFS_LOWSP_5_PCNT],
    2057             :                                 XFS_FDBLOCKS_BATCH) < 0)
    2058             :                 return true;
    2059             : 
    2060    32071348 :         if (xfs_inodegc_want_queue_rt_file(ip))
    2061             :                 return true;
    2062             : 
    2063    32071546 :         if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
    2064             :                 return true;
    2065             : 
    2066    32071083 :         if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
    2067             :                 return true;
    2068             : 
    2069    32071157 :         if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
    2070          38 :                 return true;
    2071             : 
    2072             :         return false;
    2073             : }
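
Both low-space checks above lean on __percpu_counter_compare(), which trusts
the cheap approximate count while it is more than batch * num_online_cpus()
away from the comparison value, and only pays for an exact per-cpu summation
inside that window.  A sketch of the predicate shape, with EX_BATCH standing
in for XFS's own batch constant:

    #include <linux/percpu_counter.h>

    #define EX_BATCH        1024    /* placeholder; XFS uses XFS_FDBLOCKS_BATCH */

    /* Returns true once the counter has fallen below @threshold. */
    static bool ex_below_threshold(struct percpu_counter *free, s64 threshold)
    {
            /* <0 means count < threshold; 0 and >0 follow comparator rules */
            return __percpu_counter_compare(free, threshold, EX_BATCH) < 0;
    }
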
    2074             : 
    2075             : /*
    2076             :  * Upper bound on the number of inodes on each CPU that can be queued for
    2077             :  * inactivation at any given time, to avoid monopolizing the workqueue.
    2078             :  */
    2079             : #define XFS_INODEGC_MAX_BACKLOG         (4 * XFS_INODES_PER_CHUNK)
    2080             : 
    2081             : /*
    2082             :  * Make the frontend wait for inactivations when:
    2083             :  *
    2084             :  *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
    2085             :  *  - The queue depth exceeds the maximum allowable percpu backlog.
    2086             :  *
    2087             :  * Note: If the current thread is running a transaction, we don't ever want to
    2088             :  * wait for other transactions because that could introduce a deadlock.
    2089             :  */
    2090             : static inline bool
    2091             : xfs_inodegc_want_flush_work(
    2092             :         struct xfs_inode        *ip,
    2093             :         unsigned int            items,
    2094             :         unsigned int            shrinker_hits)
    2095             : {
    2096    38273992 :         if (current->journal_info)
    2097             :                 return false;
    2098             : 
    2099    38236780 :         if (shrinker_hits > 0)
    2100             :                 return true;
    2101             : 
    2102    38236776 :         if (items > XFS_INODEGC_MAX_BACKLOG)
    2103             :                 return true;
    2104             : 
    2105             :         return false;
    2106             : }
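
The current->journal_info test deserves a note: journaling filesystems,
XFS included, stash the running transaction in that task_struct field, so
a non-NULL value means the caller holds transaction (and hence log)
resources and must not block on workers that may need those same resources
to make progress.  A sketch of the guard in isolation
(ex_in_transaction_context is a hypothetical name):

    #include <linux/sched.h>

    /* True when the calling task is inside a filesystem transaction. */
    static inline bool ex_in_transaction_context(void)
    {
            return current->journal_info != NULL;
    }
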
    2107             : 
    2108             : /*
    2109             :  * Queue a background inactivation worker if there are inodes that need to be
    2110             :  * inactivated and higher level xfs code hasn't disabled the background
    2111             :  * workers.
    2112             :  */
    2113             : static void
    2114    38273139 : xfs_inodegc_queue(
    2115             :         struct xfs_inode        *ip)
    2116             : {
    2117    38273139 :         struct xfs_mount        *mp = ip->i_mount;
    2118    38273139 :         struct xfs_inodegc      *gc;
    2119    38273139 :         int                     items;
    2120    38273139 :         unsigned int            shrinker_hits;
    2121    38273139 :         unsigned long           queue_delay = 1;
    2122             : 
    2123    38273139 :         trace_xfs_inode_set_need_inactive(ip);
    2124    38274517 :         spin_lock(&ip->i_flags_lock);
    2125    38274796 :         ip->i_flags |= XFS_NEED_INACTIVE;
    2126    38274796 :         spin_unlock(&ip->i_flags_lock);
    2127             : 
    2128    38274711 :         gc = get_cpu_ptr(mp->m_inodegc);
    2129    38274945 :         llist_add(&ip->i_gclist, &gc->list);
    2130    38274931 :         items = READ_ONCE(gc->items);
    2131    38274931 :         WRITE_ONCE(gc->items, items + 1);
    2132    38274931 :         shrinker_hits = READ_ONCE(gc->shrinker_hits);
    2133             : 
    2134             :         /*
    2135             :          * We queue the work while pinned to the current CPU (get_cpu_ptr
    2136             :          * disables preemption) so that the work is scheduled to run on it.
    2137             :          */
    2138    76549862 :         if (!xfs_is_inodegc_enabled(mp)) {
    2139           0 :                 put_cpu_ptr(gc);
    2140           0 :                 return;
    2141             :         }
    2142             : 
    2143    38274931 :         if (xfs_inodegc_want_queue_work(ip, items))
    2144     6203697 :                 queue_delay = 0;
    2145             : 
    2146    38274610 :         trace_xfs_inodegc_queue(mp, __return_address);
    2147    38274401 :         mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
    2148             :                         queue_delay);
    2149    38273376 :         put_cpu_ptr(gc);
    2150             : 
    2151    38273992 :         if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
    2152      967864 :                 trace_xfs_inodegc_throttle(mp, __return_address);
    2153      967850 :                 flush_delayed_work(&gc->work);
    2154             :         }
    2155             : }
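
The heart of xfs_inodegc_queue() is a lockless per-cpu deferral pattern:
pin the CPU, push onto that CPU's llist, bump an approximate depth counter
(READ_ONCE/WRITE_ONCE suffice because only heuristics read it), and (re)arm
that CPU's delayed worker so it later processes cache-hot data.  A hedged,
self-contained sketch with hypothetical names:

    #include <linux/llist.h>
    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    /* Hypothetical per-cpu deferred-work queue in the style used above. */
    struct ex_pcpu_gc {
            struct llist_head       list;   /* lockless list of deferred items */
            struct delayed_work     work;   /* this CPU's worker */
            unsigned int            items;  /* approximate queue depth */
    };

    static void ex_queue(struct ex_pcpu_gc __percpu *pcp,
                    struct workqueue_struct *wq, struct llist_node *node,
                    unsigned long delay)
    {
            struct ex_pcpu_gc *gc;

            /*
             * get_cpu_ptr() disables preemption, so the list we add to and
             * the CPU we queue the worker on are guaranteed to match.
             */
            gc = get_cpu_ptr(pcp);
            llist_add(node, &gc->list);
            WRITE_ONCE(gc->items, READ_ONCE(gc->items) + 1);
            mod_delayed_work_on(smp_processor_id(), wq, &gc->work, delay);
            put_cpu_ptr(pcp);
    }
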
    2156             : 
    2157             : /*
    2158             :  * Fold the dead CPU inodegc queue into the current CPUs queue.
    2159             :  */
    2160             : void
    2161           9 : xfs_inodegc_cpu_dead(
    2162             :         struct xfs_mount        *mp,
    2163             :         unsigned int            dead_cpu)
    2164             : {
    2165           9 :         struct xfs_inodegc      *dead_gc, *gc;
    2166           9 :         struct llist_node       *first, *last;
    2167           9 :         unsigned int            count = 0;
    2168             : 
    2169           9 :         dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
    2170           9 :         cancel_delayed_work_sync(&dead_gc->work);
    2171             : 
    2172           9 :         if (llist_empty(&dead_gc->list))
    2173             :                 return;
    2174             : 
    2175           0 :         first = dead_gc->list.first;
    2176           0 :         last = first;
    2177           0 :         while (last->next) {
    2178           0 :                 last = last->next;
    2179           0 :                 count++;
    2180             :         }
    2181           0 :         dead_gc->list.first = NULL;
    2182           0 :         dead_gc->items = 0;
    2183             : 
    2184             :         /* Add pending work to current CPU */
    2185           0 :         gc = get_cpu_ptr(mp->m_inodegc);
    2186           0 :         llist_add_batch(first, last, &gc->list);
    2187           0 :         count += READ_ONCE(gc->items);
    2188           0 :         WRITE_ONCE(gc->items, count);
    2189             : 
    2190           0 :         if (xfs_is_inodegc_enabled(mp)) {
    2191           0 :                 trace_xfs_inodegc_queue(mp, __return_address);
    2192           0 :                 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
    2193             :                                 0);
    2194             :         }
    2195           0 :         put_cpu_ptr(gc);
    2196             : }
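
The fold above can also be phrased with the generic llist helpers, reusing
the hypothetical ex_pcpu_gc type from the previous sketch: llist_del_all()
detaches the dead CPU's whole chain atomically, one walk finds the tail and
an exact count, and llist_add_batch() splices the chain onto the surviving
CPU's list with a single cmpxchg:

    static void ex_fold_dead_cpu(struct ex_pcpu_gc *dead, struct ex_pcpu_gc *live)
    {
            struct llist_node *first, *last;
            unsigned int count = 1;         /* counts the first node too */

            first = llist_del_all(&dead->list);
            if (!first)
                    return;
            WRITE_ONCE(dead->items, 0);

            for (last = first; last->next; last = last->next)
                    count++;

            llist_add_batch(first, last, &live->list);
            WRITE_ONCE(live->items, READ_ONCE(live->items) + count);
    }
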
    2197             : 
    2198             : /*
    2199             :  * We set the inode flag atomically with the radix tree tag.  Once we get tag
    2200             :  * lookups on the radix tree, this inode flag can go away.
    2201             :  *
    2202             :  * We always use background reclaim here because even if the inode is clean, it
    2203             :  * may still be under IO and hence we have to wait for IO completion to occur
    2204             :  * before we can reclaim the inode. The background reclaim path handles this
    2205             :  * more efficiently than we can here, so simply let background reclaim tear down
    2206             :  * all inodes.
    2207             :  */
    2208             : void
    2209  1075513810 : xfs_inode_mark_reclaimable(
    2210             :         struct xfs_inode        *ip)
    2211             : {
    2212  1075513810 :         struct xfs_mount        *mp = ip->i_mount;
    2213  1075513810 :         bool                    need_inactive;
    2214             : 
    2215  1075513810 :         XFS_STATS_INC(mp, vn_reclaim);
    2216             : 
    2217             :         /*
    2218             :          * We should never get here with any of the reclaim flags already set.
    2219             :          */
    2220  2151218122 :         ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
    2221             : 
    2222  1075704312 :         need_inactive = xfs_inode_needs_inactive(ip);
    2223  1075618950 :         if (need_inactive) {
    2224    38272841 :                 xfs_inodegc_queue(ip);
    2225    38272841 :                 return;
    2226             :         }
    2227             : 
    2228             :         /* Going straight to reclaim, so drop the dquots. */
    2229  1037346109 :         xfs_qm_dqdetach(ip);
    2230  1037335845 :         xfs_inodegc_set_reclaimable(ip);
    2231             : }
    2232             : 
    2233             : /*
    2234             :  * Register a phony shrinker so that we can run background inodegc sooner when
    2235             :  * there's memory pressure.  Inactivation does not itself free any memory but
    2236             :  * it does make inodes reclaimable, which eventually frees memory.
    2237             :  *
    2238             :  * The count function, seek value, and batch value are crafted to trigger the
    2239             :  * scan function during the second round of scanning.  Hopefully this means
    2240             :  * that we reclaimed enough memory that initiating metadata transactions won't
    2241             :  * make things worse.
    2242             :  */
    2243             : #define XFS_INODEGC_SHRINKER_COUNT      (1UL << DEF_PRIORITY)
    2244             : #define XFS_INODEGC_SHRINKER_BATCH      ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
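
A hedged worked example of the "second round" claim, assuming DEF_PRIORITY
is 12 and the vmscan accounting for seeks == 0 shrinkers (each count round
contributes roughly freeable / 2 toward the batch threshold):

    COUNT = 1 << 12      = 4096
    BATCH = 4096 / 2 + 1 = 2049
    round 1: 2048 accumulated  < 2049  ->  no scan yet
    round 2: 4096 accumulated >= 2049  ->  ->scan_objects() is invoked
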
    2245             : 
    2246             : static unsigned long
    2247        6547 : xfs_inodegc_shrinker_count(
    2248             :         struct shrinker         *shrink,
    2249             :         struct shrink_control   *sc)
    2250             : {
    2251        6547 :         struct xfs_mount        *mp = container_of(shrink, struct xfs_mount,
    2252             :                                                    m_inodegc_shrinker);
    2253        6547 :         struct xfs_inodegc      *gc;
    2254        6547 :         int                     cpu;
    2255             : 
    2256       13094 :         if (!xfs_is_inodegc_enabled(mp))
    2257             :                 return 0;
    2258             : 
    2259       19322 :         for_each_online_cpu(cpu) {
    2260       12966 :                 gc = per_cpu_ptr(mp->m_inodegc, cpu);
    2261       12966 :                 if (!llist_empty(&gc->list))
    2262             :                         return XFS_INODEGC_SHRINKER_COUNT;
    2263             :         }
    2264             : 
    2265             :         return 0;
    2266             : }
    2267             : 
    2268             : static unsigned long
    2269         119 : xfs_inodegc_shrinker_scan(
    2270             :         struct shrinker         *shrink,
    2271             :         struct shrink_control   *sc)
    2272             : {
    2273         119 :         struct xfs_mount        *mp = container_of(shrink, struct xfs_mount,
    2274             :                                                    m_inodegc_shrinker);
    2275         119 :         struct xfs_inodegc      *gc;
    2276         119 :         int                     cpu;
    2277         119 :         bool                    no_items = true;
    2278             : 
    2279         238 :         if (!xfs_is_inodegc_enabled(mp))
    2280             :                 return SHRINK_STOP;
    2281             : 
    2282         119 :         trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
    2283             : 
    2284         476 :         for_each_online_cpu(cpu) {
    2285         238 :                 gc = per_cpu_ptr(mp->m_inodegc, cpu);
    2286         238 :                 if (!llist_empty(&gc->list)) {
    2287         122 :                         unsigned int    h = READ_ONCE(gc->shrinker_hits);
    2288             : 
    2289         122 :                         WRITE_ONCE(gc->shrinker_hits, h + 1);
    2290         122 :                         mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
    2291         122 :                         no_items = false;
    2292             :                 }
    2293             :         }
    2294             : 
    2295             :         /*
    2296             :          * If there are no inodes to inactivate, we don't want the shrinker
    2297             :          * to think there's deferred work to call us back about.
    2298             :          */
    2299         119 :         if (no_items)
    2300           0 :                 return LONG_MAX;
    2301             : 
    2302             :         return SHRINK_STOP;
    2303             : }
    2304             : 
    2305             : /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
    2306             : int
    2307       24333 : xfs_inodegc_register_shrinker(
    2308             :         struct xfs_mount        *mp)
    2309             : {
    2310       24333 :         struct shrinker         *shrink = &mp->m_inodegc_shrinker;
    2311             : 
    2312       24333 :         shrink->count_objects = xfs_inodegc_shrinker_count;
    2313       24333 :         shrink->scan_objects = xfs_inodegc_shrinker_scan;
    2314       24333 :         shrink->seeks = 0;
    2315       24333 :         shrink->flags = SHRINKER_NONSLAB;
    2316       24333 :         shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
    2317             : 
    2318       24333 :         return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
    2319             : }
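
Shrinkers registered this way need a matching unregister_shrinker() call at
unmount.  A hedged sketch of the teardown (the real caller is in the
unmount path, outside this report):

    static void example_unregister(struct xfs_mount *mp)
    {
            unregister_shrinker(&mp->m_inodegc_shrinker);
    }
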

Generated by: LCOV version 1.14