LCOV - code coverage report
Current view: top level - fs/xfs - xfs_iomap.c (source / functions)
Test: fstests of 6.5.0-rc3-acha @ Mon Jul 31 20:08:06 PDT 2023
Date: 2023-07-31 20:08:07
Coverage: Lines: 566 of 610 hit (92.8 %), Functions: 23 of 25 hit (92.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
       4             :  * Copyright (c) 2016-2018 Christoph Hellwig.
       5             :  * All Rights Reserved.
       6             :  */
       7             : #include "xfs.h"
       8             : #include "xfs_fs.h"
       9             : #include "xfs_shared.h"
      10             : #include "xfs_format.h"
      11             : #include "xfs_log_format.h"
      12             : #include "xfs_trans_resv.h"
      13             : #include "xfs_mount.h"
      14             : #include "xfs_inode.h"
      15             : #include "xfs_btree.h"
      16             : #include "xfs_bmap_btree.h"
      17             : #include "xfs_bmap.h"
      18             : #include "xfs_bmap_util.h"
      19             : #include "xfs_errortag.h"
      20             : #include "xfs_error.h"
      21             : #include "xfs_trans.h"
      22             : #include "xfs_trans_space.h"
      23             : #include "xfs_inode_item.h"
      24             : #include "xfs_iomap.h"
      25             : #include "xfs_trace.h"
      26             : #include "xfs_quota.h"
      27             : #include "xfs_dquot_item.h"
      28             : #include "xfs_dquot.h"
      29             : #include "xfs_reflink.h"
      30             : #include "xfs_health.h"
      31             : 
      32             : #define XFS_ALLOC_ALIGN(mp, off) \
      33             :         (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
      34             : 
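
The XFS_ALLOC_ALIGN() macro above rounds an offset down to the mount's allocation-size granularity (a power of two) by shifting the low m_allocsize_log bits away and back. A minimal stand-alone model of that arithmetic (user-space C; the allocation-size value used below is illustrative, not taken from any real mount):

#include <stdint.h>
#include <stdio.h>

/* Model of XFS_ALLOC_ALIGN: drop the low 'allocsize_log' bits of an offset. */
static uint64_t alloc_align(uint64_t off, unsigned int allocsize_log)
{
        return (off >> allocsize_log) << allocsize_log;
}

int main(void)
{
        /* With a hypothetical 64-unit allocation size (log2 == 6),
         * offset 1000 rounds down to 960. */
        printf("%llu\n", (unsigned long long)alloc_align(1000, 6));
        return 0;
}
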
      35             : static int
      36           0 : xfs_alert_fsblock_zero(
      37             :         xfs_inode_t     *ip,
      38             :         xfs_bmbt_irec_t *imap)
      39             : {
      40           0 :         xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
      41             :                         "Access to block zero in inode %llu "
      42             :                         "start_block: %llx start_off: %llx "
      43             :                         "blkcnt: %llx extent-state: %x",
      44             :                 (unsigned long long)ip->i_ino,
      45             :                 (unsigned long long)imap->br_startblock,
      46             :                 (unsigned long long)imap->br_startoff,
      47             :                 (unsigned long long)imap->br_blockcount,
      48             :                 imap->br_state);
      49           0 :         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
      50           0 :         return -EFSCORRUPTED;
      51             : }
      52             : 
      53             : u64
      54   336324436 : xfs_iomap_inode_sequence(
      55             :         struct xfs_inode        *ip,
      56             :         u16                     iomap_flags)
      57             : {
      58   419903050 :         u64                     cookie = 0;
      59             : 
      60   336324436 :         if (iomap_flags & IOMAP_F_XATTR)
      61           0 :                 return READ_ONCE(ip->i_af.if_seq);
      62   336324436 :         if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
      63     3734008 :                 cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
      64   336324436 :         return cookie | READ_ONCE(ip->i_df.if_seq);
      65             : }
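
xfs_iomap_inode_sequence() builds the validity cookie by placing the COW fork sequence counter in the upper 32 bits (only for shared mappings) and the data fork sequence counter in the lower 32 bits. A small user-space model of that packing and of the comparison later done by xfs_iomap_valid() (the names below are illustrative, not the kernel structures):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative: pack two 32-bit per-fork sequence counters into one 64-bit
 * cookie, COW fork in the high half, data fork in the low half. */
static uint64_t pack_cookie(uint32_t cow_seq, uint32_t data_seq, bool shared)
{
        uint64_t cookie = 0;

        if (shared)
                cookie = (uint64_t)cow_seq << 32;
        return cookie | data_seq;
}

/* A cached mapping is stale if either fork changed since the cookie was
 * sampled; that is what the sequence comparison detects. */
static bool cookie_still_valid(uint64_t cookie, uint32_t cow_seq,
                               uint32_t data_seq, bool shared)
{
        return cookie == pack_cookie(cow_seq, data_seq, shared);
}
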
      66             : 
      67             : /*
      68             :  * Check that the iomap passed to us is still valid for the given offset and
      69             :  * length.
      70             :  */
      71             : static bool
      72   157461955 : xfs_iomap_valid(
      73             :         struct inode            *inode,
      74             :         const struct iomap      *iomap)
      75             : {
      76   157461955 :         struct xfs_inode        *ip = XFS_I(inode);
      77             : 
      78   157455720 :         if (iomap->validity_cookie !=
      79   157461955 :                         xfs_iomap_inode_sequence(ip, iomap->flags)) {
      80       10230 :                 trace_xfs_iomap_invalid(ip, iomap);
      81       10230 :                 return false;
      82             :         }
      83             : 
      84   157448490 :         XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
      85             :         return true;
      86             : }
      87             : 
      88             : static const struct iomap_folio_ops xfs_iomap_folio_ops = {
      89             :         .iomap_valid            = xfs_iomap_valid,
      90             : };
      91             : 
      92             : int
      93   283641784 : xfs_bmbt_to_iomap(
      94             :         struct xfs_inode        *ip,
      95             :         struct iomap            *iomap,
      96             :         struct xfs_bmbt_irec    *imap,
      97             :         unsigned int            mapping_flags,
      98             :         u16                     iomap_flags,
      99             :         u64                     sequence_cookie)
     100             : {
     101   283641784 :         struct xfs_mount        *mp = ip->i_mount;
     102   283641784 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     103             : 
     104   283641784 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     105           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     106           0 :                 return xfs_alert_fsblock_zero(ip, imap);
     107             :         }
     108             : 
     109   283641784 :         if (imap->br_startblock == HOLESTARTBLOCK) {
     110   135828879 :                 iomap->addr = IOMAP_NULL_ADDR;
     111   135828879 :                 iomap->type = IOMAP_HOLE;
     112   147812905 :         } else if (imap->br_startblock == DELAYSTARTBLOCK ||
     113             :                    isnullstartblock(imap->br_startblock)) {
     114    58241463 :                 iomap->addr = IOMAP_NULL_ADDR;
     115    58241463 :                 iomap->type = IOMAP_DELALLOC;
     116             :         } else {
     117    89571442 :                 iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
     118    89571596 :                 if (mapping_flags & IOMAP_DAX)
     119             :                         iomap->addr += target->bt_dax_part_off;
     120             : 
     121    89571596 :                 if (imap->br_state == XFS_EXT_UNWRITTEN)
     122    46858203 :                         iomap->type = IOMAP_UNWRITTEN;
     123             :                 else
     124    42713393 :                         iomap->type = IOMAP_MAPPED;
     125             : 
     126             :         }
     127   283641938 :         iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
     128   283641938 :         iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
     129   283641938 :         if (mapping_flags & IOMAP_DAX)
     130             :                 iomap->dax_dev = target->bt_daxdev;
     131             :         else
     132   283641938 :                 iomap->bdev = xfs_buftarg_bdev(target);
     133   283641938 :         iomap->flags = iomap_flags;
     134             : 
     135   283641938 :         if (xfs_ipincount(ip) &&
     136   144357547 :             (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
     137   119825805 :                 iomap->flags |= IOMAP_F_DIRTY;
     138             : 
     139   283641938 :         iomap->validity_cookie = sequence_cookie;
     140   283641938 :         iomap->folio_ops = &xfs_iomap_folio_ops;
     141   283641938 :         return 0;
     142             : }
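
xfs_bmbt_to_iomap() classifies the extent by its start block: the hole sentinel becomes IOMAP_HOLE, the delalloc sentinel becomes IOMAP_DELALLOC, and a real start block becomes IOMAP_UNWRITTEN or IOMAP_MAPPED depending on the extent state. A minimal stand-alone model of that decision (the sentinel values and names below are stand-ins, not the XFS definitions):

#include <stdbool.h>
#include <stdint.h>

enum demo_iomap_type { DEMO_HOLE, DEMO_DELALLOC, DEMO_UNWRITTEN, DEMO_MAPPED };

#define DEMO_HOLESTART   ((uint64_t)-1)  /* stand-in for HOLESTARTBLOCK */
#define DEMO_DELAYSTART  ((uint64_t)-2)  /* stand-in for DELAYSTARTBLOCK */

/* Classify an extent record the same way the function above does. */
static enum demo_iomap_type classify(uint64_t startblock, bool unwritten)
{
        if (startblock == DEMO_HOLESTART)
                return DEMO_HOLE;
        if (startblock == DEMO_DELAYSTART)
                return DEMO_DELALLOC;
        return unwritten ? DEMO_UNWRITTEN : DEMO_MAPPED;
}
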
     143             : 
     144             : static void
     145    18176462 : xfs_hole_to_iomap(
     146             :         struct xfs_inode        *ip,
     147             :         struct iomap            *iomap,
     148             :         xfs_fileoff_t           offset_fsb,
     149             :         xfs_fileoff_t           end_fsb)
     150             : {
     151    18176462 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     152             : 
     153    18176462 :         iomap->addr = IOMAP_NULL_ADDR;
     154    18176462 :         iomap->type = IOMAP_HOLE;
     155    18176462 :         iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
     156    18176462 :         iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
     157    18176462 :         iomap->bdev = xfs_buftarg_bdev(target);
     158    18176462 :         iomap->dax_dev = target->bt_daxdev;
     159    18176462 : }
     160             : 
     161             : static inline xfs_fileoff_t
     162   309044782 : xfs_iomap_end_fsb(
     163             :         struct xfs_mount        *mp,
     164             :         loff_t                  offset,
     165             :         loff_t                  count)
     166             : {
     167   309044782 :         ASSERT(offset <= mp->m_super->s_maxbytes);
     168   309044782 :         return min(XFS_B_TO_FSB(mp, offset + count),
     169             :                    XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
     170             : }
     171             : 
     172             : static xfs_extlen_t
     173    10104747 : xfs_eof_alignment(
     174             :         struct xfs_inode        *ip)
     175             : {
     176    10104747 :         struct xfs_mount        *mp = ip->i_mount;
     177    10104747 :         xfs_extlen_t            align = 0;
     178             : 
     179    10104747 :         if (!XFS_IS_REALTIME_INODE(ip)) {
     180             :                 /*
     181             :                  * Round up the allocation request to a stripe unit
     182             :                  * (m_dalign) boundary if the file size is >= stripe unit
     183             :                  * size, and we are allocating past the allocation eof.
     184             :                  *
     185             :                  * If mounted with the "-o swalloc" option the alignment is
      186             :                  * increased from the stripe unit size to the stripe width.
     187             :                  */
     188     7413742 :                 if (mp->m_swidth && xfs_has_swalloc(mp))
     189           0 :                         align = mp->m_swidth;
     190     7413742 :                 else if (mp->m_dalign)
     191       10048 :                         align = mp->m_dalign;
     192             : 
     193       20096 :                 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
     194        2651 :                         align = 0;
     195             :         }
     196             : 
     197    10104747 :         return align;
     198             : }
     199             : 
     200             : /*
     201             :  * Check if last_fsb is outside the last extent, and if so grow it to the next
     202             :  * stripe unit boundary.
     203             :  */
     204             : xfs_fileoff_t
     205     6727341 : xfs_iomap_eof_align_last_fsb(
     206             :         struct xfs_inode        *ip,
     207             :         xfs_fileoff_t           end_fsb)
     208             : {
     209     6727341 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
     210     6727341 :         xfs_extlen_t            extsz = xfs_get_extsz_hint(ip);
     211     6727349 :         xfs_extlen_t            align = xfs_eof_alignment(ip);
     212     6727349 :         struct xfs_bmbt_irec    irec;
     213     6727349 :         struct xfs_iext_cursor  icur;
     214             : 
     215     6727349 :         ASSERT(!xfs_need_iread_extents(ifp));
     216             : 
     217             :         /*
     218             :          * Always round up the allocation request to the extent hint boundary.
     219             :          */
     220     6727379 :         if (extsz) {
     221     5787751 :                 if (align)
     222           0 :                         align = roundup_64(align, extsz);
     223             :                 else
     224             :                         align = extsz;
     225             :         }
     226             : 
     227     6727379 :         if (align) {
     228     5788619 :                 xfs_fileoff_t   aligned_end_fsb = roundup_64(end_fsb, align);
     229             : 
     230     5788619 :                 xfs_iext_last(ifp, &icur);
     231     5788666 :                 if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
     232     5115330 :                     aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
     233     5708991 :                         return aligned_end_fsb;
     234             :         }
     235             : 
     236             :         return end_fsb;
     237             : }
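
xfs_iomap_eof_align_last_fsb() pads an allocation that extends the EOF out to the larger of the extent size hint and the stripe alignment, but only when the padded end would still land beyond the last existing extent. A tiny user-space model of the roundup arithmetic (the hint value is illustrative):

#include <stdint.h>

/* Model of roundup_64(): round x up to the next multiple of align. */
static uint64_t round_up_u64(uint64_t x, uint64_t align)
{
        return ((x + align - 1) / align) * align;
}

/* With a hypothetical 16-block extent size hint and no stripe alignment,
 * an EOF allocation ending at block 21 would be padded out to block 32. */
static uint64_t eof_align_last_fsb(uint64_t end_fsb, uint64_t align)
{
        return align ? round_up_u64(end_fsb, align) : end_fsb;
}
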
     238             : 
     239             : int
     240    10911732 : xfs_iomap_write_direct(
     241             :         struct xfs_inode        *ip,
     242             :         xfs_fileoff_t           offset_fsb,
     243             :         xfs_fileoff_t           count_fsb,
     244             :         unsigned int            flags,
     245             :         struct xfs_bmbt_irec    *imap,
     246             :         u64                     *seq)
     247             : {
     248    10911732 :         struct xfs_mount        *mp = ip->i_mount;
     249    10911732 :         struct xfs_trans        *tp;
     250    10911732 :         xfs_filblks_t           resaligned;
     251    10911732 :         int                     nimaps;
     252    10911732 :         unsigned int            dblocks, rblocks;
     253    10911732 :         bool                    force = false;
     254    10911732 :         int                     error;
     255    10911732 :         int                     bmapi_flags = XFS_BMAPI_PREALLOC;
     256    10911732 :         int                     nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;
     257             : 
     258    10911732 :         ASSERT(count_fsb > 0);
     259             : 
     260    10911732 :         resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
     261             :                                            xfs_get_extsz_hint(ip));
     262    10911892 :         if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
     263     6097082 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
     264     6097082 :                 rblocks = resaligned;
     265             :         } else {
     266     4814810 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
     267     4814810 :                 rblocks = 0;
     268             :         }
     269             : 
     270    10911892 :         error = xfs_qm_dqattach(ip);
     271    10911639 :         if (error)
     272             :                 return error;
     273             : 
     274             :         /*
     275             :          * For DAX, we do not allocate unwritten extents, but instead we zero
     276             :          * the block before we commit the transaction.  Ideally we'd like to do
     277             :          * this outside the transaction context, but if we commit and then crash
     278             :          * we may not have zeroed the blocks and this will be exposed on
     279             :          * recovery of the allocation. Hence we must zero before commit.
     280             :          *
     281             :          * Further, if we are mapping unwritten extents here, we need to zero
     282             :          * and convert them to written so that we don't need an unwritten extent
     283             :          * callback for DAX. This also means that we need to be able to dip into
     284             :          * the reserve block pool for bmbt block allocation if there is no space
     285             :          * left but we need to do unwritten extent conversion.
     286             :          */
     287    10911602 :         if (flags & IOMAP_DAX) {
     288             :                 bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
     289             :                 if (imap->br_state == XFS_EXT_UNWRITTEN) {
     290             :                         force = true;
     291             :                         nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
     292             :                         dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     293             :                 }
     294             :         }
     295             : 
     296    10911602 :         error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
     297             :                         rblocks, force, &tp);
     298    10911943 :         if (error)
     299             :                 return error;
     300             : 
     301    10778334 :         error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
     302    10778364 :         if (error == -EFBIG)
     303           0 :                 error = xfs_iext_count_upgrade(tp, ip, nr_exts);
     304    10778364 :         if (error)
     305           0 :                 goto out_trans_cancel;
     306             : 
     307             :         /*
     308             :          * From this point onwards we overwrite the imap pointer that the
     309             :          * caller gave to us.
     310             :          */
     311    10778364 :         nimaps = 1;
     312    10778364 :         error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
     313             :                                 imap, &nimaps);
     314    10778499 :         if (error)
     315         147 :                 goto out_trans_cancel;
     316             : 
     317             :         /*
     318             :          * Complete the transaction
     319             :          */
     320    10778352 :         error = xfs_trans_commit(tp);
     321    10778433 :         if (error)
     322          95 :                 goto out_unlock;
     323             : 
     324             :         /*
     325             :          * Copy any maps to caller's array and return any error.
     326             :          */
     327    10778338 :         if (nimaps == 0) {
     328           0 :                 error = -ENOSPC;
     329           0 :                 goto out_unlock;
     330             :         }
     331             : 
     332    10778338 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     333           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     334           0 :                 error = xfs_alert_fsblock_zero(ip, imap);
     335             :         }
     336             : 
     337    10761007 : out_unlock:
     338    10778580 :         *seq = xfs_iomap_inode_sequence(ip, 0);
     339    10778580 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     340    10778580 :         return error;
     341             : 
     342         147 : out_trans_cancel:
     343         147 :         xfs_trans_cancel(tp);
     344         147 :         goto out_unlock;
     345             : }
     346             : 
     347             : STATIC bool
     348      573199 : xfs_quota_need_throttle(
     349             :         struct xfs_inode        *ip,
     350             :         xfs_dqtype_t            type,
     351             :         xfs_fsblock_t           alloc_blocks)
     352             : {
     353      573199 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     354             : 
     355      573199 :         if (!dq || !xfs_this_quota_on(ip->i_mount, type))
     356             :                 return false;
     357             : 
     358             :         /* no hi watermark, no throttle */
     359      560543 :         if (!dq->q_prealloc_hi_wmark)
     360             :                 return false;
     361             : 
     362             :         /* under the lo watermark, no throttle */
     363        3872 :         if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
     364         409 :                 return false;
     365             : 
     366             :         return true;
     367             : }
     368             : 
     369             : STATIC void
     370        3463 : xfs_quota_calc_throttle(
     371             :         struct xfs_inode        *ip,
     372             :         xfs_dqtype_t            type,
     373             :         xfs_fsblock_t           *qblocks,
     374             :         int                     *qshift,
     375             :         int64_t                 *qfreesp)
     376             : {
     377        3463 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     378        3463 :         int64_t                 freesp;
     379        3463 :         int                     shift = 0;
     380             : 
     381             :         /* no dq, or over hi wmark, squash the prealloc completely */
     382        3463 :         if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
     383         305 :                 *qblocks = 0;
     384         305 :                 *qfreesp = 0;
     385         305 :                 return;
     386             :         }
     387             : 
     388        3158 :         freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
     389        3158 :         if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
     390         402 :                 shift = 2;
     391         402 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
     392         366 :                         shift += 2;
     393         402 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
     394         316 :                         shift += 2;
     395             :         }
     396             : 
     397        3158 :         if (freesp < *qfreesp)
     398        3154 :                 *qfreesp = freesp;
     399             : 
     400             :         /* only overwrite the throttle values if we are more aggressive */
     401        3158 :         if ((freesp >> shift) < (*qblocks >> *qshift)) {
     402         528 :                 *qblocks = freesp;
     403         528 :                 *qshift = shift;
     404             :         }
     405             : }
     406             : 
     407             : /*
     408             :  * If we don't have a user specified preallocation size, dynamically increase
     409             :  * the preallocation size as the size of the file grows.  Cap the maximum size
     410             :  * at a single extent or less if the filesystem is near full. The closer the
     411             :  * filesystem is to being full, the smaller the maximum preallocation.
     412             :  */
     413             : STATIC xfs_fsblock_t
     414     7350485 : xfs_iomap_prealloc_size(
     415             :         struct xfs_inode        *ip,
     416             :         int                     whichfork,
     417             :         loff_t                  offset,
     418             :         loff_t                  count,
     419             :         struct xfs_iext_cursor  *icur)
     420             : {
     421     7350485 :         struct xfs_iext_cursor  ncur = *icur;
     422     7350485 :         struct xfs_bmbt_irec    prev, got;
     423     7350485 :         struct xfs_mount        *mp = ip->i_mount;
     424     7350485 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
     425     7349580 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     426     7349580 :         int64_t                 freesp;
     427     7349580 :         xfs_fsblock_t           qblocks;
     428     7349580 :         xfs_fsblock_t           alloc_blocks = 0;
     429     7349580 :         xfs_extlen_t            plen;
     430     7349580 :         int                     shift = 0;
     431     7349580 :         int                     qshift = 0;
     432             : 
     433             :         /*
     434             :          * As an exception we don't do any preallocation at all if the file is
     435             :          * smaller than the minimum preallocation and we are using the default
     436             :          * dynamic preallocation scheme, as it is likely this is the only write
     437             :          * to the file that is going to be done.
     438             :          */
     439    14699160 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
     440             :                 return 0;
     441             : 
     442             :         /*
     443             :          * Use the minimum preallocation size for small files or if we are
     444             :          * writing right after a hole.
     445             :          */
     446     6733976 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
     447     3366639 :             !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
     448     3170099 :             prev.br_startoff + prev.br_blockcount < offset_fsb)
     449     3176224 :                 return mp->m_allocsize_blocks;
     450             : 
     451             :         /*
     452             :          * Take the size of the preceding data extents as the basis for the
     453             :          * preallocation size. Note that we don't care if the previous extents
     454             :          * are written or not.
     455             :          */
     456      191113 :         plen = prev.br_blockcount;
     457      210208 :         while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
     458      108748 :                 if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
     459      108745 :                     isnullstartblock(got.br_startblock) ||
     460       84436 :                     got.br_startoff + got.br_blockcount != prev.br_startoff ||
     461       43925 :                     got.br_startblock + got.br_blockcount != prev.br_startblock)
     462             :                         break;
     463       19095 :                 plen += got.br_blockcount;
     464       19095 :                 prev = got;
     465             :         }
     466             : 
     467             :         /*
     468             :          * If the size of the extents is greater than half the maximum extent
     469             :          * length, then use the current offset as the basis.  This ensures that
     470             :          * for large files the preallocation size always extends to
      471             :          * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like stripe
     472             :          * unit/width alignment of real extents.
     473             :          */
     474      191096 :         alloc_blocks = plen * 2;
     475      191096 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     476           3 :                 alloc_blocks = XFS_B_TO_FSB(mp, offset);
     477      191096 :         qblocks = alloc_blocks;
     478             : 
     479             :         /*
      480             :          * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the prealloc
     481             :          * down to the nearest power of two value after throttling. To prevent
     482             :          * the round down from unconditionally reducing the maximum supported
     483             :          * prealloc size, we round up first, apply appropriate throttling, round
      484             :          * down and cap the value to XFS_MAX_BMBT_EXTLEN.
     485             :          */
     486      191096 :         alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
     487             :                                        alloc_blocks);
     488             : 
     489      191096 :         freesp = percpu_counter_read_positive(&mp->m_fdblocks);
     490      191096 :         if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
     491        7361 :                 shift = 2;
     492        7361 :                 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
     493        7055 :                         shift++;
     494        7361 :                 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
     495        6614 :                         shift++;
     496        7361 :                 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
     497        6074 :                         shift++;
     498        7361 :                 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
     499        5084 :                         shift++;
     500             :         }
     501             : 
     502             :         /*
     503             :          * Check each quota to cap the prealloc size, provide a shift value to
     504             :          * throttle with and adjust amount of available space.
     505             :          */
     506      191096 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
     507        1192 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
     508             :                                         &freesp);
     509      191096 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
     510        1169 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
     511             :                                         &freesp);
     512      191096 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
     513        1102 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
     514             :                                         &freesp);
     515             : 
     516             :         /*
     517             :          * The final prealloc size is set to the minimum of free space available
     518             :          * in each of the quotas and the overall filesystem.
     519             :          *
     520             :          * The shift throttle value is set to the maximum value as determined by
     521             :          * the global low free space values and per-quota low free space values.
     522             :          */
     523      191096 :         alloc_blocks = min(alloc_blocks, qblocks);
     524      191096 :         shift = max(shift, qshift);
     525             : 
     526      191096 :         if (shift)
     527        7763 :                 alloc_blocks >>= shift;
     528             :         /*
     529             :          * rounddown_pow_of_two() returns an undefined result if we pass in
     530             :          * alloc_blocks = 0.
     531             :          */
     532      191096 :         if (alloc_blocks)
     533      189036 :                 alloc_blocks = rounddown_pow_of_two(alloc_blocks);
     534      191096 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     535           0 :                 alloc_blocks = XFS_MAX_BMBT_EXTLEN;
     536             : 
     537             :         /*
     538             :          * If we are still trying to allocate more space than is
     539             :          * available, squash the prealloc hard. This can happen if we
     540             :          * have a large file on a small filesystem and the above
      541             :          * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
     542             :          */
     543      194385 :         while (alloc_blocks && alloc_blocks >= freesp)
     544        3289 :                 alloc_blocks >>= 4;
     545      191096 :         if (alloc_blocks < mp->m_allocsize_blocks)
     546             :                 alloc_blocks = mp->m_allocsize_blocks;
     547      191096 :         trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
     548             :                                       mp->m_allocsize_blocks);
     549      191096 :         return alloc_blocks;
     550             : }
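
The sizing above doubles the length of the preceding contiguous extents, throttles the result by a power-of-two shift derived from global free space and per-quota headroom, rounds down to a power of two, and bounds it between m_allocsize_blocks and XFS_MAX_BMBT_EXTLEN. A compact user-space sketch of that final clamp (arithmetic only; the ordering of the bounds checks is simplified relative to the code above):

#include <stdint.h>

/* Round a nonzero value down to a power of two by clearing low set bits. */
static uint64_t round_down_pow2(uint64_t x)
{
        while (x & (x - 1))
                x &= x - 1;
        return x;
}

/* Take the smaller of the filesystem and quota limits, apply the more
 * aggressive throttle shift, round down, and bound the result. */
static uint64_t prealloc_blocks(uint64_t alloc_blocks, uint64_t qblocks,
                                int shift, int qshift,
                                uint64_t min_blocks, uint64_t max_blocks)
{
        uint64_t blocks = alloc_blocks < qblocks ? alloc_blocks : qblocks;
        int s = shift > qshift ? shift : qshift;

        if (s)
                blocks >>= s;
        if (blocks)
                blocks = round_down_pow2(blocks);
        if (blocks > max_blocks)
                blocks = max_blocks;
        if (blocks < min_blocks)
                blocks = min_blocks;
        return blocks;
}

For example, with 1024 blocks of preceding extents (alloc_blocks = 2048), ample quota headroom, a throttle shift of 3 from low free space, and a 16-block minimum, the result would be rounddown_pow_of_two(2048 >> 3) = 256 blocks.
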
     551             : 
     552             : int
     553    24552886 : xfs_iomap_write_unwritten(
     554             :         xfs_inode_t     *ip,
     555             :         xfs_off_t       offset,
     556             :         xfs_off_t       count,
     557             :         bool            update_isize)
     558             : {
     559    24552886 :         xfs_mount_t     *mp = ip->i_mount;
     560    24552886 :         xfs_fileoff_t   offset_fsb;
     561    24552886 :         xfs_filblks_t   count_fsb;
     562    24552886 :         xfs_filblks_t   numblks_fsb;
     563    24552886 :         int             nimaps;
     564    24552886 :         xfs_trans_t     *tp;
     565    24552886 :         xfs_bmbt_irec_t imap;
     566    24552886 :         struct inode    *inode = VFS_I(ip);
     567    24552886 :         xfs_fsize_t     i_size;
     568    24552886 :         uint            resblks;
     569    24552886 :         int             error;
     570             : 
     571    24552886 :         trace_xfs_unwritten_convert(ip, offset, count);
     572             : 
     573    24552740 :         offset_fsb = XFS_B_TO_FSBT(mp, offset);
     574    24552740 :         count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
     575    24552740 :         count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
     576             : 
     577             :         /*
     578             :          * Reserve enough blocks in this transaction for two complete extent
     579             :          * btree splits.  We may be converting the middle part of an unwritten
     580             :          * extent and in this case we will insert two new extents in the btree
     581             :          * each of which could cause a full split.
     582             :          *
     583             :          * This reservation amount will be used in the first call to
     584             :          * xfs_bmbt_split() to select an AG with enough space to satisfy the
     585             :          * rest of the operation.
     586             :          */
     587    24552740 :         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     588             : 
     589             :         /* Attach dquots so that bmbt splits are accounted correctly. */
     590    24552740 :         error = xfs_qm_dqattach(ip);
     591    24552770 :         if (error)
     592             :                 return error;
     593             : 
     594    25038073 :         do {
     595             :                 /*
     596             :                  * Set up a transaction to convert the range of extents
     597             :                  * from unwritten to real. Do allocations in a loop until
     598             :                  * we have covered the range passed in.
     599             :                  *
      600             :                  * Note that we can't risk recursing back into the filesystem
     601             :                  * here as we might be asked to write out the same inode that we
     602             :                  * complete here and might deadlock on the iolock.
     603             :                  */
     604    25038073 :                 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
     605             :                                 0, true, &tp);
     606    25037494 :                 if (error)
     607          15 :                         return error;
     608             : 
     609    25037479 :                 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
     610             :                                 XFS_IEXT_WRITE_UNWRITTEN_CNT);
     611    25037437 :                 if (error == -EFBIG)
     612           7 :                         error = xfs_iext_count_upgrade(tp, ip,
     613             :                                         XFS_IEXT_WRITE_UNWRITTEN_CNT);
     614    25037437 :                 if (error)
     615           7 :                         goto error_on_bmapi_transaction;
     616             : 
     617             :                 /*
     618             :                  * Modify the unwritten extent state of the buffer.
     619             :                  */
     620    25037430 :                 nimaps = 1;
     621    25037430 :                 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
     622             :                                         XFS_BMAPI_CONVERT, resblks, &imap,
     623             :                                         &nimaps);
     624    25038572 :                 if (error)
     625          56 :                         goto error_on_bmapi_transaction;
     626             : 
     627             :                 /*
     628             :                  * Log the updated inode size as we go.  We have to be careful
     629             :                  * to only log it up to the actual write offset if it is
     630             :                  * halfway into a block.
     631             :                  */
     632    25038516 :                 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
     633    25038516 :                 if (i_size > offset + count)
     634             :                         i_size = offset + count;
     635    25038516 :                 if (update_isize && i_size > i_size_read(inode))
     636     1683785 :                         i_size_write(inode, i_size);
     637    25038516 :                 i_size = xfs_new_eof(ip, i_size);
     638    10544542 :                 if (i_size) {
     639    10544516 :                         ip->i_disk_size = i_size;
     640    10544516 :                         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
     641             :                 }
     642             : 
     643    25038522 :                 error = xfs_trans_commit(tp);
     644    25037682 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
     645    25037464 :                 if (error)
     646         430 :                         return error;
     647             : 
     648    25037034 :                 if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
     649           0 :                         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     650           0 :                         return xfs_alert_fsblock_zero(ip, &imap);
     651             :                 }
     652             : 
     653    25037034 :                 if ((numblks_fsb = imap.br_blockcount) == 0) {
     654             :                         /*
     655             :                          * The numblks_fsb value should always get
     656             :                          * smaller, otherwise the loop is stuck.
     657             :                          */
     658           0 :                         ASSERT(imap.br_blockcount);
     659             :                         break;
     660             :                 }
     661    25037034 :                 offset_fsb += numblks_fsb;
     662    25037034 :                 count_fsb -= numblks_fsb;
     663    25037034 :         } while (count_fsb > 0);
     664             : 
     665             :         return 0;
     666             : 
     667          63 : error_on_bmapi_transaction:
     668          63 :         xfs_trans_cancel(tp);
     669          63 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     670          63 :         return error;
     671             : }
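
xfs_iomap_write_unwritten() converts the range in a loop, one transaction per pass, advancing by however many blocks each xfs_bmapi_write() call actually converted. A stand-alone model of that loop shape (the per-pass helper below is a stub for illustration only, not the XFS transaction code):

#include <stdint.h>

/* Stub: pretend each transaction converts at most 16 blocks of the range. */
static uint64_t convert_one_pass(uint64_t offset_fsb, uint64_t count_fsb)
{
        (void)offset_fsb;
        return count_fsb < 16 ? count_fsb : 16;
}

/* Keep converting until the whole range is covered; bail out if a pass makes
 * no progress so the loop cannot spin forever. */
static int convert_range(uint64_t offset_fsb, uint64_t count_fsb)
{
        while (count_fsb > 0) {
                uint64_t done = convert_one_pass(offset_fsb, count_fsb);

                if (done == 0)
                        return -1;
                offset_fsb += done;
                count_fsb -= done;
        }
        return 0;
}
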
     672             : 
     673             : static inline bool
     674             : imap_needs_alloc(
     675             :         struct inode            *inode,
     676             :         unsigned                flags,
     677             :         struct xfs_bmbt_irec    *imap,
     678             :         int                     nimaps)
     679             : {
     680             :         /* don't allocate blocks when just zeroing */
     681    22888020 :         if (flags & IOMAP_ZERO)
     682             :                 return false;
     683    15927742 :         if (!nimaps ||
     684    15927886 :             imap->br_startblock == HOLESTARTBLOCK ||
     685             :             imap->br_startblock == DELAYSTARTBLOCK)
     686    11515827 :                 return true;
     687             :         /* we convert unwritten extents before copying the data for DAX */
     688             :         if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
     689             :                 return true;
     690             :         return false;
     691             : }
     692             : 
     693             : static inline bool
     694    24297649 : imap_needs_cow(
     695             :         struct xfs_inode        *ip,
     696             :         unsigned int            flags,
     697             :         struct xfs_bmbt_irec    *imap,
     698             :         int                     nimaps)
     699             : {
     700    24297649 :         if (!xfs_is_cow_inode(ip))
     701             :                 return false;
     702             : 
     703             :         /* when zeroing we don't have to COW holes or unwritten extents */
     704     3057603 :         if (flags & IOMAP_ZERO) {
     705           0 :                 if (!nimaps ||
     706           0 :                     imap->br_startblock == HOLESTARTBLOCK ||
     707           0 :                     imap->br_state == XFS_EXT_UNWRITTEN)
     708           0 :                         return false;
     709             :         }
     710             : 
     711             :         return true;
     712             : }
     713             : 
     714             : static int
     715   263243587 : xfs_ilock_for_iomap(
     716             :         struct xfs_inode        *ip,
     717             :         unsigned                flags,
     718             :         unsigned                *lockmode)
     719             : {
     720   263243587 :         unsigned int            mode = *lockmode;
     721   263243587 :         bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
     722             : 
     723             :         /*
     724             :          * COW writes may allocate delalloc space or convert unwritten COW
     725             :          * extents, so we need to make sure to take the lock exclusively here.
     726             :          */
     727   263243587 :         if (xfs_is_cow_inode(ip) && is_write)
     728    35307455 :                 mode = XFS_ILOCK_EXCL;
     729             : 
     730             :         /*
      731             :          * Extents not yet cached require exclusive access; don't block.  This
     732             :          * is an opencoded xfs_ilock_data_map_shared() call but with
     733             :          * non-blocking behaviour.
     734             :          */
     735   263243587 :         if (xfs_need_iread_extents(&ip->i_df)) {
     736      998590 :                 if (flags & IOMAP_NOWAIT)
     737             :                         return -EAGAIN;
     738             :                 mode = XFS_ILOCK_EXCL;
     739             :         }
     740             : 
     741   262251222 : relock:
     742   263137023 :         if (flags & IOMAP_NOWAIT) {
     743           0 :                 if (!xfs_ilock_nowait(ip, mode))
     744             :                         return -EAGAIN;
     745             :         } else {
     746   263137023 :                 xfs_ilock(ip, mode);
     747             :         }
     748             : 
     749             :         /*
     750             :          * The reflink iflag could have changed since the earlier unlocked
      751             :          * check, so if we got ILOCK_SHARED for a write but we're now a
     752             :          * reflink inode we have to switch to ILOCK_EXCL and relock.
     753             :          */
     754   263269381 :         if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
     755           0 :                 xfs_iunlock(ip, mode);
     756        7837 :                 mode = XFS_ILOCK_EXCL;
     757        7837 :                 goto relock;
     758             :         }
     759             : 
     760   263269381 :         *lockmode = mode;
     761   263269381 :         return 0;
     762             : }
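
The function above takes the cheapest lock that looks sufficient based on an unlocked guess, then re-tests the condition that may have changed (the reflink flag) once the lock is held and restarts with the exclusive mode if needed. A self-contained user-space model of that check/lock/recheck pattern using a pthread rwlock (the structure and field below are illustrative, not XFS code):

#include <pthread.h>
#include <stdbool.h>

struct demo_inode {
        pthread_rwlock_t        lock;
        bool                    reflink;        /* may change while unlocked */
};

/* Returns with the lock held; *excl reports which mode was taken. */
static void demo_lock_for_write(struct demo_inode *ip, bool *excl)
{
        bool want_excl = ip->reflink;           /* unlocked guess */

again:
        if (want_excl)
                pthread_rwlock_wrlock(&ip->lock);
        else
                pthread_rwlock_rdlock(&ip->lock);

        if (!want_excl && ip->reflink) {
                /* The flag changed since the unlocked check: upgrade. */
                pthread_rwlock_unlock(&ip->lock);
                want_excl = true;
                goto again;
        }
        *excl = want_excl;
}
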
     763             : 
     764             : /*
     765             :  * Check that the imap we are going to return to the caller spans the entire
     766             :  * range that the caller requested for the IO.
     767             :  */
     768             : static bool
     769             : imap_spans_range(
     770             :         struct xfs_bmbt_irec    *imap,
     771             :         xfs_fileoff_t           offset_fsb,
     772             :         xfs_fileoff_t           end_fsb)
     773             : {
     774      270705 :         if (imap->br_startoff > offset_fsb)
     775             :                 return false;
     776      270705 :         if (imap->br_startoff + imap->br_blockcount < end_fsb)
     777             :                 return false;
     778             :         return true;
     779             : }
     780             : 
     781             : static int
     782    24298072 : xfs_direct_write_iomap_begin(
     783             :         struct inode            *inode,
     784             :         loff_t                  offset,
     785             :         loff_t                  length,
     786             :         unsigned                flags,
     787             :         struct iomap            *iomap,
     788             :         struct iomap            *srcmap)
     789             : {
     790    24298072 :         struct xfs_inode        *ip = XFS_I(inode);
     791    24298072 :         struct xfs_mount        *mp = ip->i_mount;
     792    24298072 :         struct xfs_bmbt_irec    imap, cmap;
     793    24298072 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     794    24298072 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     795    24298223 :         int                     nimaps = 1, error = 0;
     796    24298223 :         bool                    shared = false;
     797    24298223 :         u16                     iomap_flags = 0;
     798    24298223 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
     799    24298223 :         u64                     seq;
     800             : 
     801    24298223 :         ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
     802             : 
     803    48596446 :         if (xfs_is_shutdown(mp))
     804             :                 return -EIO;
     805             : 
     806             :         /*
     807             :          * Writes that span EOF might trigger an IO size update on completion,
     808             :          * so consider them to be dirty for the purposes of O_DSYNC even if
     809             :          * there is no other metadata changes pending or have been made here.
     810             :          */
     811    24298213 :         if (offset + length > i_size_read(inode))
     812    12699644 :                 iomap_flags |= IOMAP_F_DIRTY;
     813             : 
     814    24298213 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     815    24298063 :         if (error)
     816             :                 return error;
     817             : 
     818    24298158 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
     819             :                                &nimaps, 0);
     820    24297890 :         if (error)
     821          11 :                 goto out_unlock;
     822             : 
     823    24297879 :         if (imap_needs_cow(ip, flags, &imap, nimaps)) {
     824     3057603 :                 error = -EAGAIN;
     825     3057603 :                 if (flags & IOMAP_NOWAIT)
     826           0 :                         goto out_unlock;
     827             : 
     828             :                 /* may drop and re-acquire the ilock */
     829     3057603 :                 error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
     830             :                                 &lockmode,
     831     3057603 :                                 (flags & IOMAP_DIRECT) || IS_DAX(inode));
     832     3057603 :                 if (error)
     833          31 :                         goto out_unlock;
     834     3057572 :                 if (shared)
     835     1409828 :                         goto out_found_cow;
     836     1647744 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
     837     1647744 :                 length = XFS_FSB_TO_B(mp, end_fsb) - offset;
     838             :         }
     839             : 
     840    22888020 :         if (imap_needs_alloc(inode, flags, &imap, nimaps))
     841    11515827 :                 goto allocate_blocks;
     842             : 
     843             :         /*
      844             :          * NOWAIT and OVERWRITE I/O need to span the entire requested I/O with
     845             :          * a single map so that we avoid partial IO failures due to the rest of
     846             :          * the I/O range not covered by this map triggering an EAGAIN condition
     847             :          * when it is subsequently mapped and aborting the I/O.
     848             :          */
     849    11372193 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
     850      270705 :                 error = -EAGAIN;
     851      270705 :                 if (!imap_spans_range(&imap, offset_fsb, end_fsb))
     852       74855 :                         goto out_unlock;
     853             :         }
     854             : 
     855             :         /*
     856             :          * For overwrite only I/O, we cannot convert unwritten extents without
     857             :          * requiring sub-block zeroing.  This can only be done under an
     858             :          * exclusive IOLOCK, hence return -EAGAIN if this is not a written
     859             :          * extent to tell the caller to try again.
     860             :          */
     861    11297338 :         if (flags & IOMAP_OVERWRITE_ONLY) {
     862      195859 :                 error = -EAGAIN;
     863      195859 :                 if (imap.br_state != XFS_EXT_NORM &&
     864       81750 :                     ((offset | length) & mp->m_blockmask))
     865       81751 :                         goto out_unlock;
     866             :         }
     867             : 
     868    11215587 :         seq = xfs_iomap_inode_sequence(ip, iomap_flags);
     869    11215540 :         xfs_iunlock(ip, lockmode);
     870    11215619 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
     871    11215533 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
     872             : 
     873             : allocate_blocks:
     874    11515827 :         error = -EAGAIN;
     875    11515827 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
     876      604031 :                 goto out_unlock;
     877             : 
     878             :         /*
      879             :          * We cap the maximum length we map to a sane size to keep the chunks
      880             :          * of work done here somewhat symmetric with the work writeback does.
     881             :          * This is a completely arbitrary number pulled out of thin air as a
     882             :          * best guess for initial testing.
     883             :          *
      884             :          * Note that the values need to be less than 32 bits wide until the
     885             :          * lower level functions are updated.
     886             :          */
     887    10911796 :         length = min_t(loff_t, length, 1024 * PAGE_SIZE);
     888    10911796 :         end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     889             : 
     890    21823668 :         if (offset + length > XFS_ISIZE(ip))
     891     6727231 :                 end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
     892     4184603 :         else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
     893     4184595 :                 end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
     894    10911784 :         xfs_iunlock(ip, lockmode);
     895             : 
     896    10911765 :         error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
     897             :                         flags, &imap, &seq);
     898    10912079 :         if (error)
     899             :                 return error;
     900             : 
     901    10778339 :         trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
     902    10778338 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
     903             :                                  iomap_flags | IOMAP_F_NEW, seq);
     904             : 
     905             : out_found_cow:
     906     1409828 :         length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
     907     1409828 :         trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
     908     1409828 :         if (imap.br_startblock != HOLESTARTBLOCK) {
     909     1402439 :                 seq = xfs_iomap_inode_sequence(ip, 0);
     910     1402439 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
     911     1402439 :                 if (error)
     912           0 :                         goto out_unlock;
     913             :         }
     914     1409828 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
     915     1409828 :         xfs_iunlock(ip, lockmode);
     916     1409828 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
     917             : 
     918      760679 : out_unlock:
     919      760679 :         if (lockmode)
     920      760670 :                 xfs_iunlock(ip, lockmode);
     921             :         return error;
     922             : }
     923             : 
     924             : const struct iomap_ops xfs_direct_write_iomap_ops = {
     925             :         .iomap_begin            = xfs_direct_write_iomap_begin,
     926             : };
     927             : 
     928             : static int
     929           0 : xfs_dax_write_iomap_end(
     930             :         struct inode            *inode,
     931             :         loff_t                  pos,
     932             :         loff_t                  length,
     933             :         ssize_t                 written,
     934             :         unsigned                flags,
     935             :         struct iomap            *iomap)
     936             : {
     937           0 :         struct xfs_inode        *ip = XFS_I(inode);
     938             : 
     939           0 :         if (!xfs_is_cow_inode(ip))
     940             :                 return 0;
     941             : 
     942           0 :         if (!written) {
     943           0 :                 xfs_reflink_cancel_cow_range(ip, pos, length, true);
     944           0 :                 return 0;
     945             :         }
     946             : 
     947           0 :         return xfs_reflink_end_cow(ip, pos, written);
     948             : }
     949             : 
     950             : const struct iomap_ops xfs_dax_write_iomap_ops = {
     951             :         .iomap_begin    = xfs_direct_write_iomap_begin,
     952             :         .iomap_end      = xfs_dax_write_iomap_end,
     953             : };
     954             : 
     955             : static int
     956   103570718 : xfs_buffered_write_iomap_begin(
     957             :         struct inode            *inode,
     958             :         loff_t                  offset,
     959             :         loff_t                  count,
     960             :         unsigned                flags,
     961             :         struct iomap            *iomap,
     962             :         struct iomap            *srcmap)
     963             : {
     964   103570718 :         struct xfs_inode        *ip = XFS_I(inode);
     965   103570718 :         struct xfs_mount        *mp = ip->i_mount;
     966   103570718 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     967   103570718 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, count);
     968   103566593 :         struct xfs_bmbt_irec    imap, cmap;
     969   103566593 :         struct xfs_iext_cursor  icur, ccur;
     970   103566593 :         xfs_fsblock_t           prealloc_blocks = 0;
     971   103566593 :         bool                    eof = false, cow_eof = false, shared = false;
     972   103566593 :         int                     allocfork = XFS_DATA_FORK;
     973   103566593 :         int                     error = 0;
     974   103566593 :         unsigned int            lockmode = XFS_ILOCK_EXCL;
     975   103566593 :         u64                     seq;
     976             : 
     977   207133186 :         if (xfs_is_shutdown(mp))
     978             :                 return -EIO;
     979             : 
     980             :         /* we can't use delayed allocations when using extent size hints */
     981   103566497 :         if (xfs_get_extsz_hint(ip))
     982    16521966 :                 return xfs_direct_write_iomap_begin(inode, offset, count,
     983             :                                 flags, iomap, srcmap);
     984             : 
     985    87059380 :         ASSERT(!XFS_IS_REALTIME_INODE(ip));
     986             : 
     987    87059380 :         error = xfs_qm_dqattach(ip);
     988    87084778 :         if (error)
     989             :                 return error;
     990             : 
     991    87085781 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     992    87086380 :         if (error)
     993             :                 return error;
     994             : 
     995   174172929 :         if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
     996    87086380 :             XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
     997         980 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     998           0 :                 error = -EFSCORRUPTED;
     999           0 :                 goto out_unlock;
    1000             :         }
    1001             : 
    1002    87085729 :         XFS_STATS_INC(mp, xs_blk_mapw);
    1003             : 
    1004    87085729 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1005    87086696 :         if (error)
    1006           2 :                 goto out_unlock;
    1007             : 
    1008             :         /*
    1009             :          * Search the data fork first to look up our source mapping.  We
    1010             :          * always need the data fork map, as we have to return it to the
    1011             :          * iomap code so that the higher level write code can read data in to
    1012             :          * perform read-modify-write cycles for unaligned writes.
    1013             :          */
    1014    87086694 :         eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
    1015    87064297 :         if (eof)
    1016    18557799 :                 imap.br_startoff = end_fsb; /* fake hole until the end */
    1017             : 
    1018             :         /* We never need to allocate blocks for zeroing or unsharing a hole. */
    1019    87064297 :         if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
    1020    35034193 :             imap.br_startoff > offset_fsb) {
    1021    18176394 :                 xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
    1022    18176378 :                 goto out_unlock;
    1023             :         }
    1024             : 
    1025             :         /*
    1026             :          * Search the COW fork extent list even if we did not find a data fork
    1027             :          * extent.  This serves two purposes: first this implements the
    1028             :          * speculative preallocation using cowextsize, so that we also unshare
    1029             :          * block adjacent to shared blocks instead of just the shared blocks
    1030             :          * themselves.  Second the lookup in the extent list is generally faster
    1031             :          * than going out to the shared extent tree.
    1032             :          */
    1033    68887903 :         if (xfs_is_cow_inode(ip)) {
    1034    21520895 :                 if (!ip->i_cowfp) {
    1035           0 :                         ASSERT(!xfs_is_reflink_inode(ip));
    1036           0 :                         xfs_ifork_init_cow(ip);
    1037             :                 }
    1038    21520895 :                 cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
    1039    21520846 :                                 &ccur, &cmap);
    1040    21520846 :                 if (!cow_eof && cmap.br_startoff <= offset_fsb) {
    1041      921651 :                         trace_xfs_reflink_cow_found(ip, &cmap);
    1042      921756 :                         goto found_cow;
    1043             :                 }
    1044             :         }
    1045             : 
    1046    67966203 :         if (imap.br_startoff <= offset_fsb) {
    1047             :                 /*
    1048             :                  * For reflink files we may need a delalloc reservation when
    1049             :                  * overwriting shared extents.  This includes zeroing of
    1050             :                  * existing extents that contain data.
    1051             :                  */
    1052    49473394 :                 if (!xfs_is_cow_inode(ip) ||
    1053     8456640 :                     ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
    1054    43068085 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1055             :                                         &imap);
    1056    43061564 :                         goto found_imap;
    1057             :                 }
    1058             : 
    1059     6405309 :                 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
    1060             : 
    1061             :                 /* Trim the mapping to the nearest shared extent boundary. */
    1062     6405276 :                 error = xfs_bmap_trim_cow(ip, &imap, &shared);
    1063     6405308 :                 if (error)
    1064          88 :                         goto out_unlock;
    1065             : 
    1066             :                 /* Not shared?  Just report the (potentially capped) extent. */
    1067     6405220 :                 if (!shared) {
    1068     6244328 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1069             :                                         &imap);
    1070     6244303 :                         goto found_imap;
    1071             :                 }
    1072             : 
    1073             :                 /*
    1074             :                  * Fork all the shared blocks from our write offset until the
    1075             :                  * end of the extent.
    1076             :                  */
    1077      160892 :                 allocfork = XFS_COW_FORK;
    1078      160892 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
    1079             :         } else {
    1080             :                 /*
    1081             :                  * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
    1082             :                  * pages to keep the chunks of work done here somewhat
    1083             :                  * symmetric with the work writeback does.  This is a completely
    1084             :                  * arbitrary number pulled out of thin air.
    1085             :                  *
    1086             :                  * Note that the values need to be less than 32 bits wide until
    1087             :                  * the lower level functions are updated.
    1088             :                  */
    1089    18492809 :                 count = min_t(loff_t, count, 1024 * PAGE_SIZE);
    1090    18492809 :                 end_fsb = xfs_iomap_end_fsb(mp, offset, count);
    1091             : 
    1092    18512325 :                 if (xfs_is_always_cow_inode(ip))
    1093           0 :                         allocfork = XFS_COW_FORK;
    1094             :         }
    1095             : 
    1096    28441380 :         if (eof && offset + count > XFS_ISIZE(ip)) {
    1097             :                 /*
    1098             :                  * Determine the initial size of the preallocation.
    1099             :                  * We clean up any extra preallocation when the file is closed.
    1100             :                  */
    1101     7359769 :                 if (xfs_has_allocsize(mp))
    1102       10022 :                         prealloc_blocks = mp->m_allocsize_blocks;
    1103     7349747 :                 else if (allocfork == XFS_DATA_FORK)
    1104     7349747 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1105             :                                                 offset, count, &icur);
    1106             :                 else
    1107           0 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1108             :                                                 offset, count, &ccur);
    1109     7358714 :                 if (prealloc_blocks) {
    1110     3377258 :                         xfs_extlen_t    align;
    1111     3377258 :                         xfs_off_t       end_offset;
    1112     3377258 :                         xfs_fileoff_t   p_end_fsb;
    1113             : 
    1114     3377258 :                         end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
    1115     3377258 :                         p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
    1116             :                                         prealloc_blocks;
    1117             : 
    1118     3377258 :                         align = xfs_eof_alignment(ip);
    1119     3377258 :                         if (align)
    1120        6456 :                                 p_end_fsb = roundup_64(p_end_fsb, align);
    1121             : 
    1122     3377258 :                         p_end_fsb = min(p_end_fsb,
    1123             :                                 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
    1124     3377258 :                         ASSERT(p_end_fsb > offset_fsb);
    1125     3377258 :                         prealloc_blocks = p_end_fsb - end_fsb;
    1126             :                 }
    1127             :         }
    1128             : 
    1129    15294904 : retry:
    1130    19289220 :         error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
    1131             :                         end_fsb - offset_fsb, prealloc_blocks,
    1132             :                         allocfork == XFS_DATA_FORK ? &imap : &cmap,
    1133             :                         allocfork == XFS_DATA_FORK ? &icur : &ccur,
    1134             :                         allocfork == XFS_DATA_FORK ? eof : cow_eof);
    1135    18796481 :         switch (error) {
    1136             :         case 0:
    1137    18061971 :                 break;
    1138      734513 :         case -ENOSPC:
    1139             :         case -EDQUOT:
    1140             :                 /* retry without any preallocation */
    1141      734513 :                 trace_xfs_delalloc_enospc(ip, offset, count);
    1142      734544 :                 if (prealloc_blocks) {
    1143      134382 :                         prealloc_blocks = 0;
    1144      134382 :                         goto retry;
    1145             :                 }
    1146      600159 :                 fallthrough;
    1147             :         default:
    1148      600159 :                 goto out_unlock;
    1149             :         }
    1150             : 
    1151    18061971 :         if (allocfork == XFS_COW_FORK) {
    1152      160436 :                 trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
    1153      160436 :                 goto found_cow;
    1154             :         }
    1155             : 
    1156             :         /*
    1157             :          * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
    1158             :          * them out if the write happens to fail.
    1159             :          */
    1160    17901535 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
    1161    17901535 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1162    17910394 :         trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
    1163    17911897 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
    1164             : 
    1165    49305867 : found_imap:
    1166    49305867 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1167    49305867 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1168    49328566 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1169             : 
    1170     1082192 : found_cow:
    1171     1082192 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1172     1082192 :         if (imap.br_startoff <= offset_fsb) {
    1173     1028863 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
    1174     1028823 :                 if (error)
    1175           0 :                         goto out_unlock;
    1176     1028823 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1177     1028823 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1178     1028502 :                 return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1179             :                                          IOMAP_F_SHARED, seq);
    1180             :         }
    1181             : 
    1182       53329 :         xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
    1183       53329 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1184       53329 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
    1185             : 
    1186    18776627 : out_unlock:
    1187    18776627 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1188    18776627 :         return error;
    1189             : }
    1190             : 
    1191             : static int
    1192        5736 : xfs_buffered_write_delalloc_punch(
    1193             :         struct inode            *inode,
    1194             :         loff_t                  offset,
    1195             :         loff_t                  length)
    1196             : {
    1197        5736 :         return xfs_bmap_punch_delalloc_range(XFS_I(inode), offset,
    1198             :                         offset + length);
    1199             : }
    1200             : 
    1201             : static int
    1202    97681642 : xfs_buffered_write_iomap_end(
    1203             :         struct inode            *inode,
    1204             :         loff_t                  offset,
    1205             :         loff_t                  length,
    1206             :         ssize_t                 written,
    1207             :         unsigned                flags,
    1208             :         struct iomap            *iomap)
    1209             : {
    1210             : 
    1211    97681642 :         struct xfs_mount        *mp = XFS_M(inode->i_sb);
    1212    97681642 :         int                     error;
    1213             : 
    1214    97681642 :         error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
    1215             :                         length, written, &xfs_buffered_write_delalloc_punch);
    1216    97717175 :         if (error && !xfs_is_shutdown(mp)) {
    1217           0 :                 xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
    1218             :                         __func__, XFS_I(inode)->i_ino);
    1219           0 :                 return error;
    1220             :         }
    1221             :         return 0;
    1222             : }
    1223             : 
    1224             : const struct iomap_ops xfs_buffered_write_iomap_ops = {
    1225             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1226             :         .iomap_end              = xfs_buffered_write_iomap_end,
    1227             : };
    1228             : 
    1229             : /*
    1230             :  * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
    1231             :  * that it allocated to be revoked. Hence we do not need an .iomap_end method
    1232             :  * for this operation.
    1233             :  */
    1234             : const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
    1235             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1236             : };
    1237             : 
    1238             : static int
    1239   151890284 : xfs_read_iomap_begin(
    1240             :         struct inode            *inode,
    1241             :         loff_t                  offset,
    1242             :         loff_t                  length,
    1243             :         unsigned                flags,
    1244             :         struct iomap            *iomap,
    1245             :         struct iomap            *srcmap)
    1246             : {
    1247   151890284 :         struct xfs_inode        *ip = XFS_I(inode);
    1248   151890284 :         struct xfs_mount        *mp = ip->i_mount;
    1249   151890284 :         struct xfs_bmbt_irec    imap;
    1250   151890284 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1251   151890284 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1252   151893830 :         int                     nimaps = 1, error = 0;
    1253   151893830 :         bool                    shared = false;
    1254   151893830 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
    1255   151893830 :         u64                     seq;
    1256             : 
    1257   151893830 :         ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
    1258             : 
    1259   303787660 :         if (xfs_is_shutdown(mp))
    1260             :                 return -EIO;
    1261             : 
    1262   151893452 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
    1263   151894339 :         if (error)
    1264             :                 return error;
    1265   151894024 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1266             :                                &nimaps, 0);
    1267   151891790 :         if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
    1268     2691663 :                 error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
    1269   303668267 :         seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
    1270   151892984 :         xfs_iunlock(ip, lockmode);
    1271             : 
    1272   151893517 :         if (error)
    1273             :                 return error;
    1274   151893726 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
    1275   151893482 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
    1276   151893482 :                                  shared ? IOMAP_F_SHARED : 0, seq);
    1277             : }
    1278             : 
    1279             : const struct iomap_ops xfs_read_iomap_ops = {
    1280             :         .iomap_begin            = xfs_read_iomap_begin,
    1281             : };
    1282             : 
    1283             : static int
    1284      414001 : xfs_seek_iomap_begin(
    1285             :         struct inode            *inode,
    1286             :         loff_t                  offset,
    1287             :         loff_t                  length,
    1288             :         unsigned                flags,
    1289             :         struct iomap            *iomap,
    1290             :         struct iomap            *srcmap)
    1291             : {
    1292      414001 :         struct xfs_inode        *ip = XFS_I(inode);
    1293      414001 :         struct xfs_mount        *mp = ip->i_mount;
    1294      414001 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1295      414001 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1296      414001 :         xfs_fileoff_t           cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
    1297      414001 :         struct xfs_iext_cursor  icur;
    1298      414001 :         struct xfs_bmbt_irec    imap, cmap;
    1299      414001 :         int                     error = 0;
    1300      414001 :         unsigned                lockmode;
    1301      414001 :         u64                     seq;
    1302             : 
    1303      828002 :         if (xfs_is_shutdown(mp))
    1304             :                 return -EIO;
    1305             : 
    1306      414001 :         lockmode = xfs_ilock_data_map_shared(ip);
    1307      414001 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1308      414001 :         if (error)
    1309           0 :                 goto out_unlock;
    1310             : 
    1311      414001 :         if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
    1312             :                 /*
    1313             :                  * If we found a data extent we are done.
    1314             :                  */
    1315      405068 :                 if (imap.br_startoff <= offset_fsb)
    1316      210739 :                         goto done;
    1317             :                 data_fsb = imap.br_startoff;
    1318             :         } else {
    1319             :                 /*
    1320             :                  * Fake a hole until the end of the file.
    1321             :                  */
    1322        8933 :                 data_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1323             :         }
    1324             : 
    1325             :         /*
    1326             :          * If a COW fork extent covers the hole, report it, capped to the next
    1327             :          * data fork extent.
    1328             :          */
    1329      407068 :         if (xfs_inode_has_cow_data(ip) &&
    1330         544 :             xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
    1331         527 :                 cow_fsb = cmap.br_startoff;
    1332      203262 :         if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
    1333         167 :                 if (data_fsb < cow_fsb + cmap.br_blockcount)
    1334          42 :                         end_fsb = min(end_fsb, data_fsb);
    1335         167 :                 xfs_trim_extent(&cmap, offset_fsb, end_fsb);
    1336         167 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1337         167 :                 error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1338             :                                 IOMAP_F_SHARED, seq);
    1339             :                 /*
    1340             :                  * This is a COW extent, so we must probe the page cache
    1341             :                  * because there could be dirty page cache backed
    1342             :                  * by this extent.
    1343             :                  */
    1344         167 :                 iomap->type = IOMAP_UNWRITTEN;
    1345         167 :                 goto out_unlock;
    1346             :         }
    1347             : 
    1348             :         /*
    1349             :          * Else report a hole, capped to the next found data or COW extent.
    1350             :          */
    1351      203095 :         if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
    1352          52 :                 imap.br_blockcount = cow_fsb - offset_fsb;
    1353             :         else
    1354      203043 :                 imap.br_blockcount = data_fsb - offset_fsb;
    1355      203095 :         imap.br_startoff = offset_fsb;
    1356      203095 :         imap.br_startblock = HOLESTARTBLOCK;
    1357      203095 :         imap.br_state = XFS_EXT_NORM;
    1358      413834 : done:
    1359      413834 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1360      413834 :         xfs_trim_extent(&imap, offset_fsb, end_fsb);
    1361      413834 :         error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1362      414001 : out_unlock:
    1363      414001 :         xfs_iunlock(ip, lockmode);
    1364      414001 :         return error;
    1365             : }
    1366             : 
    1367             : const struct iomap_ops xfs_seek_iomap_ops = {
    1368             :         .iomap_begin            = xfs_seek_iomap_begin,
    1369             : };
    1370             : 
    1371             : static int
    1372      255351 : xfs_xattr_iomap_begin(
    1373             :         struct inode            *inode,
    1374             :         loff_t                  offset,
    1375             :         loff_t                  length,
    1376             :         unsigned                flags,
    1377             :         struct iomap            *iomap,
    1378             :         struct iomap            *srcmap)
    1379             : {
    1380      255351 :         struct xfs_inode        *ip = XFS_I(inode);
    1381      255351 :         struct xfs_mount        *mp = ip->i_mount;
    1382      255351 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1383      255351 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1384      255351 :         struct xfs_bmbt_irec    imap;
    1385      255351 :         int                     nimaps = 1, error = 0;
    1386      255351 :         unsigned                lockmode;
    1387      255351 :         int                     seq;
    1388             : 
    1389      510702 :         if (xfs_is_shutdown(mp))
    1390             :                 return -EIO;
    1391             : 
    1392      255349 :         lockmode = xfs_ilock_attr_map_shared(ip);
    1393             : 
    1394             :         /* if there is no attribute fork or it has no extents, return ENOENT */
    1395      255349 :         if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
    1396      249965 :                 error = -ENOENT;
    1397      249965 :                 goto out_unlock;
    1398             :         }
    1399             : 
    1400        5384 :         ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
    1401        5384 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1402             :                                &nimaps, XFS_BMAPI_ATTRFORK);
    1403      255349 : out_unlock:
    1404             : 
    1405      255349 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
    1406      255349 :         xfs_iunlock(ip, lockmode);
    1407             : 
    1408      255349 :         if (error)
    1409             :                 return error;
    1410        5384 :         ASSERT(nimaps);
    1411        5384 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
    1412             : }
    1413             : 
    1414             : const struct iomap_ops xfs_xattr_iomap_ops = {
    1415             :         .iomap_begin            = xfs_xattr_iomap_begin,
    1416             : };
    1417             : 
    1418             : int
    1419    30468201 : xfs_zero_range(
    1420             :         struct xfs_inode        *ip,
    1421             :         loff_t                  pos,
    1422             :         loff_t                  len,
    1423             :         bool                    *did_zero)
    1424             : {
    1425    30468201 :         struct inode            *inode = VFS_I(ip);
    1426             : 
    1427    30468201 :         if (IS_DAX(inode))
    1428             :                 return dax_zero_range(inode, pos, len, did_zero,
    1429             :                                       &xfs_dax_write_iomap_ops);
    1430    30468201 :         return iomap_zero_range(inode, pos, len, did_zero,
    1431             :                                 &xfs_buffered_write_iomap_ops);
    1432             : }
    1433             : 
    1434             : int
    1435     2665279 : xfs_truncate_page(
    1436             :         struct xfs_inode        *ip,
    1437             :         loff_t                  pos,
    1438             :         bool                    *did_zero)
    1439             : {
    1440     2665279 :         struct inode            *inode = VFS_I(ip);
    1441             : 
    1442     2665279 :         if (IS_DAX(inode))
    1443             :                 return dax_truncate_page(inode, pos, did_zero,
    1444             :                                         &xfs_dax_write_iomap_ops);
    1445     2665279 :         return iomap_truncate_page(inode, pos, did_zero,
    1446             :                                    &xfs_buffered_write_iomap_ops);
    1447             : }

Generated by: LCOV version 1.14