LCOV - code coverage report
Current view: top level - fs/xfs - xfs_iomap.c (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34

                 Hit    Total    Coverage
Lines:           576      620      92.9 %
Functions:        24       26      92.3 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
       4             :  * Copyright (c) 2016-2018 Christoph Hellwig.
       5             :  * All Rights Reserved.
       6             :  */
       7             : #include "xfs.h"
       8             : #include "xfs_fs.h"
       9             : #include "xfs_shared.h"
      10             : #include "xfs_format.h"
      11             : #include "xfs_log_format.h"
      12             : #include "xfs_trans_resv.h"
      13             : #include "xfs_mount.h"
      14             : #include "xfs_inode.h"
      15             : #include "xfs_btree.h"
      16             : #include "xfs_bmap_btree.h"
      17             : #include "xfs_bmap.h"
      18             : #include "xfs_bmap_util.h"
      19             : #include "xfs_errortag.h"
      20             : #include "xfs_error.h"
      21             : #include "xfs_trans.h"
      22             : #include "xfs_trans_space.h"
      23             : #include "xfs_inode_item.h"
      24             : #include "xfs_iomap.h"
      25             : #include "xfs_trace.h"
      26             : #include "xfs_quota.h"
      27             : #include "xfs_dquot_item.h"
      28             : #include "xfs_dquot.h"
      29             : #include "xfs_reflink.h"
      30             : #include "xfs_health.h"
      31             : 
      32             : #define XFS_ALLOC_ALIGN(mp, off) \
      33             :         (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
      34             : 
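For illustration only (not part of xfs_iomap.c): a minimal user-space sketch of the round-down that XFS_ALLOC_ALIGN() performs, assuming an allocation size log of 4 (a 16-block allocation unit); the macro name and values below are stand-ins.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel macro above; the shift value 4 is an assumption
 * chosen purely for the example (i.e. a 16-block allocation unit). */
#define ALLOC_ALIGN(off, log)   (((off) >> (log)) << (log))

int main(void)
{
        uint64_t off = 0x1234;

        /* 0x1234 rounds down to the 16-block boundary 0x1230. */
        printf("aligned offset: 0x%llx\n",
               (unsigned long long)ALLOC_ALIGN(off, 4));
        return 0;
}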
      35             : static int
      36           0 : xfs_alert_fsblock_zero(
      37             :         xfs_inode_t     *ip,
      38             :         xfs_bmbt_irec_t *imap)
      39             : {
      40           0 :         xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
      41             :                         "Access to block zero in inode %llu "
      42             :                         "start_block: %llx start_off: %llx "
      43             :                         "blkcnt: %llx extent-state: %x",
      44             :                 (unsigned long long)ip->i_ino,
      45             :                 (unsigned long long)imap->br_startblock,
      46             :                 (unsigned long long)imap->br_startoff,
      47             :                 (unsigned long long)imap->br_blockcount,
      48             :                 imap->br_state);
      49           0 :         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
      50           0 :         return -EFSCORRUPTED;
      51             : }
      52             : 
      53             : u64
      54  5917901105 : xfs_iomap_inode_sequence(
      55             :         struct xfs_inode        *ip,
      56             :         u16                     iomap_flags)
      57             : {
      58  6288162001 :         u64                     cookie = 0;
      59             : 
      60  5917901105 :         if (iomap_flags & IOMAP_F_XATTR)
      61           0 :                 return READ_ONCE(ip->i_af.if_seq);
      62  5917901105 :         if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
      63   162937910 :                 cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
      64  5917901105 :         return cookie | READ_ONCE(ip->i_df.if_seq);
      65             : }
      66             : 
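Illustrative sketch (not kernel code) of the 64-bit cookie built by xfs_iomap_inode_sequence(): the COW fork sequence sits in the upper 32 bits and the data fork sequence in the lower 32 bits. The helper and sequence values below are made up.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the packing above; not part of XFS. */
static uint64_t pack_cookie(uint32_t cow_seq, uint32_t data_seq)
{
        return ((uint64_t)cow_seq << 32) | data_seq;
}

int main(void)
{
        uint64_t at_mapping_time = pack_cookie(7, 42);
        uint64_t now             = pack_cookie(7, 43); /* data fork changed */

        /* A later comparison sees the cookies differ and treats the cached
         * mapping as stale, much like xfs_iomap_valid() below. */
        printf("valid: %d\n", at_mapping_time == now); /* prints 0 */
        return 0;
}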
      67             : /*
      68             :  * Check that the iomap passed to us is still valid for the given offset and
      69             :  * length.
      70             :  */
      71             : static bool
      72  2092516963 : xfs_iomap_valid(
      73             :         struct inode            *inode,
      74             :         const struct iomap      *iomap)
      75             : {
      76  2092516963 :         struct xfs_inode        *ip = XFS_I(inode);
      77             : 
      78  2092175359 :         if (iomap->validity_cookie !=
      79  2092516963 :                         xfs_iomap_inode_sequence(ip, iomap->flags)) {
      80      100230 :                 trace_xfs_iomap_invalid(ip, iomap);
      81      100230 :                 return false;
      82             :         }
      83             : 
      84  2092093629 :         XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
      85             :         return true;
      86             : }
      87             : 
      88             : static const struct iomap_folio_ops xfs_iomap_folio_ops = {
      89             :         .iomap_valid            = xfs_iomap_valid,
      90             : };
      91             : 
      92             : int
      93  4304452915 : xfs_bmbt_to_iomap(
      94             :         struct xfs_inode        *ip,
      95             :         struct iomap            *iomap,
      96             :         struct xfs_bmbt_irec    *imap,
      97             :         unsigned int            mapping_flags,
      98             :         u16                     iomap_flags,
      99             :         u64                     sequence_cookie)
     100             : {
     101  4304452915 :         struct xfs_mount        *mp = ip->i_mount;
     102  4304452915 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     103             : 
     104  4304452915 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     105           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     106           0 :                 return xfs_alert_fsblock_zero(ip, imap);
     107             :         }
     108             : 
     109  4304452915 :         if (imap->br_startblock == HOLESTARTBLOCK) {
     110  3400303765 :                 iomap->addr = IOMAP_NULL_ADDR;
     111  3400303765 :                 iomap->type = IOMAP_HOLE;
     112   904149150 :         } else if (imap->br_startblock == DELAYSTARTBLOCK ||
     113             :                    isnullstartblock(imap->br_startblock)) {
     114   212147872 :                 iomap->addr = IOMAP_NULL_ADDR;
     115   212147872 :                 iomap->type = IOMAP_DELALLOC;
     116             :         } else {
     117   692001278 :                 iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
     118   692163470 :                 if (mapping_flags & IOMAP_DAX)
     119           0 :                         iomap->addr += target->bt_dax_part_off;
     120             : 
     121   692163470 :                 if (imap->br_state == XFS_EXT_UNWRITTEN)
     122   505600942 :                         iomap->type = IOMAP_UNWRITTEN;
     123             :                 else
     124   186562528 :                         iomap->type = IOMAP_MAPPED;
     125             : 
     126             :         }
     127  4304615107 :         iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
     128  4304615107 :         iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
     129  4304615107 :         if (mapping_flags & IOMAP_DAX)
     130       26407 :                 iomap->dax_dev = target->bt_daxdev;
     131             :         else
     132  8610099196 :                 iomap->bdev = xfs_buftarg_bdev(target);
     133  4304615107 :         iomap->flags = iomap_flags;
     134             : 
     135  4304615107 :         if (xfs_ipincount(ip) &&
     136   948558228 :             (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
     137   870661842 :                 iomap->flags |= IOMAP_F_DIRTY;
     138             : 
     139  4304615107 :         iomap->validity_cookie = sequence_cookie;
     140  4304615107 :         iomap->folio_ops = &xfs_iomap_folio_ops;
     141  4304615107 :         return 0;
     142             : }
     143             : 
     144             : static void
     145    49511409 : xfs_hole_to_iomap(
     146             :         struct xfs_inode        *ip,
     147             :         struct iomap            *iomap,
     148             :         xfs_fileoff_t           offset_fsb,
     149             :         xfs_fileoff_t           end_fsb)
     150             : {
     151    49511409 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     152             : 
     153    49511409 :         iomap->addr = IOMAP_NULL_ADDR;
     154    49511409 :         iomap->type = IOMAP_HOLE;
     155    49511409 :         iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
     156    49511409 :         iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
     157    49511409 :         iomap->bdev = xfs_buftarg_bdev(target);
     158    49511409 :         iomap->dax_dev = target->bt_daxdev;
     159    49511409 : }
     160             : 
     161             : static inline xfs_fileoff_t
     162  4607674820 : xfs_iomap_end_fsb(
     163             :         struct xfs_mount        *mp,
     164             :         loff_t                  offset,
     165             :         loff_t                  count)
     166             : {
     167  4607674820 :         ASSERT(offset <= mp->m_super->s_maxbytes);
     168  4607674820 :         return min(XFS_B_TO_FSB(mp, offset + count),
     169             :                    XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
     170             : }
     171             : 
     172             : static xfs_extlen_t
     173    58366737 : xfs_eof_alignment(
     174             :         struct xfs_inode        *ip)
     175             : {
     176    58366737 :         struct xfs_mount        *mp = ip->i_mount;
     177    58366737 :         xfs_extlen_t            align = 0;
     178             : 
     179    58366737 :         if (!XFS_IS_REALTIME_INODE(ip)) {
     180             :                 /*
     181             :                  * Round up the allocation request to a stripe unit
     182             :                  * (m_dalign) boundary if the file size is >= stripe unit
     183             :                  * size, and we are allocating past the allocation eof.
     184             :                  *
     185             :                  * If mounted with the "-o swalloc" option the alignment is
      186             :                  * increased from the stripe unit size to the stripe width.
     187             :                  */
     188    18386189 :                 if (mp->m_swidth && xfs_has_swalloc(mp))
     189           0 :                         align = mp->m_swidth;
     190    18386189 :                 else if (mp->m_dalign)
     191      777589 :                         align = mp->m_dalign;
     192             : 
     193     1555178 :                 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
     194      121018 :                         align = 0;
     195             :         }
     196             : 
     197    58366737 :         return align;
     198             : }
     199             : 
     200             : /*
      201             :  * Check if end_fsb is beyond the last extent and, if so, grow it to the next
     202             :  * stripe unit boundary.
     203             :  */
     204             : xfs_fileoff_t
     205    50466181 : xfs_iomap_eof_align_last_fsb(
     206             :         struct xfs_inode        *ip,
     207             :         xfs_fileoff_t           end_fsb)
     208             : {
     209    50466181 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
     210    50466181 :         xfs_extlen_t            extsz = xfs_get_extsz_hint(ip);
     211    50434720 :         xfs_extlen_t            align = xfs_eof_alignment(ip);
     212    50446809 :         struct xfs_bmbt_irec    irec;
     213    50446809 :         struct xfs_iext_cursor  icur;
     214             : 
     215    50446809 :         ASSERT(!xfs_need_iread_extents(ifp));
     216             : 
     217             :         /*
     218             :          * Always round up the allocation request to the extent hint boundary.
     219             :          */
     220    50446713 :         if (extsz) {
     221    48369972 :                 if (align)
     222       76276 :                         align = roundup_64(align, extsz);
     223             :                 else
     224             :                         align = extsz;
     225             :         }
     226             : 
     227    50446713 :         if (align) {
     228    48430239 :                 xfs_fileoff_t   aligned_end_fsb = roundup_64(end_fsb, align);
     229             : 
     230    48425794 :                 xfs_iext_last(ifp, &icur);
     231    48424690 :                 if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
     232    39146841 :                     aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
     233    48108930 :                         return aligned_end_fsb;
     234             :         }
     235             : 
     236             :         return end_fsb;
     237             : }
     238             : 
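A small worked example of the roundup_64() alignment used above, under assumed values (a stripe unit of 128 blocks, no extent size hint); the helper below is a stand-in, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for roundup_64(): round x up to the next multiple of align. */
static uint64_t roundup_u64(uint64_t x, uint64_t align)
{
        return ((x + align - 1) / align) * align;
}

int main(void)
{
        uint64_t end_fsb = 1000;        /* requested allocation end */
        uint64_t align   = 128;         /* assumed stripe unit in blocks */

        /* 1000 rounds up to 1024, the next stripe unit boundary. */
        printf("aligned end_fsb: %llu\n",
               (unsigned long long)roundup_u64(end_fsb, align));
        return 0;
}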
     239             : int
     240    88424666 : xfs_iomap_write_direct(
     241             :         struct xfs_inode        *ip,
     242             :         xfs_fileoff_t           offset_fsb,
     243             :         xfs_fileoff_t           count_fsb,
     244             :         unsigned int            flags,
     245             :         struct xfs_bmbt_irec    *imap,
     246             :         u64                     *seq)
     247             : {
     248    88424666 :         struct xfs_mount        *mp = ip->i_mount;
     249    88424666 :         struct xfs_trans        *tp;
     250    88424666 :         xfs_filblks_t           resaligned;
     251    88424666 :         int                     nimaps;
     252    88424666 :         unsigned int            dblocks, rblocks;
     253    88424666 :         bool                    force = false;
     254    88424666 :         int                     error;
     255    88424666 :         int                     bmapi_flags = XFS_BMAPI_PREALLOC;
     256    88424666 :         int                     nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;
     257             : 
     258    88424666 :         ASSERT(count_fsb > 0);
     259             : 
     260    88424666 :         resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
     261             :                                            xfs_get_extsz_hint(ip));
     262    88397305 :         if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
     263    75673917 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
     264    75673917 :                 rblocks = resaligned;
     265             :         } else {
     266    12723388 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
     267    12723388 :                 rblocks = 0;
     268             :         }
     269             : 
     270    88397305 :         error = xfs_qm_dqattach(ip);
     271    88379122 :         if (error)
     272             :                 return error;
     273             : 
     274             :         /*
     275             :          * For DAX, we do not allocate unwritten extents, but instead we zero
     276             :          * the block before we commit the transaction.  Ideally we'd like to do
     277             :          * this outside the transaction context, but if we commit and then crash
     278             :          * we may not have zeroed the blocks and this will be exposed on
     279             :          * recovery of the allocation. Hence we must zero before commit.
     280             :          *
     281             :          * Further, if we are mapping unwritten extents here, we need to zero
     282             :          * and convert them to written so that we don't need an unwritten extent
     283             :          * callback for DAX. This also means that we need to be able to dip into
     284             :          * the reserve block pool for bmbt block allocation if there is no space
     285             :          * left but we need to do unwritten extent conversion.
     286             :          */
     287    88379122 :         if (flags & IOMAP_DAX) {
     288           0 :                 bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
     289           0 :                 if (imap->br_state == XFS_EXT_UNWRITTEN) {
     290           0 :                         force = true;
     291           0 :                         nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
     292           0 :                         dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     293             :                 }
     294             :         }
     295             : 
     296    88379122 :         error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
     297             :                         rblocks, force, &tp);
     298    88486321 :         if (error)
     299             :                 return error;
     300             : 
     301    85772735 :         error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
     302    85747384 :         if (error == -EFBIG)
     303           0 :                 error = xfs_iext_count_upgrade(tp, ip, nr_exts);
     304    85747384 :         if (error)
     305           0 :                 goto out_trans_cancel;
     306             : 
     307             :         /*
     308             :          * From this point onwards we overwrite the imap pointer that the
     309             :          * caller gave to us.
     310             :          */
     311    85747384 :         nimaps = 1;
     312    85747384 :         error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
     313             :                                 imap, &nimaps);
     314    85776406 :         if (error)
     315         291 :                 goto out_trans_cancel;
     316             : 
     317             :         /*
     318             :          * Complete the transaction
     319             :          */
     320    85776115 :         error = xfs_trans_commit(tp);
     321    85777425 :         if (error)
     322         253 :                 goto out_unlock;
     323             : 
     324             :         /*
     325             :          * Copy any maps to caller's array and return any error.
     326             :          */
     327    85777172 :         if (nimaps == 0) {
     328           0 :                 error = -ENOSPC;
     329           0 :                 goto out_unlock;
     330             :         }
     331             : 
     332    85777172 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     333           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     334           0 :                 error = xfs_alert_fsblock_zero(ip, imap);
     335             :         }
     336             : 
     337    85770745 : out_unlock:
     338    85777716 :         *seq = xfs_iomap_inode_sequence(ip, 0);
     339    85777716 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     340    85777716 :         return error;
     341             : 
     342         291 : out_trans_cancel:
     343         291 :         xfs_trans_cancel(tp);
     344         291 :         goto out_unlock;
     345             : }
     346             : 
     347             : STATIC bool
     348     4606074 : xfs_quota_need_throttle(
     349             :         struct xfs_inode        *ip,
     350             :         xfs_dqtype_t            type,
     351             :         xfs_fsblock_t           alloc_blocks)
     352             : {
     353     4606074 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     354             : 
     355     4606074 :         if (!dq || !xfs_this_quota_on(ip->i_mount, type))
     356             :                 return false;
     357             : 
     358             :         /* no hi watermark, no throttle */
     359     4137643 :         if (!dq->q_prealloc_hi_wmark)
     360             :                 return false;
     361             : 
     362             :         /* under the lo watermark, no throttle */
     363       19150 :         if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
     364        1525 :                 return false;
     365             : 
     366             :         return true;
     367             : }
     368             : 
     369             : STATIC void
     370       17625 : xfs_quota_calc_throttle(
     371             :         struct xfs_inode        *ip,
     372             :         xfs_dqtype_t            type,
     373             :         xfs_fsblock_t           *qblocks,
     374             :         int                     *qshift,
     375             :         int64_t                 *qfreesp)
     376             : {
     377       17625 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     378       17625 :         int64_t                 freesp;
     379       17625 :         int                     shift = 0;
     380             : 
     381             :         /* no dq, or over hi wmark, squash the prealloc completely */
     382       17625 :         if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
     383        1087 :                 *qblocks = 0;
     384        1087 :                 *qfreesp = 0;
     385        1087 :                 return;
     386             :         }
     387             : 
     388       16538 :         freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
     389       16538 :         if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
     390        2146 :                 shift = 2;
     391        2146 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
     392        2091 :                         shift += 2;
     393        2146 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
     394        1871 :                         shift += 2;
     395             :         }
     396             : 
     397       16538 :         if (freesp < *qfreesp)
     398       16518 :                 *qfreesp = freesp;
     399             : 
     400             :         /* only overwrite the throttle values if we are more aggressive */
     401       16538 :         if ((freesp >> shift) < (*qblocks >> *qshift)) {
     402        2718 :                 *qblocks = freesp;
     403        2718 :                 *qshift = shift;
     404             :         }
     405             : }
     406             : 
     407             : /*
     408             :  * If we don't have a user specified preallocation size, dynamically increase
     409             :  * the preallocation size as the size of the file grows.  Cap the maximum size
     410             :  * at a single extent or less if the filesystem is near full. The closer the
     411             :  * filesystem is to being full, the smaller the maximum preallocation.
     412             :  */
     413             : STATIC xfs_fsblock_t
     414    18645361 : xfs_iomap_prealloc_size(
     415             :         struct xfs_inode        *ip,
     416             :         int                     whichfork,
     417             :         loff_t                  offset,
     418             :         loff_t                  count,
     419             :         struct xfs_iext_cursor  *icur)
     420             : {
     421    18645361 :         struct xfs_iext_cursor  ncur = *icur;
     422    18645361 :         struct xfs_bmbt_irec    prev, got;
     423    18645361 :         struct xfs_mount        *mp = ip->i_mount;
     424    18645361 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
     425    18568374 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     426    18568374 :         int64_t                 freesp;
     427    18568374 :         xfs_fsblock_t           qblocks;
     428    18568374 :         xfs_fsblock_t           alloc_blocks = 0;
     429    18568374 :         xfs_extlen_t            plen;
     430    18568374 :         int                     shift = 0;
     431    18568374 :         int                     qshift = 0;
     432             : 
     433             :         /*
     434             :          * As an exception we don't do any preallocation at all if the file is
     435             :          * smaller than the minimum preallocation and we are using the default
     436             :          * dynamic preallocation scheme, as it is likely this is the only write
     437             :          * to the file that is going to be done.
     438             :          */
     439    37136748 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
     440             :                 return 0;
     441             : 
     442             :         /*
     443             :          * Use the minimum preallocation size for small files or if we are
     444             :          * writing right after a hole.
     445             :          */
     446    23609178 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
     447     7856269 :             !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
     448     7453386 :             prev.br_startoff + prev.br_blockcount < offset_fsb)
     449     6341399 :                 return mp->m_allocsize_blocks;
     450             : 
     451             :         /*
     452             :          * Take the size of the preceding data extents as the basis for the
     453             :          * preallocation size. Note that we don't care if the previous extents
     454             :          * are written or not.
     455             :          */
     456     1535356 :         plen = prev.br_blockcount;
     457     1613066 :         while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
     458      949484 :                 if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
     459      949478 :                     isnullstartblock(got.br_startblock) ||
     460      844838 :                     got.br_startoff + got.br_blockcount != prev.br_startoff ||
     461      159597 :                     got.br_startblock + got.br_blockcount != prev.br_startblock)
     462             :                         break;
     463       77710 :                 plen += got.br_blockcount;
     464       77710 :                 prev = got;
     465             :         }
     466             : 
     467             :         /*
     468             :          * If the size of the extents is greater than half the maximum extent
     469             :          * length, then use the current offset as the basis.  This ensures that
     470             :          * for large files the preallocation size always extends to
      471             :          * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like stripe
     472             :          * unit/width alignment of real extents.
     473             :          */
     474     1535347 :         alloc_blocks = plen * 2;
     475     1535347 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     476          10 :                 alloc_blocks = XFS_B_TO_FSB(mp, offset);
     477     1535347 :         qblocks = alloc_blocks;
     478             : 
     479             :         /*
      480             :          * XFS_MAX_BMBT_EXTLEN is not a power-of-two value, but we round the prealloc
     481             :          * down to the nearest power of two value after throttling. To prevent
     482             :          * the round down from unconditionally reducing the maximum supported
     483             :          * prealloc size, we round up first, apply appropriate throttling, round
      484             :          * down and cap the value to XFS_MAX_BMBT_EXTLEN.
     485             :          */
     486     1535347 :         alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
     487             :                                        alloc_blocks);
     488             : 
     489     1535347 :         freesp = percpu_counter_read_positive(&mp->m_fdblocks);
     490     1535347 :         if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
     491      105081 :                 shift = 2;
     492      105081 :                 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
     493      103451 :                         shift++;
     494      105081 :                 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
     495      101314 :                         shift++;
     496      105081 :                 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
     497       98867 :                         shift++;
     498      105081 :                 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
     499       95012 :                         shift++;
     500             :         }
     501             : 
     502             :         /*
     503             :          * Check each quota to cap the prealloc size, provide a shift value to
     504             :          * throttle with and adjust amount of available space.
     505             :          */
     506     1535347 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
     507        6117 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
     508             :                                         &freesp);
     509     1535347 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
     510        5896 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
     511             :                                         &freesp);
     512     1535347 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
     513        5612 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
     514             :                                         &freesp);
     515             : 
     516             :         /*
     517             :          * The final prealloc size is set to the minimum of free space available
     518             :          * in each of the quotas and the overall filesystem.
     519             :          *
     520             :          * The shift throttle value is set to the maximum value as determined by
     521             :          * the global low free space values and per-quota low free space values.
     522             :          */
     523     1535347 :         alloc_blocks = min(alloc_blocks, qblocks);
     524     1535347 :         shift = max(shift, qshift);
     525             : 
     526     1535347 :         if (shift)
     527      107232 :                 alloc_blocks >>= shift;
     528             :         /*
     529             :          * rounddown_pow_of_two() returns an undefined result if we pass in
     530             :          * alloc_blocks = 0.
     531             :          */
     532     1535347 :         if (alloc_blocks)
     533     1453523 :                 alloc_blocks = rounddown_pow_of_two(alloc_blocks);
     534     1535347 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     535             :                 alloc_blocks = XFS_MAX_BMBT_EXTLEN;
     536             : 
     537             :         /*
     538             :          * If we are still trying to allocate more space than is
     539             :          * available, squash the prealloc hard. This can happen if we
     540             :          * have a large file on a small filesystem and the above
      541             :          * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
     542             :          */
     543     1548048 :         while (alloc_blocks && alloc_blocks >= freesp)
     544       12701 :                 alloc_blocks >>= 4;
     545     1535347 :         if (alloc_blocks < mp->m_allocsize_blocks)
     546             :                 alloc_blocks = mp->m_allocsize_blocks;
     547     1535347 :         trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
     548             :                                       mp->m_allocsize_blocks);
     549     1535347 :         return alloc_blocks;
     550             : }
     551             : 
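To make the throttling arithmetic above concrete, here is a self-contained sketch under invented numbers: double the length of the preceding extents, shift it down because free space is assumed to be low, then round down to a power of two. None of the values or helpers are from the kernel.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for rounddown_pow_of_two(): keep only the highest set bit. */
static uint64_t round_down_pow2(uint64_t x)
{
        while (x & (x - 1))
                x &= x - 1;
        return x;
}

int main(void)
{
        uint64_t plen = 3000;           /* assumed size of preceding extents */
        uint64_t alloc_blocks = plen * 2;
        int shift = 3;                  /* assumed low-free-space throttle */

        alloc_blocks >>= shift;                       /* 6000 -> 750 */
        alloc_blocks = round_down_pow2(alloc_blocks); /* 750 -> 512 */
        printf("speculative prealloc: %llu blocks\n",
               (unsigned long long)alloc_blocks);
        return 0;
}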
     552             : int
     553    74651338 : xfs_iomap_write_unwritten(
     554             :         xfs_inode_t     *ip,
     555             :         xfs_off_t       offset,
     556             :         xfs_off_t       count,
     557             :         bool            update_isize)
     558             : {
     559    74651338 :         xfs_mount_t     *mp = ip->i_mount;
     560    74651338 :         xfs_fileoff_t   offset_fsb;
     561    74651338 :         xfs_filblks_t   count_fsb;
     562    74651338 :         xfs_filblks_t   numblks_fsb;
     563    74651338 :         int             nimaps;
     564    74651338 :         xfs_trans_t     *tp;
     565    74651338 :         xfs_bmbt_irec_t imap;
     566    74651338 :         struct inode    *inode = VFS_I(ip);
     567    74651338 :         xfs_fsize_t     i_size;
     568    74651338 :         uint            resblks;
     569    74651338 :         int             error;
     570             : 
     571    74651338 :         trace_xfs_unwritten_convert(ip, offset, count);
     572             : 
     573    74651211 :         offset_fsb = XFS_B_TO_FSBT(mp, offset);
     574    74651211 :         count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
     575    74651211 :         count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
     576             : 
     577             :         /*
     578             :          * Reserve enough blocks in this transaction for two complete extent
     579             :          * btree splits.  We may be converting the middle part of an unwritten
     580             :          * extent and in this case we will insert two new extents in the btree
     581             :          * each of which could cause a full split.
     582             :          *
     583             :          * This reservation amount will be used in the first call to
     584             :          * xfs_bmbt_split() to select an AG with enough space to satisfy the
     585             :          * rest of the operation.
     586             :          */
     587    74651211 :         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     588             : 
     589             :         /* Attach dquots so that bmbt splits are accounted correctly. */
     590    74651211 :         error = xfs_qm_dqattach(ip);
     591    74650876 :         if (error)
     592             :                 return error;
     593             : 
     594    77019340 :         do {
     595             :                 /*
     596             :                  * Set up a transaction to convert the range of extents
     597             :                  * from unwritten to real. Do allocations in a loop until
     598             :                  * we have covered the range passed in.
     599             :                  *
      600             :                  * Note that we can't risk recursing back into the filesystem
     601             :                  * here as we might be asked to write out the same inode that we
     602             :                  * complete here and might deadlock on the iolock.
     603             :                  */
     604    77019340 :                 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
     605             :                                 0, true, &tp);
     606    77019512 :                 if (error)
     607         194 :                         return error;
     608             : 
     609    77019318 :                 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
     610             :                                 XFS_IEXT_WRITE_UNWRITTEN_CNT);
     611    77017058 :                 if (error == -EFBIG)
     612          40 :                         error = xfs_iext_count_upgrade(tp, ip,
     613             :                                         XFS_IEXT_WRITE_UNWRITTEN_CNT);
     614    77017058 :                 if (error)
     615          40 :                         goto error_on_bmapi_transaction;
     616             : 
     617             :                 /*
     618             :                  * Modify the unwritten extent state of the buffer.
     619             :                  */
     620    77017018 :                 nimaps = 1;
     621    77017018 :                 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
     622             :                                         XFS_BMAPI_CONVERT, resblks, &imap,
     623             :                                         &nimaps);
     624    77016452 :                 if (error)
     625          69 :                         goto error_on_bmapi_transaction;
     626             : 
     627             :                 /*
     628             :                  * Log the updated inode size as we go.  We have to be careful
     629             :                  * to only log it up to the actual write offset if it is
     630             :                  * halfway into a block.
     631             :                  */
     632    77016383 :                 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
     633    77016383 :                 if (i_size > offset + count)
     634             :                         i_size = offset + count;
     635    77016383 :                 if (update_isize && i_size > i_size_read(inode))
     636     4921447 :                         i_size_write(inode, i_size);
     637    77016383 :                 i_size = xfs_new_eof(ip, i_size);
     638    30345812 :                 if (i_size) {
     639    30345451 :                         ip->i_disk_size = i_size;
     640    30345451 :                         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
     641             :                 }
     642             : 
     643    77018464 :                 error = xfs_trans_commit(tp);
     644    77019756 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
     645    77019652 :                 if (error)
     646         517 :                         return error;
     647             : 
     648    77019135 :                 if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
     649           0 :                         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     650           0 :                         return xfs_alert_fsblock_zero(ip, &imap);
     651             :                 }
     652             : 
     653    77019135 :                 if ((numblks_fsb = imap.br_blockcount) == 0) {
     654             :                         /*
      655             :                          * A zero-length mapping would leave count_fsb
      656             :                          * unchanged and the loop would be stuck.
     657             :                          */
     658           0 :                         ASSERT(imap.br_blockcount);
     659             :                         break;
     660             :                 }
     661    77019135 :                 offset_fsb += numblks_fsb;
     662    77019135 :                 count_fsb -= numblks_fsb;
     663    77019135 :         } while (count_fsb > 0);
     664             : 
     665             :         return 0;
     666             : 
     667         109 : error_on_bmapi_transaction:
     668         109 :         xfs_trans_cancel(tp);
     669         109 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     670         109 :         return error;
     671             : }
     672             : 
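The conversion loop above processes one mapping per transaction and advances through the byte range until nothing is left. A stripped-down sketch of that control flow, with a hypothetical convert_one() standing in for the transaction setup, xfs_bmapi_write() call and commit:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for one convert-and-commit pass: pretend each
 * transaction converts at most 16 blocks of the requested range. */
static uint64_t convert_one(uint64_t offset_fsb, uint64_t count_fsb)
{
        (void)offset_fsb;
        return count_fsb < 16 ? count_fsb : 16;
}

int main(void)
{
        uint64_t offset_fsb = 100, count_fsb = 40;

        /* Mirrors the do/while structure: each pass shrinks count_fsb by
         * however many blocks the mapping covered, until none remain. */
        do {
                uint64_t done = convert_one(offset_fsb, count_fsb);

                offset_fsb += done;
                count_fsb  -= done;
                printf("converted %llu blocks, %llu left\n",
                       (unsigned long long)done,
                       (unsigned long long)count_fsb);
        } while (count_fsb > 0);
        return 0;
}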
     673             : static inline bool
     674   284119827 : imap_needs_alloc(
     675             :         struct inode            *inode,
     676             :         unsigned                flags,
     677             :         struct xfs_bmbt_irec    *imap,
     678             :         int                     nimaps)
     679             : {
     680             :         /* don't allocate blocks when just zeroing */
     681   284119827 :         if (flags & IOMAP_ZERO)
     682             :                 return false;
     683   218481124 :         if (!nimaps ||
     684   218481124 :             imap->br_startblock == HOLESTARTBLOCK ||
     685             :             imap->br_startblock == DELAYSTARTBLOCK)
     686             :                 return true;
     687             :         /* we convert unwritten extents before copying the data for DAX */
     688   129392518 :         if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
     689           0 :                 return true;
     690             :         return false;
     691             : }
     692             : 
     693             : static inline bool
     694   296945736 : imap_needs_cow(
     695             :         struct xfs_inode        *ip,
     696             :         unsigned int            flags,
     697             :         struct xfs_bmbt_irec    *imap,
     698             :         int                     nimaps)
     699             : {
     700   296945736 :         if (!xfs_is_cow_inode(ip))
     701             :                 return false;
     702             : 
     703             :         /* when zeroing we don't have to COW holes or unwritten extents */
     704   108610091 :         if (flags & IOMAP_ZERO) {
     705    21136416 :                 if (!nimaps ||
     706    21136454 :                     imap->br_startblock == HOLESTARTBLOCK ||
     707     9108657 :                     imap->br_state == XFS_EXT_UNWRITTEN)
     708    15942788 :                         return false;
     709             :         }
     710             : 
     711             :         return true;
     712             : }
     713             : 
     714             : static int
     715  4197393047 : xfs_ilock_for_iomap(
     716             :         struct xfs_inode        *ip,
     717             :         unsigned                flags,
     718             :         unsigned                *lockmode)
     719             : {
     720  4197393047 :         unsigned int            mode = *lockmode;
     721  4197393047 :         bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
     722             : 
     723             :         /*
     724             :          * COW writes may allocate delalloc space or convert unwritten COW
     725             :          * extents, so we need to make sure to take the lock exclusively here.
     726             :          */
     727  4197393047 :         if (xfs_is_cow_inode(ip) && is_write)
     728   244254784 :                 mode = XFS_ILOCK_EXCL;
     729             : 
     730             :         /*
      731             :          * Extents not yet cached require exclusive access, don't block.  This
     732             :          * is an opencoded xfs_ilock_data_map_shared() call but with
     733             :          * non-blocking behaviour.
     734             :          */
     735  4193905191 :         if (xfs_need_iread_extents(&ip->i_df)) {
     736        5424 :                 if (flags & IOMAP_NOWAIT)
     737             :                         return -EAGAIN;
     738             :                 mode = XFS_ILOCK_EXCL;
     739             :         }
     740             : 
     741  4194100520 : relock:
     742  4194547992 :         if (flags & IOMAP_NOWAIT) {
     743           0 :                 if (!xfs_ilock_nowait(ip, mode))
     744             :                         return -EAGAIN;
     745             :         } else {
     746  4194547992 :                 xfs_ilock(ip, mode);
     747             :         }
     748             : 
     749             :         /*
     750             :          * The reflink iflag could have changed since the earlier unlocked
      751             :          * check, so if we got ILOCK_SHARED for a write but we're now a
     752             :          * reflink inode we have to switch to ILOCK_EXCL and relock.
     753             :          */
     754  4195146732 :         if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
     755           0 :                 xfs_iunlock(ip, mode);
     756      442048 :                 mode = XFS_ILOCK_EXCL;
     757      442048 :                 goto relock;
     758             :         }
     759             : 
     760  4195126191 :         *lockmode = mode;
     761  4195126191 :         return 0;
     762             : }
     763             : 
     764             : /*
     765             :  * Check that the imap we are going to return to the caller spans the entire
     766             :  * range that the caller requested for the IO.
     767             :  */
     768             : static bool
     769             : imap_spans_range(
     770             :         struct xfs_bmbt_irec    *imap,
     771             :         xfs_fileoff_t           offset_fsb,
     772             :         xfs_fileoff_t           end_fsb)
     773             : {
     774      488943 :         if (imap->br_startoff > offset_fsb)
     775             :                 return false;
     776      488943 :         if (imap->br_startoff + imap->br_blockcount < end_fsb)
     777             :                 return false;
     778             :         return true;
     779             : }
     780             : 
     781             : static int
     782   296977716 : xfs_direct_write_iomap_begin(
     783             :         struct inode            *inode,
     784             :         loff_t                  offset,
     785             :         loff_t                  length,
     786             :         unsigned                flags,
     787             :         struct iomap            *iomap,
     788             :         struct iomap            *srcmap)
     789             : {
     790   296977716 :         struct xfs_inode        *ip = XFS_I(inode);
     791   296977716 :         struct xfs_mount        *mp = ip->i_mount;
     792   296977716 :         struct xfs_bmbt_irec    imap, cmap;
     793   296977716 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     794   296977716 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     795   296702256 :         int                     nimaps = 1, error = 0;
     796   296702256 :         bool                    shared = false;
     797   296702256 :         u16                     iomap_flags = 0;
     798   296702256 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
     799   296702256 :         u64                     seq;
     800             : 
     801   296702256 :         ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
     802             : 
     803   593404512 :         if (xfs_is_shutdown(mp))
     804             :                 return -EIO;
     805             : 
     806             :         /*
     807             :          * Writes that span EOF might trigger an IO size update on completion,
     808             :          * so consider them to be dirty for the purposes of O_DSYNC even if
      809             :          * there are no other metadata changes pending or made here.
     810             :          */
     811   296702250 :         if (offset + length > i_size_read(inode))
     812   155959859 :                 iomap_flags |= IOMAP_F_DIRTY;
     813             : 
     814   296702250 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     815   297040328 :         if (error)
     816             :                 return error;
     817             : 
     818   297066415 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
     819             :                                &nimaps, 0);
     820   296743437 :         if (error)
     821           0 :                 goto out_unlock;
     822             : 
     823   296743437 :         if (imap_needs_cow(ip, flags, &imap, nimaps)) {
     824    92667834 :                 error = -EAGAIN;
     825    92667834 :                 if (flags & IOMAP_NOWAIT)
     826           0 :                         goto out_unlock;
     827             : 
     828             :                 /* may drop and re-acquire the ilock */
     829    92667834 :                 error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
     830             :                                 &lockmode,
     831    92667834 :                                 (flags & IOMAP_DIRECT) || IS_DAX(inode));
     832    92667580 :                 if (error)
     833        1815 :                         goto out_unlock;
     834    92665765 :                 if (shared)
     835    12688919 :                         goto out_found_cow;
     836    79976846 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
     837    79976846 :                 length = XFS_FSB_TO_B(mp, end_fsb) - offset;
     838             :         }
     839             : 
     840   284045990 :         if (imap_needs_alloc(inode, flags, &imap, nimaps))
     841    89083756 :                 goto allocate_blocks;
     842             : 
     843             :         /*
     844             :          * NOWAIT and OVERWRITE I/O needs to span the entire requested I/O with
     845             :          * a single map so that we avoid partial IO failures due to the rest of
     846             :          * the I/O range not covered by this map triggering an EAGAIN condition
     847             :          * when it is subsequently mapped and aborting the I/O.
     848             :          */
     849   194962234 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
     850      488943 :                 error = -EAGAIN;
     851      488943 :                 if (!imap_spans_range(&imap, offset_fsb, end_fsb))
     852      109840 :                         goto out_unlock;
     853             :         }
     854             : 
     855             :         /*
     856             :          * For overwrite only I/O, we cannot convert unwritten extents without
     857             :          * requiring sub-block zeroing.  This can only be done under an
     858             :          * exclusive IOLOCK, hence return -EAGAIN if this is not a written
     859             :          * extent to tell the caller to try again.
     860             :          */
     861   194852394 :         if (flags & IOMAP_OVERWRITE_ONLY) {
     862      379127 :                 error = -EAGAIN;
     863      379127 :                 if (imap.br_state != XFS_EXT_NORM &&
     864      123183 :                     ((offset | length) & mp->m_blockmask))
     865      123180 :                         goto out_unlock;
     866             :         }
     867             : 
     868   194729214 :         seq = xfs_iomap_inode_sequence(ip, iomap_flags);
     869   194698660 :         xfs_iunlock(ip, lockmode);
     870   194536644 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
     871   194511720 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
     872             : 
     873             : allocate_blocks:
     874    89083756 :         error = -EAGAIN;
     875    89083756 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
     876      705936 :                 goto out_unlock;
     877             : 
     878             :         /*
      879             :          * We cap the maximum length we map to a sane size to keep the chunks
      880             :          * of work done here somewhat symmetric with the work writeback does.
     881             :          * This is a completely arbitrary number pulled out of thin air as a
     882             :          * best guess for initial testing.
     883             :          *
      884             :          * Note that the value needs to be less than 32 bits wide until the
     885             :          * lower level functions are updated.
     886             :          */
     887    88377820 :         length = min_t(loff_t, length, 1024 * PAGE_SIZE);
     888    88377820 :         end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     889             : 
     890   176778492 :         if (offset + length > XFS_ISIZE(ip))
     891    50446546 :                 end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
     892    37942700 :         else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
     893    37942153 :                 end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
     894    88392805 :         xfs_iunlock(ip, lockmode);
     895             : 
     896    88423238 :         error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
     897             :                         flags, &imap, &seq);
     898    88491166 :         if (error)
     899             :                 return error;
     900             : 
     901    85777066 :         trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
     902    85776931 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
     903             :                                  iomap_flags | IOMAP_F_NEW, seq);
     904             : 
     905             : out_found_cow:
     906    12688919 :         length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
     907    12688919 :         trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
     908    12688852 :         if (imap.br_startblock != HOLESTARTBLOCK) {
     909    11856482 :                 seq = xfs_iomap_inode_sequence(ip, 0);
     910    11856482 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
     911    11856502 :                 if (error)
     912           0 :                         goto out_unlock;
     913             :         }
     914    12688872 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
     915    12688872 :         xfs_iunlock(ip, lockmode);
     916    12688956 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
     917             : 
     918      940771 : out_unlock:
     919      940771 :         if (lockmode)
     920      938976 :                 xfs_iunlock(ip, lockmode);
     921             :         return error;
     922             : }
     923             : 
     924             : const struct iomap_ops xfs_direct_write_iomap_ops = {
     925             :         .iomap_begin            = xfs_direct_write_iomap_begin,
     926             : };
     927             : 
     928             : static int
     929           0 : xfs_dax_write_iomap_end(
     930             :         struct inode            *inode,
     931             :         loff_t                  pos,
     932             :         loff_t                  length,
     933             :         ssize_t                 written,
     934             :         unsigned                flags,
     935             :         struct iomap            *iomap)
     936             : {
     937           0 :         struct xfs_inode        *ip = XFS_I(inode);
     938             : 
     939           0 :         if (!xfs_is_cow_inode(ip))
     940             :                 return 0;
     941             : 
     942           0 :         if (!written) {
     943           0 :                 xfs_reflink_cancel_cow_range(ip, pos, length, true);
     944           0 :                 return 0;
     945             :         }
     946             : 
     947           0 :         return xfs_reflink_end_cow(ip, pos, written);
     948             : }
     949             : 
     950             : const struct iomap_ops xfs_dax_write_iomap_ops = {
     951             :         .iomap_begin    = xfs_direct_write_iomap_begin,
     952             :         .iomap_end      = xfs_dax_write_iomap_end,
     953             : };
     954             : 
     955             : static int
     956   570333988 : xfs_buffered_write_iomap_begin(
     957             :         struct inode            *inode,
     958             :         loff_t                  offset,
     959             :         loff_t                  count,
     960             :         unsigned                flags,
     961             :         struct iomap            *iomap,
     962             :         struct iomap            *srcmap)
     963             : {
     964   570333988 :         struct xfs_inode        *ip = XFS_I(inode);
     965   570333988 :         struct xfs_mount        *mp = ip->i_mount;
     966   570333988 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     967   570333988 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, count);
     968   570054818 :         struct xfs_bmbt_irec    imap, cmap;
     969   570054818 :         struct xfs_iext_cursor  icur, ccur;
     970   570054818 :         xfs_fsblock_t           prealloc_blocks = 0;
     971   570054818 :         bool                    eof = false, cow_eof = false, shared = false;
     972   570054818 :         int                     allocfork = XFS_DATA_FORK;
     973   570054818 :         int                     error = 0;
     974   570054818 :         unsigned int            lockmode = XFS_ILOCK_EXCL;
     975   570054818 :         u64                     seq;
     976             : 
     977  1140109636 :         if (xfs_is_shutdown(mp))
     978             :                 return -EIO;
     979             : 
     980             :         /* we can't use delayed allocations when using extent size hints */
     981   570054665 :         if (xfs_get_extsz_hint(ip))
     982   269640409 :                 return xfs_direct_write_iomap_begin(inode, offset, count,
     983             :                                 flags, iomap, srcmap);
     984             : 
     985   300976896 :         ASSERT(!XFS_IS_REALTIME_INODE(ip));
     986             : 
     987   300976896 :         error = xfs_qm_dqattach(ip);
     988   301083253 :         if (error)
     989             :                 return error;
     990             : 
     991   301039669 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     992   301400102 :         if (error)
     993             :                 return error;
     994             : 
     995   601641801 :         if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
     996   301400102 :             XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
     997       70037 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     998           0 :                 error = -EFSCORRUPTED;
     999           0 :                 goto out_unlock;
    1000             :         }
    1001             : 
    1002   300171755 :         XFS_STATS_INC(mp, xs_blk_mapw);
    1003             : 
    1004   300334288 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1005   301367416 :         if (error)
    1006          10 :                 goto out_unlock;
    1007             : 
    1008             :         /*
    1009             :          * Search the data fork first to look up our source mapping.  We
    1010             :          * always need the data fork map, as we have to return it to the
    1011             :          * iomap code so that the higher level write code can read data in to
    1012             :          * perform read-modify-write cycles for unaligned writes.
    1013             :          */
    1014   301367406 :         eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
    1015   301382654 :         if (eof)
    1016    74721024 :                 imap.br_startoff = end_fsb; /* fake hole until the end */
    1017             : 
    1018             :         /* We never need to allocate blocks for zeroing or unsharing a hole. */
    1019   301382654 :         if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
    1020    89013925 :             imap.br_startoff > offset_fsb) {
    1021    49500700 :                 xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
    1022    49518901 :                 goto out_unlock;
    1023             :         }
    1024             : 
    1025             :         /*
    1026             :          * Search the COW fork extent list even if we did not find a data fork
     1027             :          * extent.  This serves two purposes: first, it implements the
     1028             :          * speculative preallocation using cowextsize, so that we also unshare
     1029             :          * blocks adjacent to shared blocks instead of just the shared blocks
     1030             :          * themselves.  Second, the lookup in the extent list is generally faster
    1031             :          * than going out to the shared extent tree.
    1032             :          */
    1033   251881954 :         if (xfs_is_cow_inode(ip)) {
    1034   104322160 :                 if (!ip->i_cowfp) {
    1035      790234 :                         ASSERT(!xfs_is_reflink_inode(ip));
    1036      790234 :                         xfs_ifork_init_cow(ip);
    1037             :                 }
    1038   104324098 :                 cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
    1039   104325606 :                                 &ccur, &cmap);
    1040   104325606 :                 if (!cow_eof && cmap.br_startoff <= offset_fsb) {
    1041    36195652 :                         trace_xfs_reflink_cow_found(ip, &cmap);
    1042    36194550 :                         goto found_cow;
    1043             :                 }
    1044             :         }
    1045             : 
    1046   215590344 :         if (imap.br_startoff <= offset_fsb) {
    1047             :                 /*
    1048             :                  * For reflink files we may need a delalloc reservation when
    1049             :                  * overwriting shared extents.   This includes zeroing of
    1050             :                  * existing extents that contain data.
    1051             :                  */
    1052   160554363 :                 if (!xfs_is_cow_inode(ip) ||
    1053    32726016 :                     ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
    1054   133352346 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1055             :                                         &imap);
    1056   133107029 :                         goto found_imap;
    1057             :                 }
    1058             : 
    1059    27285394 :                 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
    1060             : 
    1061             :                 /* Trim the mapping to the nearest shared extent boundary. */
    1062    27285147 :                 error = xfs_bmap_trim_cow(ip, &imap, &shared);
    1063    27285259 :                 if (error)
    1064         136 :                         goto out_unlock;
    1065             : 
    1066             :                 /* Not shared?  Just report the (potentially capped) extent. */
    1067    27285123 :                 if (!shared) {
    1068    25568150 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1069             :                                         &imap);
    1070    25567946 :                         goto found_imap;
    1071             :                 }
    1072             : 
    1073             :                 /*
    1074             :                  * Fork all the shared blocks from our write offset until the
    1075             :                  * end of the extent.
    1076             :                  */
    1077     1716973 :                 allocfork = XFS_COW_FORK;
    1078     1716973 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
    1079             :         } else {
    1080             :                 /*
    1081             :                  * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
     1082             :                  * pages to keep the chunks of work done here somewhat
    1083             :                  * symmetric with the work writeback does.  This is a completely
    1084             :                  * arbitrary number pulled out of thin air.
    1085             :                  *
     1086             :                  * Note that the value needs to be less than 32 bits wide until
    1087             :                  * the lower level functions are updated.
    1088             :                  */
    1089    55035981 :                 count = min_t(loff_t, count, 1024 * PAGE_SIZE);
    1090    55035981 :                 end_fsb = xfs_iomap_end_fsb(mp, offset, count);
    1091             : 
    1092    55057468 :                 if (xfs_is_always_cow_inode(ip))
    1093     3024566 :                         allocfork = XFS_COW_FORK;
    1094             :         }
    1095             : 
    1096    83541978 :         if (eof && offset + count > XFS_ISIZE(ip)) {
    1097             :                 /*
    1098             :                  * Determine the initial size of the preallocation.
    1099             :                  * We clean up any extra preallocation when the file is closed.
    1100             :                  */
    1101    18596573 :                 if (xfs_has_allocsize(mp))
    1102       29869 :                         prealloc_blocks = mp->m_allocsize_blocks;
    1103    18566704 :                 else if (allocfork == XFS_DATA_FORK)
    1104    16515886 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1105             :                                                 offset, count, &icur);
    1106             :                 else
    1107     2050818 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1108             :                                                 offset, count, &ccur);
    1109    18568845 :                 if (prealloc_blocks) {
    1110     7906578 :                         xfs_extlen_t    align;
    1111     7906578 :                         xfs_off_t       end_offset;
    1112     7906578 :                         xfs_fileoff_t   p_end_fsb;
    1113             : 
    1114     7906578 :                         end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
    1115     7906578 :                         p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
    1116             :                                         prealloc_blocks;
    1117             : 
    1118     7906578 :                         align = xfs_eof_alignment(ip);
    1119     7905486 :                         if (align)
    1120      515914 :                                 p_end_fsb = roundup_64(p_end_fsb, align);
    1121             : 
    1122     7905476 :                         p_end_fsb = min(p_end_fsb,
    1123             :                                 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
    1124     7905476 :                         ASSERT(p_end_fsb > offset_fsb);
    1125     7905476 :                         prealloc_blocks = p_end_fsb - end_fsb;
    1126             :                 }
    1127             :         }
    1128             : 
    1129    48770015 : retry:
    1130    71588675 :         error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
    1131             :                         end_fsb - offset_fsb, prealloc_blocks,
    1132             :                         allocfork == XFS_DATA_FORK ? &imap : &cmap,
    1133             :                         allocfork == XFS_DATA_FORK ? &icur : &ccur,
    1134             :                         allocfork == XFS_DATA_FORK ? eof : cow_eof);
    1135    57338249 :         switch (error) {
    1136             :         case 0:
    1137    54863894 :                 break;
    1138     2473115 :         case -ENOSPC:
    1139             :         case -EDQUOT:
    1140             :                 /* retry without any preallocation */
    1141     2473115 :                 trace_xfs_delalloc_enospc(ip, offset, count);
    1142     2471016 :                 if (prealloc_blocks) {
    1143      607305 :                         prealloc_blocks = 0;
    1144      607305 :                         goto retry;
    1145             :                 }
    1146     1864951 :                 fallthrough;
    1147             :         default:
    1148     1864951 :                 goto out_unlock;
    1149             :         }
    1150             : 
    1151    54863894 :         if (allocfork == XFS_COW_FORK) {
    1152     4515774 :                 trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
    1153     4499831 :                 goto found_cow;
    1154             :         }
    1155             : 
    1156             :         /*
    1157             :          * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
    1158             :          * them out if the write happens to fail.
    1159             :          */
    1160    50348120 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
    1161    50348120 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1162    50426603 :         trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
    1163    50398663 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
    1164             : 
    1165   158674975 : found_imap:
    1166   158674975 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1167   158674975 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1168   158735503 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1169             : 
    1170    40694381 : found_cow:
    1171    40694381 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1172    40694381 :         if (imap.br_startoff <= offset_fsb) {
    1173     9108874 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
    1174     9108375 :                 if (error)
    1175           0 :                         goto out_unlock;
    1176     9108375 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1177     9108375 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1178     9108964 :                 return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1179             :                                          IOMAP_F_SHARED, seq);
    1180             :         }
    1181             : 
    1182    31585507 :         xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
    1183    31558106 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1184    31622776 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
    1185             : 
    1186    51383998 : out_unlock:
    1187    51383998 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1188    51383998 :         return error;
    1189             : }
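
The speculative preallocation sizing above is plain block arithmetic: round the write end down to the allocsize boundary, add the preallocation estimate, round up to the EOF alignment, clamp to s_maxbytes, and convert the result back into a block count. A user-space sketch of the same steps, with made-up values standing in for the mount fields (4k blocks, a 256k allocsize hint, a 16-block EOF alignment), purely to illustrate the arithmetic:

        #include <stdio.h>
        #include <stdint.h>

        /* Stand-ins for mount fields; values are illustrative only. */
        #define BLOCK_SHIFT     12              /* 4096-byte filesystem blocks */
        #define ALLOCSIZE_LOG   18              /* mount -o allocsize=256k */

        /* Round an offset down to the allocsize boundary (XFS_ALLOC_ALIGN). */
        static uint64_t alloc_align(uint64_t off)
        {
                return (off >> ALLOCSIZE_LOG) << ALLOCSIZE_LOG;
        }

        static uint64_t roundup64(uint64_t x, uint64_t y)
        {
                return ((x + y - 1) / y) * y;
        }

        int main(void)
        {
                uint64_t offset = 1048576, count = 8192;  /* write at 1MiB */
                uint64_t prealloc_blocks = 256;           /* from the heuristics */
                uint64_t eof_align = 16;                  /* xfs_eof_alignment() */
                uint64_t end_fsb =
                        (offset + count + (1ULL << BLOCK_SHIFT) - 1) >> BLOCK_SHIFT;

                /* Mirror the p_end_fsb computation above. */
                uint64_t end_offset = alloc_align(offset + count - 1);
                uint64_t p_end_fsb = (end_offset >> BLOCK_SHIFT) + prealloc_blocks;

                if (eof_align)
                        p_end_fsb = roundup64(p_end_fsb, eof_align);

                /* The kernel also clamps p_end_fsb to s_maxbytes; skipped here. */
                printf("prealloc_blocks = %llu\n",
                       (unsigned long long)(p_end_fsb - end_fsb));
                return 0;
        }
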
    1190             : 
    1191             : static int
    1192       39977 : xfs_buffered_write_delalloc_punch(
    1193             :         struct inode            *inode,
    1194             :         loff_t                  offset,
    1195             :         loff_t                  length)
    1196             : {
    1197       39977 :         return xfs_bmap_punch_delalloc_range(XFS_I(inode), offset,
    1198             :                         offset + length);
    1199             : }
    1200             : 
    1201             : static int
    1202   471590470 : xfs_buffered_write_iomap_end(
    1203             :         struct inode            *inode,
    1204             :         loff_t                  offset,
    1205             :         loff_t                  length,
    1206             :         ssize_t                 written,
    1207             :         unsigned                flags,
    1208             :         struct iomap            *iomap)
    1209             : {
    1210             : 
    1211   471590470 :         struct xfs_mount        *mp = XFS_M(inode->i_sb);
    1212   471590470 :         int                     error;
    1213             : 
    1214   471590470 :         error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
    1215             :                         length, written, &xfs_buffered_write_delalloc_punch);
    1216   471433384 :         if (error && !xfs_is_shutdown(mp)) {
    1217           0 :                 xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
    1218             :                         __func__, XFS_I(inode)->i_ino);
    1219           0 :                 return error;
    1220             :         }
    1221             :         return 0;
    1222             : }
    1223             : 
    1224             : const struct iomap_ops xfs_buffered_write_iomap_ops = {
    1225             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1226             :         .iomap_end              = xfs_buffered_write_iomap_end,
    1227             : };
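These ops back ordinary buffered write(2) calls. A sketch of the generic entry point, assuming the three-argument iomap_file_buffered_write() of this kernel series; the wrapper name is invented, and xfs_file_write_checks() plus i_rwsem handling are elided:

        /* Sketch only, condensed from xfs_file_buffered_write(). */
        static ssize_t
        example_buffered_write(struct kiocb *iocb, struct iov_iter *from)
        {
                /*
                 * .iomap_begin reserves delalloc blocks for each chunk and
                 * .iomap_end punches out whatever a short copy left unused.
                 */
                return iomap_file_buffered_write(iocb, from,
                                &xfs_buffered_write_iomap_ops);
        }
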
    1228             : 
    1229             : /*
    1230             :  * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
    1231             :  * that it allocated to be revoked. Hence we do not need an .iomap_end method
    1232             :  * for this operation.
    1233             :  */
    1234             : const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
    1235             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1236             : };
    1237             : 
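A sketch of the write-fault caller, assuming iomap_page_mkwrite() as used by the XFS fault handler; the helper name is invented, and the XFS_MMAPLOCK and transaction handling are elided:

        /* Sketch only: the write-fault half of __xfs_filemap_fault(). */
        static vm_fault_t
        example_page_mkwrite(struct vm_fault *vmf)
        {
                /*
                 * iomap_page_mkwrite() maps and dirties the faulting folio;
                 * as noted above, a failure here never strands delalloc
                 * blocks, so no .iomap_end is required.
                 */
                return iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
        }
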
    1238             : static int
    1239  3597090718 : xfs_read_iomap_begin(
    1240             :         struct inode            *inode,
    1241             :         loff_t                  offset,
    1242             :         loff_t                  length,
    1243             :         unsigned                flags,
    1244             :         struct iomap            *iomap,
    1245             :         struct iomap            *srcmap)
    1246             : {
    1247  3597090718 :         struct xfs_inode        *ip = XFS_I(inode);
    1248  3597090718 :         struct xfs_mount        *mp = ip->i_mount;
    1249  3597090718 :         struct xfs_bmbt_irec    imap;
    1250  3597090718 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1251  3597090718 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1252  3598313088 :         int                     nimaps = 1, error = 0;
    1253  3598313088 :         bool                    shared = false;
    1254  3598313088 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
    1255  3598313088 :         u64                     seq;
    1256             : 
    1257  3598313088 :         ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
    1258             : 
    1259  7196626176 :         if (xfs_is_shutdown(mp))
    1260             :                 return -EIO;
    1261             : 
    1262  3598312294 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
    1263  3597363060 :         if (error)
    1264             :                 return error;
    1265  3597564542 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1266             :                                &nimaps, 0);
    1267  3599826427 :         if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
    1268    11597521 :                 error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
    1269  7198901480 :         seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
    1270  3598883732 :         xfs_iunlock(ip, lockmode);
    1271             : 
    1272  3597434727 :         if (error)
    1273             :                 return error;
    1274  3597508375 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
    1275  3597534244 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
    1276  3597534244 :                                  shared ? IOMAP_F_SHARED : 0, seq);
    1277             : }
    1278             : 
    1279             : const struct iomap_ops xfs_read_iomap_ops = {
    1280             :         .iomap_begin            = xfs_read_iomap_begin,
    1281             : };
    1282             : 
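The read-only ops serve direct I/O and DAX reads as well as FIEMAP/SEEK reporting (the IOMAP_REPORT case above). A direct read sketch, again assuming the 6.x iomap_dio_rw() signature; the helper name is invented:

        /* Sketch only, modeled on xfs_file_dio_read(). */
        static ssize_t
        example_dio_read(struct kiocb *iocb, struct iov_iter *to)
        {
                /*
                 * Reads never allocate, so no iomap_dio_ops completion
                 * hook is needed.
                 */
                return iomap_dio_rw(iocb, to, &xfs_read_iomap_ops,
                                NULL, 0, NULL, 0);
        }
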
    1283             : static int
    1284      612678 : xfs_seek_iomap_begin(
    1285             :         struct inode            *inode,
    1286             :         loff_t                  offset,
    1287             :         loff_t                  length,
    1288             :         unsigned                flags,
    1289             :         struct iomap            *iomap,
    1290             :         struct iomap            *srcmap)
    1291             : {
    1292      612678 :         struct xfs_inode        *ip = XFS_I(inode);
    1293      612678 :         struct xfs_mount        *mp = ip->i_mount;
    1294      612678 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1295      612678 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1296      612678 :         xfs_fileoff_t           cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
    1297      612678 :         struct xfs_iext_cursor  icur;
    1298      612678 :         struct xfs_bmbt_irec    imap, cmap;
    1299      612678 :         int                     error = 0;
    1300      612678 :         unsigned                lockmode;
    1301      612678 :         u64                     seq;
    1302             : 
    1303     1225356 :         if (xfs_is_shutdown(mp))
    1304             :                 return -EIO;
    1305             : 
    1306      612678 :         lockmode = xfs_ilock_data_map_shared(ip);
    1307      612678 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1308      612678 :         if (error)
    1309           0 :                 goto out_unlock;
    1310             : 
    1311      612678 :         if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
    1312             :                 /*
    1313             :                  * If we found a data extent we are done.
    1314             :                  */
    1315      599374 :                 if (imap.br_startoff <= offset_fsb)
    1316      312926 :                         goto done;
    1317             :                 data_fsb = imap.br_startoff;
    1318             :         } else {
    1319             :                 /*
    1320             :                  * Fake a hole until the end of the file.
    1321             :                  */
    1322       13304 :                 data_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1323             :         }
    1324             : 
    1325             :         /*
    1326             :          * If a COW fork extent covers the hole, report it - capped to the next
    1327             :          * data fork extent:
    1328             :          */
    1329      625391 :         if (xfs_inode_has_cow_data(ip) &&
    1330       25887 :             xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
    1331       25743 :                 cow_fsb = cmap.br_startoff;
    1332      299752 :         if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
    1333        7941 :                 if (data_fsb < cow_fsb + cmap.br_blockcount)
    1334        1030 :                         end_fsb = min(end_fsb, data_fsb);
    1335        7941 :                 xfs_trim_extent(&cmap, offset_fsb, end_fsb);
    1336        7941 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1337        7941 :                 error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1338             :                                 IOMAP_F_SHARED, seq);
    1339             :                 /*
    1340             :                  * This is a COW extent, so we must probe the page cache
     1341             :                  * because there could be dirty page cache backed by
     1342             :                  * this extent.
    1343             :                  */
    1344        7941 :                 iomap->type = IOMAP_UNWRITTEN;
    1345        7941 :                 goto out_unlock;
    1346             :         }
    1347             : 
    1348             :         /*
    1349             :          * Else report a hole, capped to the next found data or COW extent.
    1350             :          */
    1351      291811 :         if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
    1352        5100 :                 imap.br_blockcount = cow_fsb - offset_fsb;
    1353             :         else
    1354      286711 :                 imap.br_blockcount = data_fsb - offset_fsb;
    1355      291811 :         imap.br_startoff = offset_fsb;
    1356      291811 :         imap.br_startblock = HOLESTARTBLOCK;
    1357      291811 :         imap.br_state = XFS_EXT_NORM;
    1358      604737 : done:
    1359      604737 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1360      604737 :         xfs_trim_extent(&imap, offset_fsb, end_fsb);
    1361      604737 :         error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1362      612678 : out_unlock:
    1363      612678 :         xfs_iunlock(ip, lockmode);
    1364      612678 :         return error;
    1365             : }
    1366             : 
    1367             : const struct iomap_ops xfs_seek_iomap_ops = {
    1368             :         .iomap_begin            = xfs_seek_iomap_begin,
    1369             : };
    1370             : 
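A sketch of how llseek wires these ops up, assuming iomap_seek_hole()/iomap_seek_data() from fs/iomap/seek.c; the wrapper is invented, and the SEEK_SET/SEEK_CUR/SEEK_END cases plus vfs_setpos() are omitted:

        /* Sketch only: the SEEK_HOLE/SEEK_DATA arm of xfs_file_llseek(). */
        static loff_t
        example_seek(struct file *file, loff_t offset, int whence)
        {
                struct inode    *inode = file->f_mapping->host;

                if (whence == SEEK_HOLE)
                        return iomap_seek_hole(inode, offset,
                                        &xfs_seek_iomap_ops);
                return iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
        }
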
    1371             : static int
    1372      499297 : xfs_xattr_iomap_begin(
    1373             :         struct inode            *inode,
    1374             :         loff_t                  offset,
    1375             :         loff_t                  length,
    1376             :         unsigned                flags,
    1377             :         struct iomap            *iomap,
    1378             :         struct iomap            *srcmap)
    1379             : {
    1380      499297 :         struct xfs_inode        *ip = XFS_I(inode);
    1381      499297 :         struct xfs_mount        *mp = ip->i_mount;
    1382      499297 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1383      499297 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1384      499297 :         struct xfs_bmbt_irec    imap;
    1385      499297 :         int                     nimaps = 1, error = 0;
    1386      499297 :         unsigned                lockmode;
    1387      499297 :         int                     seq;
    1388             : 
    1389      998594 :         if (xfs_is_shutdown(mp))
    1390             :                 return -EIO;
    1391             : 
    1392      499297 :         lockmode = xfs_ilock_attr_map_shared(ip);
    1393             : 
     1394             :         /* if there is no attribute fork or it has no extents, return ENOENT */
    1395      499297 :         if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
    1396      488112 :                 error = -ENOENT;
    1397      488112 :                 goto out_unlock;
    1398             :         }
    1399             : 
    1400       11185 :         ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
    1401       11185 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1402             :                                &nimaps, XFS_BMAPI_ATTRFORK);
    1403      499297 : out_unlock:
    1404             : 
    1405      499297 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
    1406      499297 :         xfs_iunlock(ip, lockmode);
    1407             : 
    1408      499296 :         if (error)
    1409             :                 return error;
    1410       11185 :         ASSERT(nimaps);
    1411       11185 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
    1412             : }
    1413             : 
    1414             : const struct iomap_ops xfs_xattr_iomap_ops = {
    1415             :         .iomap_begin            = xfs_xattr_iomap_begin,
    1416             : };
    1417             : 
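These ops exist solely to let FIEMAP report attribute-fork extents (FIEMAP_FLAG_XATTR). A sketch, assuming the iomap_fiemap() helper used by the XFS fiemap implementation; the wrapper name is invented:

        /* Sketch only: the attr-fork branch of xfs_vn_fiemap(). */
        static int
        example_fiemap_xattr(struct inode *inode,
                        struct fiemap_extent_info *fieinfo,
                        u64 start, u64 length)
        {
                /* Data-fork callers pass xfs_read_iomap_ops instead. */
                return iomap_fiemap(inode, fieinfo, start, length,
                                &xfs_xattr_iomap_ops);
        }
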
    1418             : int
    1419   124002907 : xfs_zero_range(
    1420             :         struct xfs_inode        *ip,
    1421             :         loff_t                  pos,
    1422             :         loff_t                  len,
    1423             :         bool                    *did_zero)
    1424             : {
    1425   124002907 :         struct inode            *inode = VFS_I(ip);
    1426             : 
    1427   124002907 :         if (IS_DAX(inode))
    1428           0 :                 return dax_zero_range(inode, pos, len, did_zero,
    1429             :                                       &xfs_dax_write_iomap_ops);
    1430   124002907 :         return iomap_zero_range(inode, pos, len, did_zero,
    1431             :                                 &xfs_buffered_write_iomap_ops);
    1432             : }
    1433             : 
    1434             : int
    1435     7412388 : xfs_truncate_page(
    1436             :         struct xfs_inode        *ip,
    1437             :         loff_t                  pos,
    1438             :         bool                    *did_zero)
    1439             : {
    1440     7412388 :         struct inode            *inode = VFS_I(ip);
    1441             : 
    1442     7412388 :         if (IS_DAX(inode))
    1443           0 :                 return dax_truncate_page(inode, pos, did_zero,
    1444             :                                         &xfs_dax_write_iomap_ops);
    1445     7412388 :         return iomap_truncate_page(inode, pos, did_zero,
    1446             :                                    &xfs_buffered_write_iomap_ops);
    1447             : }
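
Both helpers simply dispatch between the DAX and buffered-write iomap paths. A sketch of a typical caller zeroing from the old EOF up to a new write position, in the style of xfs_file_write_checks(); the function and variable names are assumptions:

        /* Sketch only: zero the gap a write past EOF would otherwise expose. */
        static int
        example_zero_eof(struct xfs_inode *ip, loff_t isize, loff_t new_pos)
        {
                bool    did_zero = false;

                return xfs_zero_range(ip, isize, new_pos - isize, &did_zero);
        }
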

Generated by: LCOV version 1.14