LCOV - code coverage report
Current view: top level - fs/xfs - xfs_iomap.c (source / functions)
Test:     fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023
Date:     2023-07-31 20:08:27
Coverage: Lines: 570 of 610 (93.4 %)    Functions: 23 of 25 (92.0 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
       4             :  * Copyright (c) 2016-2018 Christoph Hellwig.
       5             :  * All Rights Reserved.
       6             :  */
       7             : #include "xfs.h"
       8             : #include "xfs_fs.h"
       9             : #include "xfs_shared.h"
      10             : #include "xfs_format.h"
      11             : #include "xfs_log_format.h"
      12             : #include "xfs_trans_resv.h"
      13             : #include "xfs_mount.h"
      14             : #include "xfs_inode.h"
      15             : #include "xfs_btree.h"
      16             : #include "xfs_bmap_btree.h"
      17             : #include "xfs_bmap.h"
      18             : #include "xfs_bmap_util.h"
      19             : #include "xfs_errortag.h"
      20             : #include "xfs_error.h"
      21             : #include "xfs_trans.h"
      22             : #include "xfs_trans_space.h"
      23             : #include "xfs_inode_item.h"
      24             : #include "xfs_iomap.h"
      25             : #include "xfs_trace.h"
      26             : #include "xfs_quota.h"
      27             : #include "xfs_dquot_item.h"
      28             : #include "xfs_dquot.h"
      29             : #include "xfs_reflink.h"
      30             : #include "xfs_health.h"
      31             : 
      32             : #define XFS_ALLOC_ALIGN(mp, off) \
      33             :         (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
      34             : 
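
A minimal stand-alone sketch of the round-down behaviour of XFS_ALLOC_ALIGN (editor's illustration, not part of xfs_iomap.c): with an assumed m_allocsize_log of 4, i.e. a 16-block allocation granularity, any block offset is truncated to the previous multiple of 16.

    #include <stdio.h>

    /* Same shift-based round-down as XFS_ALLOC_ALIGN, with the log passed in. */
    #define DEMO_ALLOC_ALIGN(off, log)      (((off) >> (log)) << (log))

    int main(void)
    {
            unsigned long long off;

            /* Block offsets 45..47 round down to 32, 48..49 round down to 48. */
            for (off = 45; off < 50; off++)
                    printf("%llu -> %llu\n", off, DEMO_ALLOC_ALIGN(off, 4));
            return 0;
    }
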
      35             : static int
      36           0 : xfs_alert_fsblock_zero(
      37             :         xfs_inode_t     *ip,
      38             :         xfs_bmbt_irec_t *imap)
      39             : {
      40           0 :         xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
      41             :                         "Access to block zero in inode %llu "
      42             :                         "start_block: %llx start_off: %llx "
      43             :                         "blkcnt: %llx extent-state: %x",
      44             :                 (unsigned long long)ip->i_ino,
      45             :                 (unsigned long long)imap->br_startblock,
      46             :                 (unsigned long long)imap->br_startoff,
      47             :                 (unsigned long long)imap->br_blockcount,
      48             :                 imap->br_state);
      49           0 :         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
      50           0 :         return -EFSCORRUPTED;
      51             : }
      52             : 
      53             : u64
      54   591373337 : xfs_iomap_inode_sequence(
      55             :         struct xfs_inode        *ip,
      56             :         u16                     iomap_flags)
      57             : {
      58   690021568 :         u64                     cookie = 0;
      59             : 
      60   591373337 :         if (iomap_flags & IOMAP_F_XATTR)
      61           0 :                 return READ_ONCE(ip->i_af.if_seq);
      62   591373337 :         if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
      63     5696365 :                 cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
      64   591373337 :         return cookie | READ_ONCE(ip->i_df.if_seq);
      65             : }
      66             : 
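
The function above builds the iomap validity cookie: the data fork sequence number occupies the low 32 bits, and for IOMAP_F_SHARED mappings with a COW fork the COW fork sequence occupies the high 32 bits. A hedged sketch of that layout using made-up accessor names (editor's illustration only):

    #include <stdint.h>

    /* Illustrative accessors for the cookie layout; not kernel functions. */
    static inline uint32_t demo_cookie_data_seq(uint64_t cookie)
    {
            return (uint32_t)(cookie & 0xffffffffULL);      /* ip->i_df.if_seq */
    }

    static inline uint32_t demo_cookie_cow_seq(uint64_t cookie)
    {
            return (uint32_t)(cookie >> 32);                /* ip->i_cowfp->if_seq */
    }
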
      67             : /*
      68             :  * Check that the iomap passed to us is still valid for the given offset and
      69             :  * length.
      70             :  */
      71             : static bool
      72   178955637 : xfs_iomap_valid(
      73             :         struct inode            *inode,
      74             :         const struct iomap      *iomap)
      75             : {
      76   178955637 :         struct xfs_inode        *ip = XFS_I(inode);
      77             : 
      78   178853103 :         if (iomap->validity_cookie !=
      79   178955637 :                         xfs_iomap_inode_sequence(ip, iomap->flags)) {
      80       11773 :                 trace_xfs_iomap_invalid(ip, iomap);
      81       11773 :                 return false;
      82             :         }
      83             : 
      84   178844330 :         XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
      85             :         return true;
      86             : }
      87             : 
      88             : static const struct iomap_folio_ops xfs_iomap_folio_ops = {
      89             :         .iomap_valid            = xfs_iomap_valid,
      90             : };
      91             : 
      92             : int
      93   560859197 : xfs_bmbt_to_iomap(
      94             :         struct xfs_inode        *ip,
      95             :         struct iomap            *iomap,
      96             :         struct xfs_bmbt_irec    *imap,
      97             :         unsigned int            mapping_flags,
      98             :         u16                     iomap_flags,
      99             :         u64                     sequence_cookie)
     100             : {
     101   560859197 :         struct xfs_mount        *mp = ip->i_mount;
     102   560859197 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     103             : 
     104   560859197 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     105           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     106           0 :                 return xfs_alert_fsblock_zero(ip, imap);
     107             :         }
     108             : 
     109   560859197 :         if (imap->br_startblock == HOLESTARTBLOCK) {
     110   320105976 :                 iomap->addr = IOMAP_NULL_ADDR;
     111   320105976 :                 iomap->type = IOMAP_HOLE;
     112   240753221 :         } else if (imap->br_startblock == DELAYSTARTBLOCK ||
     113             :                    isnullstartblock(imap->br_startblock)) {
     114    60482088 :                 iomap->addr = IOMAP_NULL_ADDR;
     115    60482088 :                 iomap->type = IOMAP_DELALLOC;
     116             :         } else {
     117   180271133 :                 iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
     118   180271905 :                 if (mapping_flags & IOMAP_DAX)
     119             :                         iomap->addr += target->bt_dax_part_off;
     120             : 
     121   180271905 :                 if (imap->br_state == XFS_EXT_UNWRITTEN)
     122    94897618 :                         iomap->type = IOMAP_UNWRITTEN;
     123             :                 else
     124    85374287 :                         iomap->type = IOMAP_MAPPED;
     125             : 
     126             :         }
     127   560859969 :         iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
     128   560859969 :         iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
     129   560859969 :         if (mapping_flags & IOMAP_DAX)
     130             :                 iomap->dax_dev = target->bt_daxdev;
     131             :         else
     132   560859969 :                 iomap->bdev = xfs_buftarg_bdev(target);
     133   560859969 :         iomap->flags = iomap_flags;
     134             : 
     135   560859969 :         if (xfs_ipincount(ip) &&
     136   199775974 :             (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
     137   166960036 :                 iomap->flags |= IOMAP_F_DIRTY;
     138             : 
     139   560859969 :         iomap->validity_cookie = sequence_cookie;
     140   560859969 :         iomap->folio_ops = &xfs_iomap_folio_ops;
     141   560859969 :         return 0;
     142             : }
     143             : 
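
The if/else chain in xfs_bmbt_to_iomap() is the complete translation from an XFS extent record to a generic iomap type. Restated as a small stand-alone helper with local stand-in types (assumed names, editor's sketch only):

    #include <stdio.h>

    enum demo_iomap_type { DEMO_HOLE, DEMO_DELALLOC, DEMO_UNWRITTEN, DEMO_MAPPED };

    struct demo_extent {
            int is_hole;            /* br_startblock == HOLESTARTBLOCK */
            int is_delalloc;        /* delayed allocation, no real blocks yet */
            int is_unwritten;       /* br_state == XFS_EXT_UNWRITTEN */
    };

    static enum demo_iomap_type demo_extent_type(const struct demo_extent *e)
    {
            if (e->is_hole)
                    return DEMO_HOLE;
            if (e->is_delalloc)
                    return DEMO_DELALLOC;
            if (e->is_unwritten)
                    return DEMO_UNWRITTEN;
            return DEMO_MAPPED;     /* written, allocated space */
    }

    int main(void)
    {
            struct demo_extent e = { .is_unwritten = 1 };

            printf("type = %d\n", demo_extent_type(&e));    /* DEMO_UNWRITTEN (2) */
            return 0;
    }
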
     144             : static void
     145    19663829 : xfs_hole_to_iomap(
     146             :         struct xfs_inode        *ip,
     147             :         struct iomap            *iomap,
     148             :         xfs_fileoff_t           offset_fsb,
     149             :         xfs_fileoff_t           end_fsb)
     150             : {
     151    19663829 :         struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
     152             : 
     153    19663829 :         iomap->addr = IOMAP_NULL_ADDR;
     154    19663829 :         iomap->type = IOMAP_HOLE;
     155    19663829 :         iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
     156    19663829 :         iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
     157    19663829 :         iomap->bdev = xfs_buftarg_bdev(target);
     158    19663829 :         iomap->dax_dev = target->bt_daxdev;
     159    19663829 : }
     160             : 
     161             : static inline xfs_fileoff_t
     162   585939850 : xfs_iomap_end_fsb(
     163             :         struct xfs_mount        *mp,
     164             :         loff_t                  offset,
     165             :         loff_t                  count)
     166             : {
     167   585939850 :         ASSERT(offset <= mp->m_super->s_maxbytes);
     168   585939850 :         return min(XFS_B_TO_FSB(mp, offset + count),
     169             :                    XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
     170             : }
     171             : 
     172             : static xfs_extlen_t
     173    13791787 : xfs_eof_alignment(
     174             :         struct xfs_inode        *ip)
     175             : {
     176    13791787 :         struct xfs_mount        *mp = ip->i_mount;
     177    13791787 :         xfs_extlen_t            align = 0;
     178             : 
     179    13791787 :         if (!XFS_IS_REALTIME_INODE(ip)) {
     180             :                 /*
     181             :                  * Round up the allocation request to a stripe unit
     182             :                  * (m_dalign) boundary if the file size is >= stripe unit
     183             :                  * size, and we are allocating past the allocation eof.
     184             :                  *
      185             :          * If mounted with the "-o swalloc" option the alignment is
      186             :          * increased from the stripe unit size to the stripe width.
     187             :                  */
     188     7963011 :                 if (mp->m_swidth && xfs_has_swalloc(mp))
     189           0 :                         align = mp->m_swidth;
     190     7963011 :                 else if (mp->m_dalign)
     191        9658 :                         align = mp->m_dalign;
     192             : 
     193       19316 :                 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
     194        2625 :                         align = 0;
     195             :         }
     196             : 
     197    13791787 :         return align;
     198             : }
     199             : 
     200             : /*
     201             :  * Check if last_fsb is outside the last extent, and if so grow it to the next
     202             :  * stripe unit boundary.
     203             :  */
     204             : xfs_fileoff_t
     205     9930693 : xfs_iomap_eof_align_last_fsb(
     206             :         struct xfs_inode        *ip,
     207             :         xfs_fileoff_t           end_fsb)
     208             : {
     209     9930693 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
     210     9930693 :         xfs_extlen_t            extsz = xfs_get_extsz_hint(ip);
     211     9930647 :         xfs_extlen_t            align = xfs_eof_alignment(ip);
     212     9930647 :         struct xfs_bmbt_irec    irec;
     213     9930647 :         struct xfs_iext_cursor  icur;
     214             : 
     215     9930647 :         ASSERT(!xfs_need_iread_extents(ifp));
     216             : 
     217             :         /*
     218             :          * Always round up the allocation request to the extent hint boundary.
     219             :          */
     220     9930636 :         if (extsz) {
     221     8925584 :                 if (align)
     222           0 :                         align = roundup_64(align, extsz);
     223             :                 else
     224             :                         align = extsz;
     225             :         }
     226             : 
     227     9930636 :         if (align) {
     228     8926382 :                 xfs_fileoff_t   aligned_end_fsb = roundup_64(end_fsb, align);
     229             : 
     230     8926382 :                 xfs_iext_last(ifp, &icur);
     231     8926377 :                 if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
     232     7834507 :                     aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
     233     8755604 :                         return aligned_end_fsb;
     234             :         }
     235             : 
     236             :         return end_fsb;
     237             : }
     238             : 
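
A worked numeric example of the EOF alignment above, under assumed values (editor's illustration): a stripe-unit alignment of 16 blocks combined with an extent size hint of 12 blocks gives an effective alignment of roundup_64(16, 12) = 24, so a requested end_fsb of 100 is rounded up to 120, provided that lands beyond the last cached extent. A local helper stands in for the kernel's roundup_64():

    #include <stdio.h>

    static unsigned long long demo_roundup(unsigned long long x,
                                           unsigned long long align)
    {
            return ((x + align - 1) / align) * align;
    }

    int main(void)
    {
            unsigned long long align = 16;          /* assumed stripe unit (blocks) */
            unsigned long long extsz = 12;          /* assumed extent size hint */
            unsigned long long end_fsb = 100;

            align = demo_roundup(align, extsz);     /* 24 */
            printf("aligned end_fsb = %llu\n",
                   demo_roundup(end_fsb, align));   /* 120 */
            return 0;
    }
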
     239             : int
     240    21360627 : xfs_iomap_write_direct(
     241             :         struct xfs_inode        *ip,
     242             :         xfs_fileoff_t           offset_fsb,
     243             :         xfs_fileoff_t           count_fsb,
     244             :         unsigned int            flags,
     245             :         struct xfs_bmbt_irec    *imap,
     246             :         u64                     *seq)
     247             : {
     248    21360627 :         struct xfs_mount        *mp = ip->i_mount;
     249    21360627 :         struct xfs_trans        *tp;
     250    21360627 :         xfs_filblks_t           resaligned;
     251    21360627 :         int                     nimaps;
     252    21360627 :         unsigned int            dblocks, rblocks;
     253    21360627 :         bool                    force = false;
     254    21360627 :         int                     error;
     255    21360627 :         int                     bmapi_flags = XFS_BMAPI_PREALLOC;
     256    21360627 :         int                     nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;
     257             : 
     258    21360627 :         ASSERT(count_fsb > 0);
     259             : 
     260    21360627 :         resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
     261             :                                            xfs_get_extsz_hint(ip));
     262    21360693 :         if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
     263    16413450 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
     264    16413450 :                 rblocks = resaligned;
     265             :         } else {
     266     4947243 :                 dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
     267     4947243 :                 rblocks = 0;
     268             :         }
     269             : 
     270    21360693 :         error = xfs_qm_dqattach(ip);
     271    21360588 :         if (error)
     272             :                 return error;
     273             : 
     274             :         /*
     275             :          * For DAX, we do not allocate unwritten extents, but instead we zero
     276             :          * the block before we commit the transaction.  Ideally we'd like to do
     277             :          * this outside the transaction context, but if we commit and then crash
     278             :          * we may not have zeroed the blocks and this will be exposed on
     279             :          * recovery of the allocation. Hence we must zero before commit.
     280             :          *
     281             :          * Further, if we are mapping unwritten extents here, we need to zero
     282             :          * and convert them to written so that we don't need an unwritten extent
     283             :          * callback for DAX. This also means that we need to be able to dip into
     284             :          * the reserve block pool for bmbt block allocation if there is no space
     285             :          * left but we need to do unwritten extent conversion.
     286             :          */
     287    21360581 :         if (flags & IOMAP_DAX) {
     288             :                 bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
     289             :                 if (imap->br_state == XFS_EXT_UNWRITTEN) {
     290             :                         force = true;
     291             :                         nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
     292             :                         dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     293             :                 }
     294             :         }
     295             : 
     296    21360581 :         error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
     297             :                         rblocks, force, &tp);
     298    21360894 :         if (error)
     299             :                 return error;
     300             : 
     301    21336685 :         error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK, nr_exts);
     302    21336589 :         if (error == -EFBIG)
     303           0 :                 error = xfs_iext_count_upgrade(tp, ip, nr_exts);
     304    21336589 :         if (error)
     305           0 :                 goto out_trans_cancel;
     306             : 
     307             :         /*
     308             :          * From this point onwards we overwrite the imap pointer that the
     309             :          * caller gave to us.
     310             :          */
     311    21336589 :         nimaps = 1;
     312    21336589 :         error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
     313             :                                 imap, &nimaps);
     314    21336660 :         if (error)
     315         285 :                 goto out_trans_cancel;
     316             : 
     317             :         /*
     318             :          * Complete the transaction
     319             :          */
     320    21336375 :         error = xfs_trans_commit(tp);
     321    21336401 :         if (error)
     322         143 :                 goto out_unlock;
     323             : 
     324             :         /*
     325             :          * Copy any maps to caller's array and return any error.
     326             :          */
     327    21336258 :         if (nimaps == 0) {
     328           0 :                 error = -ENOSPC;
     329           0 :                 goto out_unlock;
     330             :         }
     331             : 
     332    21336258 :         if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
     333           0 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     334           0 :                 error = xfs_alert_fsblock_zero(ip, imap);
     335             :         }
     336             : 
     337    21336258 : out_unlock:
     338    21336686 :         *seq = xfs_iomap_inode_sequence(ip, 0);
     339    21336686 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     340    21336686 :         return error;
     341             : 
     342         285 : out_trans_cancel:
     343         285 :         xfs_trans_cancel(tp);
     344         285 :         goto out_unlock;
     345             : }
     346             : 
     347             : STATIC bool
     348      701629 : xfs_quota_need_throttle(
     349             :         struct xfs_inode        *ip,
     350             :         xfs_dqtype_t            type,
     351             :         xfs_fsblock_t           alloc_blocks)
     352             : {
     353      701629 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     354             : 
     355      701629 :         if (!dq || !xfs_this_quota_on(ip->i_mount, type))
     356             :                 return false;
     357             : 
     358             :         /* no hi watermark, no throttle */
     359      688868 :         if (!dq->q_prealloc_hi_wmark)
     360             :                 return false;
     361             : 
     362             :         /* under the lo watermark, no throttle */
     363        3905 :         if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
     364         435 :                 return false;
     365             : 
     366             :         return true;
     367             : }
     368             : 
     369             : STATIC void
     370        3470 : xfs_quota_calc_throttle(
     371             :         struct xfs_inode        *ip,
     372             :         xfs_dqtype_t            type,
     373             :         xfs_fsblock_t           *qblocks,
     374             :         int                     *qshift,
     375             :         int64_t                 *qfreesp)
     376             : {
     377        3470 :         struct xfs_dquot        *dq = xfs_inode_dquot(ip, type);
     378        3470 :         int64_t                 freesp;
     379        3470 :         int                     shift = 0;
     380             : 
     381             :         /* no dq, or over hi wmark, squash the prealloc completely */
     382        3470 :         if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
     383         305 :                 *qblocks = 0;
     384         305 :                 *qfreesp = 0;
     385         305 :                 return;
     386             :         }
     387             : 
     388        3165 :         freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
     389        3165 :         if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
     390         409 :                 shift = 2;
     391         409 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
     392         373 :                         shift += 2;
     393         409 :                 if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
     394         323 :                         shift += 2;
     395             :         }
     396             : 
     397        3165 :         if (freesp < *qfreesp)
     398        3161 :                 *qfreesp = freesp;
     399             : 
     400             :         /* only overwrite the throttle values if we are more aggressive */
     401        3165 :         if ((freesp >> shift) < (*qblocks >> *qshift)) {
     402         535 :                 *qblocks = freesp;
     403         535 :                 *qshift = shift;
     404             :         }
     405             : }
     406             : 
     407             : /*
     408             :  * If we don't have a user specified preallocation size, dynamically increase
     409             :  * the preallocation size as the size of the file grows.  Cap the maximum size
     410             :  * at a single extent or less if the filesystem is near full. The closer the
     411             :  * filesystem is to being full, the smaller the maximum preallocation.
     412             :  */
     413             : STATIC xfs_fsblock_t
     414     8009884 : xfs_iomap_prealloc_size(
     415             :         struct xfs_inode        *ip,
     416             :         int                     whichfork,
     417             :         loff_t                  offset,
     418             :         loff_t                  count,
     419             :         struct xfs_iext_cursor  *icur)
     420             : {
     421     8009884 :         struct xfs_iext_cursor  ncur = *icur;
     422     8009884 :         struct xfs_bmbt_irec    prev, got;
     423     8009884 :         struct xfs_mount        *mp = ip->i_mount;
     424     8009884 :         struct xfs_ifork        *ifp = xfs_ifork_ptr(ip, whichfork);
     425     8010215 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     426     8010215 :         int64_t                 freesp;
     427     8010215 :         xfs_fsblock_t           qblocks;
     428     8010215 :         xfs_fsblock_t           alloc_blocks = 0;
     429     8010215 :         xfs_extlen_t            plen;
     430     8010215 :         int                     shift = 0;
     431     8010215 :         int                     qshift = 0;
     432             : 
     433             :         /*
     434             :          * As an exception we don't do any preallocation at all if the file is
     435             :          * smaller than the minimum preallocation and we are using the default
     436             :          * dynamic preallocation scheme, as it is likely this is the only write
     437             :          * to the file that is going to be done.
     438             :          */
     439    16020430 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
     440             :                 return 0;
     441             : 
     442             :         /*
     443             :          * Use the minimum preallocation size for small files or if we are
     444             :          * writing right after a hole.
     445             :          */
     446     7701277 :         if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
     447     3850333 :             !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
     448     3624009 :             prev.br_startoff + prev.br_blockcount < offset_fsb)
     449     3617061 :                 return mp->m_allocsize_blocks;
     450             : 
     451             :         /*
     452             :          * Take the size of the preceding data extents as the basis for the
     453             :          * preallocation size. Note that we don't care if the previous extents
     454             :          * are written or not.
     455             :          */
     456      233883 :         plen = prev.br_blockcount;
     457      255782 :         while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
     458      123462 :                 if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
     459      123458 :                     isnullstartblock(got.br_startblock) ||
     460       96430 :                     got.br_startoff + got.br_blockcount != prev.br_startoff ||
     461       50680 :                     got.br_startblock + got.br_blockcount != prev.br_startblock)
     462             :                         break;
     463       21899 :                 plen += got.br_blockcount;
     464       21899 :                 prev = got;
     465             :         }
     466             : 
     467             :         /*
     468             :          * If the size of the extents is greater than half the maximum extent
     469             :          * length, then use the current offset as the basis.  This ensures that
     470             :          * for large files the preallocation size always extends to
      471             :          * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like stripe
     472             :          * unit/width alignment of real extents.
     473             :          */
     474      233882 :         alloc_blocks = plen * 2;
     475      233882 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     476           4 :                 alloc_blocks = XFS_B_TO_FSB(mp, offset);
     477      233882 :         qblocks = alloc_blocks;
     478             : 
     479             :         /*
      480             :          * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the prealloc
     481             :          * down to the nearest power of two value after throttling. To prevent
     482             :          * the round down from unconditionally reducing the maximum supported
     483             :          * prealloc size, we round up first, apply appropriate throttling, round
      484             :          * down and cap the value to XFS_MAX_BMBT_EXTLEN.
     485             :          */
     486      233882 :         alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
     487             :                                        alloc_blocks);
     488             : 
     489      233882 :         freesp = percpu_counter_read_positive(&mp->m_fdblocks);
     490      233882 :         if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
     491       10065 :                 shift = 2;
     492       10065 :                 if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
     493        9584 :                         shift++;
     494       10065 :                 if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
     495        8928 :                         shift++;
     496       10065 :                 if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
     497        7903 :                         shift++;
     498       10065 :                 if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
     499        6275 :                         shift++;
     500             :         }
     501             : 
     502             :         /*
     503             :          * Check each quota to cap the prealloc size, provide a shift value to
     504             :          * throttle with and adjust amount of available space.
     505             :          */
     506      233882 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
     507        1199 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
     508             :                                         &freesp);
     509      233882 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
     510        1169 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
     511             :                                         &freesp);
     512      233882 :         if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
     513        1102 :                 xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
     514             :                                         &freesp);
     515             : 
     516             :         /*
     517             :          * The final prealloc size is set to the minimum of free space available
     518             :          * in each of the quotas and the overall filesystem.
     519             :          *
     520             :          * The shift throttle value is set to the maximum value as determined by
     521             :          * the global low free space values and per-quota low free space values.
     522             :          */
     523      233882 :         alloc_blocks = min(alloc_blocks, qblocks);
     524      233882 :         shift = max(shift, qshift);
     525             : 
     526      233882 :         if (shift)
     527       10474 :                 alloc_blocks >>= shift;
     528             :         /*
     529             :          * rounddown_pow_of_two() returns an undefined result if we pass in
     530             :          * alloc_blocks = 0.
     531             :          */
     532      233882 :         if (alloc_blocks)
     533      231459 :                 alloc_blocks = rounddown_pow_of_two(alloc_blocks);
     534      233882 :         if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
     535           0 :                 alloc_blocks = XFS_MAX_BMBT_EXTLEN;
     536             : 
     537             :         /*
     538             :          * If we are still trying to allocate more space than is
     539             :          * available, squash the prealloc hard. This can happen if we
     540             :          * have a large file on a small filesystem and the above
      541             :          * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
     542             :          */
     543      237194 :         while (alloc_blocks && alloc_blocks >= freesp)
     544        3312 :                 alloc_blocks >>= 4;
     545      233882 :         if (alloc_blocks < mp->m_allocsize_blocks)
     546             :                 alloc_blocks = mp->m_allocsize_blocks;
     547      233882 :         trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
     548             :                                       mp->m_allocsize_blocks);
     549      233882 :         return alloc_blocks;
     550             : }
     551             : 
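
A hedged numeric walk-through of the prealloc throttling above (editor's illustration, assumed figures): doubling a 4096-block previous extent gives alloc_blocks = 8192; if free space has dropped below the 5 %, 4 % and 3 % low-space thresholds but not the 2 % one, shift ends up as 4, so the prealloc is cut to 8192 >> 4 = 512 blocks and then rounded down to a power of two (still 512).

    #include <stdio.h>

    static unsigned long long demo_rounddown_pow2(unsigned long long x)
    {
            unsigned long long p = 1;

            while (p * 2 <= x)
                    p *= 2;
            return p;
    }

    int main(void)
    {
            unsigned long long alloc_blocks = 2 * 4096;     /* plen * 2, assumed */
            int shift = 4;                  /* below the 5/4/3 % thresholds */

            alloc_blocks >>= shift;                             /* 512 */
            alloc_blocks = demo_rounddown_pow2(alloc_blocks);   /* 512 */
            printf("throttled prealloc = %llu blocks\n", alloc_blocks);
            return 0;
    }
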
     552             : int
     553    35666387 : xfs_iomap_write_unwritten(
     554             :         xfs_inode_t     *ip,
     555             :         xfs_off_t       offset,
     556             :         xfs_off_t       count,
     557             :         bool            update_isize)
     558             : {
     559    35666387 :         xfs_mount_t     *mp = ip->i_mount;
     560    35666387 :         xfs_fileoff_t   offset_fsb;
     561    35666387 :         xfs_filblks_t   count_fsb;
     562    35666387 :         xfs_filblks_t   numblks_fsb;
     563    35666387 :         int             nimaps;
     564    35666387 :         xfs_trans_t     *tp;
     565    35666387 :         xfs_bmbt_irec_t imap;
     566    35666387 :         struct inode    *inode = VFS_I(ip);
     567    35666387 :         xfs_fsize_t     i_size;
     568    35666387 :         uint            resblks;
     569    35666387 :         int             error;
     570             : 
     571    35666387 :         trace_xfs_unwritten_convert(ip, offset, count);
     572             : 
     573    35666367 :         offset_fsb = XFS_B_TO_FSBT(mp, offset);
     574    35666367 :         count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
     575    35666367 :         count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
     576             : 
     577             :         /*
     578             :          * Reserve enough blocks in this transaction for two complete extent
     579             :          * btree splits.  We may be converting the middle part of an unwritten
     580             :          * extent and in this case we will insert two new extents in the btree
     581             :          * each of which could cause a full split.
     582             :          *
     583             :          * This reservation amount will be used in the first call to
     584             :          * xfs_bmbt_split() to select an AG with enough space to satisfy the
     585             :          * rest of the operation.
     586             :          */
     587    35666367 :         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
     588             : 
     589             :         /* Attach dquots so that bmbt splits are accounted correctly. */
     590    35666367 :         error = xfs_qm_dqattach(ip);
     591    35666461 :         if (error)
     592             :                 return error;
     593             : 
     594    36591806 :         do {
     595             :                 /*
     596             :                  * Set up a transaction to convert the range of extents
     597             :                  * from unwritten to real. Do allocations in a loop until
     598             :                  * we have covered the range passed in.
     599             :                  *
      600             :          * Note that we can't risk recursing back into the filesystem
     601             :                  * here as we might be asked to write out the same inode that we
     602             :                  * complete here and might deadlock on the iolock.
     603             :                  */
     604    36591806 :                 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
     605             :                                 0, true, &tp);
     606    36591990 :                 if (error)
     607          48 :                         return error;
     608             : 
     609    36591942 :                 error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
     610             :                                 XFS_IEXT_WRITE_UNWRITTEN_CNT);
     611    36591666 :                 if (error == -EFBIG)
     612           7 :                         error = xfs_iext_count_upgrade(tp, ip,
     613             :                                         XFS_IEXT_WRITE_UNWRITTEN_CNT);
     614    36591666 :                 if (error)
     615           7 :                         goto error_on_bmapi_transaction;
     616             : 
     617             :                 /*
     618             :                  * Modify the unwritten extent state of the buffer.
     619             :                  */
     620    36591659 :                 nimaps = 1;
     621    36591659 :                 error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
     622             :                                         XFS_BMAPI_CONVERT, resblks, &imap,
     623             :                                         &nimaps);
     624    36591816 :                 if (error)
     625          24 :                         goto error_on_bmapi_transaction;
     626             : 
     627             :                 /*
     628             :                  * Log the updated inode size as we go.  We have to be careful
     629             :                  * to only log it up to the actual write offset if it is
     630             :                  * halfway into a block.
     631             :                  */
     632    36591792 :                 i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
     633    36591792 :                 if (i_size > offset + count)
     634             :                         i_size = offset + count;
     635    36591792 :                 if (update_isize && i_size > i_size_read(inode))
     636     1995493 :                         i_size_write(inode, i_size);
     637    36591792 :                 i_size = xfs_new_eof(ip, i_size);
     638    12885639 :                 if (i_size) {
     639    12885643 :                         ip->i_disk_size = i_size;
     640    12885643 :                         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
     641             :                 }
     642             : 
     643    36591789 :                 error = xfs_trans_commit(tp);
     644    36591968 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
     645    36591955 :                 if (error)
     646         333 :                         return error;
     647             : 
     648    36591622 :                 if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
     649           0 :                         xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     650           0 :                         return xfs_alert_fsblock_zero(ip, &imap);
     651             :                 }
     652             : 
     653    36591622 :                 if ((numblks_fsb = imap.br_blockcount) == 0) {
     654             :                         /*
     655             :                          * The numblks_fsb value should always get
     656             :                          * smaller, otherwise the loop is stuck.
     657             :                          */
     658           0 :                         ASSERT(imap.br_blockcount);
     659             :                         break;
     660             :                 }
     661    36591622 :                 offset_fsb += numblks_fsb;
     662    36591622 :                 count_fsb -= numblks_fsb;
     663    36591622 :         } while (count_fsb > 0);
     664             : 
     665             :         return 0;
     666             : 
     667          31 : error_on_bmapi_transaction:
     668          31 :         xfs_trans_cancel(tp);
     669          31 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
     670          31 :         return error;
     671             : }
     672             : 
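
A small worked example of the byte-to-block rounding at the top of xfs_iomap_write_unwritten() (editor's illustration, assuming 4096-byte blocks): a 4096-byte range starting at byte offset 6144 straddles two blocks, so the start is rounded down to block 1 and the end rounded up to block 3, converting 2 blocks.

    #include <stdio.h>

    #define DEMO_BLOCKSIZE 4096ULL          /* assumed filesystem block size */

    int main(void)
    {
            unsigned long long offset = 6144, count = 4096;
            unsigned long long offset_fsb = offset / DEMO_BLOCKSIZE;        /* 1 */
            unsigned long long end_fsb =
                    (offset + count + DEMO_BLOCKSIZE - 1) / DEMO_BLOCKSIZE; /* 3 */

            printf("convert %llu block(s) starting at block %llu\n",
                   end_fsb - offset_fsb, offset_fsb);
            return 0;
    }
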
     673             : static inline bool
     674             : imap_needs_alloc(
     675             :         struct inode            *inode,
     676             :         unsigned                flags,
     677             :         struct xfs_bmbt_irec    *imap,
     678             :         int                     nimaps)
     679             : {
     680             :         /* don't allocate blocks when just zeroing */
     681    39042443 :         if (flags & IOMAP_ZERO)
     682             :                 return false;
     683    27047364 :         if (!nimaps ||
     684    27047384 :             imap->br_startblock == HOLESTARTBLOCK ||
     685             :             imap->br_startblock == DELAYSTARTBLOCK)
     686    21739982 :                 return true;
     687             :         /* we convert unwritten extents before copying the data for DAX */
     688             :         if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
     689             :                 return true;
     690             :         return false;
     691             : }
     692             : 
     693             : static inline bool
     694    41198892 : imap_needs_cow(
     695             :         struct xfs_inode        *ip,
     696             :         unsigned int            flags,
     697             :         struct xfs_bmbt_irec    *imap,
     698             :         int                     nimaps)
     699             : {
     700    41198892 :         if (!xfs_is_cow_inode(ip))
     701             :                 return false;
     702             : 
     703             :         /* when zeroing we don't have to COW holes or unwritten extents */
     704    26770494 :         if (flags & IOMAP_ZERO) {
     705     7482956 :                 if (!nimaps ||
     706     7482939 :                     imap->br_startblock == HOLESTARTBLOCK ||
     707     3291705 :                     imap->br_state == XFS_EXT_UNWRITTEN)
     708     5150717 :                         return false;
     709             :         }
     710             : 
     711             :         return true;
     712             : }
     713             : 
     714             : static int
     715   511238343 : xfs_ilock_for_iomap(
     716             :         struct xfs_inode        *ip,
     717             :         unsigned                flags,
     718             :         unsigned                *lockmode)
     719             : {
     720   511238343 :         unsigned int            mode = *lockmode;
     721   511238343 :         bool                    is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
     722             : 
     723             :         /*
     724             :          * COW writes may allocate delalloc space or convert unwritten COW
     725             :          * extents, so we need to make sure to take the lock exclusively here.
     726             :          */
     727   511238343 :         if (xfs_is_cow_inode(ip) && is_write)
     728    63029295 :                 mode = XFS_ILOCK_EXCL;
     729             : 
     730             :         /*
      731             :          * Extents not yet cached require exclusive access, don't block.  This
     732             :          * is an opencoded xfs_ilock_data_map_shared() call but with
     733             :          * non-blocking behaviour.
     734             :          */
     735   511213355 :         if (xfs_need_iread_extents(&ip->i_df)) {
     736     1214882 :                 if (flags & IOMAP_NOWAIT)
     737             :                         return -EAGAIN;
     738             :                 mode = XFS_ILOCK_EXCL;
     739             :         }
     740             : 
     741   510041744 : relock:
     742   511254611 :         if (flags & IOMAP_NOWAIT) {
     743           0 :                 if (!xfs_ilock_nowait(ip, mode))
     744             :                         return -EAGAIN;
     745             :         } else {
     746   511254611 :                 xfs_ilock(ip, mode);
     747             :         }
     748             : 
     749             :         /*
     750             :          * The reflink iflag could have changed since the earlier unlocked
      751             :          * check, so if we got ILOCK_SHARED for a write but we're now a
     752             :          * reflink inode we have to switch to ILOCK_EXCL and relock.
     753             :          */
     754   511261899 :         if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
     755           0 :                 xfs_iunlock(ip, mode);
     756         508 :                 mode = XFS_ILOCK_EXCL;
     757         508 :                 goto relock;
     758             :         }
     759             : 
     760   511261902 :         *lockmode = mode;
     761   511261902 :         return 0;
     762             : }
     763             : 
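
xfs_ilock_for_iomap() takes the cheaper shared lock when it can, then rechecks whether the inode became a COW inode and, if so, drops the lock and retakes it exclusively. A generic user-space sketch of that optimistic lock/recheck/relock pattern, using pthreads purely for illustration (not the kernel locking API):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;
    static bool demo_needs_exclusive;       /* e.g. "inode became reflinked" */

    static void demo_lock_for_write(void)
    {
            bool exclusive = demo_needs_exclusive;

    relock:
            if (exclusive)
                    pthread_rwlock_wrlock(&demo_lock);
            else
                    pthread_rwlock_rdlock(&demo_lock);

            /* The condition may have changed between the check and the lock. */
            if (!exclusive && demo_needs_exclusive) {
                    pthread_rwlock_unlock(&demo_lock);
                    exclusive = true;
                    goto relock;
            }
            /* ... do the work, then pthread_rwlock_unlock(&demo_lock) ... */
    }
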
     764             : /*
     765             :  * Check that the imap we are going to return to the caller spans the entire
     766             :  * range that the caller requested for the IO.
     767             :  */
     768             : static bool
     769             : imap_spans_range(
     770             :         struct xfs_bmbt_irec    *imap,
     771             :         xfs_fileoff_t           offset_fsb,
     772             :         xfs_fileoff_t           end_fsb)
     773             : {
     774      242618 :         if (imap->br_startoff > offset_fsb)
     775             :                 return false;
     776      242618 :         if (imap->br_startoff + imap->br_blockcount < end_fsb)
     777             :                 return false;
     778             :         return true;
     779             : }
     780             : 
     781             : static int
     782    41198763 : xfs_direct_write_iomap_begin(
     783             :         struct inode            *inode,
     784             :         loff_t                  offset,
     785             :         loff_t                  length,
     786             :         unsigned                flags,
     787             :         struct iomap            *iomap,
     788             :         struct iomap            *srcmap)
     789             : {
     790    41198763 :         struct xfs_inode        *ip = XFS_I(inode);
     791    41198763 :         struct xfs_mount        *mp = ip->i_mount;
     792    41198763 :         struct xfs_bmbt_irec    imap, cmap;
     793    41198763 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     794    41198763 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     795    41198991 :         int                     nimaps = 1, error = 0;
     796    41198991 :         bool                    shared = false;
     797    41198991 :         u16                     iomap_flags = 0;
     798    41198991 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
     799    41198991 :         u64                     seq;
     800             : 
     801    41198991 :         ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));
     802             : 
     803    82397982 :         if (xfs_is_shutdown(mp))
     804             :                 return -EIO;
     805             : 
     806             :         /*
     807             :          * Writes that span EOF might trigger an IO size update on completion,
     808             :          * so consider them to be dirty for the purposes of O_DSYNC even if
     809             :          * there is no other metadata changes pending or have been made here.
     810             :          */
     811    41198970 :         if (offset + length > i_size_read(inode))
     812    20740739 :                 iomap_flags |= IOMAP_F_DIRTY;
     813             : 
     814    41198970 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     815    41198973 :         if (error)
     816             :                 return error;
     817             : 
     818    41198970 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
     819             :                                &nimaps, 0);
     820    41198820 :         if (error)
     821           8 :                 goto out_unlock;
     822             : 
     823    41198812 :         if (imap_needs_cow(ip, flags, &imap, nimaps)) {
     824    21619884 :                 error = -EAGAIN;
     825    21619884 :                 if (flags & IOMAP_NOWAIT)
     826           0 :                         goto out_unlock;
     827             : 
     828             :                 /* may drop and re-acquire the ilock */
     829    21619884 :                 error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
     830             :                                 &lockmode,
     831    21619884 :                                 (flags & IOMAP_DIRECT) || IS_DAX(inode));
     832    21619789 :                 if (error)
     833          34 :                         goto out_unlock;
     834    21619755 :                 if (shared)
     835     2156384 :                         goto out_found_cow;
     836    19463371 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
     837    19463371 :                 length = XFS_FSB_TO_B(mp, end_fsb) - offset;
     838             :         }
     839             : 
     840    39042443 :         if (imap_needs_alloc(inode, flags, &imap, nimaps))
     841    21739982 :                 goto allocate_blocks;
     842             : 
     843             :         /*
      844             :          * NOWAIT and OVERWRITE I/O need to span the entire requested I/O with
     845             :          * a single map so that we avoid partial IO failures due to the rest of
     846             :          * the I/O range not covered by this map triggering an EAGAIN condition
     847             :          * when it is subsequently mapped and aborting the I/O.
     848             :          */
     849    17302461 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
     850      242618 :                 error = -EAGAIN;
     851      242618 :                 if (!imap_spans_range(&imap, offset_fsb, end_fsb))
     852       59499 :                         goto out_unlock;
     853             :         }
     854             : 
     855             :         /*
     856             :          * For overwrite only I/O, we cannot convert unwritten extents without
     857             :          * requiring sub-block zeroing.  This can only be done under an
     858             :          * exclusive IOLOCK, hence return -EAGAIN if this is not a written
     859             :          * extent to tell the caller to try again.
     860             :          */
     861    17242962 :         if (flags & IOMAP_OVERWRITE_ONLY) {
     862      183116 :                 error = -EAGAIN;
     863      183116 :                 if (imap.br_state != XFS_EXT_NORM &&
     864       82695 :                     ((offset | length) & mp->m_blockmask))
     865       82695 :                         goto out_unlock;
     866             :         }
     867             : 
     868    17160267 :         seq = xfs_iomap_inode_sequence(ip, iomap_flags);
     869    17160287 :         xfs_iunlock(ip, lockmode);
     870    17160278 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
     871    17160282 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);
     872             : 
     873             : allocate_blocks:
     874    21739982 :         error = -EAGAIN;
     875    21739982 :         if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
     876      379357 :                 goto out_unlock;
     877             : 
     878             :         /*
      879             :          * We cap the maximum length we map to a sane size to keep the chunks
      880             :          * of work done here somewhat symmetric with the work writeback does.
     881             :          * This is a completely arbitrary number pulled out of thin air as a
     882             :          * best guess for initial testing.
     883             :          *
      884             :          * Note that the value needs to be less than 32 bits wide until the
     885             :          * lower level functions are updated.
     886             :          */
     887    21360625 :         length = min_t(loff_t, length, 1024 * PAGE_SIZE);
     888    21360625 :         end_fsb = xfs_iomap_end_fsb(mp, offset, length);
     889             : 
     890    42721544 :         if (offset + length > XFS_ISIZE(ip))
     891     9930781 :                 end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
     892    11429991 :         else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
     893    11430006 :                 end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
     894    21360644 :         xfs_iunlock(ip, lockmode);
     895             : 
     896    21360515 :         error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
     897             :                         flags, &imap, &seq);
     898    21360895 :         if (error)
     899             :                 return error;
     900             : 
     901    21336232 :         trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
     902    21336281 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
     903             :                                  iomap_flags | IOMAP_F_NEW, seq);
     904             : 
     905             : out_found_cow:
     906     2156384 :         length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
     907     2156384 :         trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
     908     2156384 :         if (imap.br_startblock != HOLESTARTBLOCK) {
     909     1903654 :                 seq = xfs_iomap_inode_sequence(ip, 0);
     910     1903654 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
     911     1903654 :                 if (error)
     912           0 :                         goto out_unlock;
     913             :         }
     914     2156384 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
     915     2156384 :         xfs_iunlock(ip, lockmode);
     916     2156383 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);
     917             : 
     918      521593 : out_unlock:
     919      521593 :         if (lockmode)
     920      521586 :                 xfs_iunlock(ip, lockmode);
     921             :         return error;
     922             : }
     923             : 
     924             : const struct iomap_ops xfs_direct_write_iomap_ops = {
     925             :         .iomap_begin            = xfs_direct_write_iomap_begin,
     926             : };
     927             : 
     928             : static int
     929           0 : xfs_dax_write_iomap_end(
     930             :         struct inode            *inode,
     931             :         loff_t                  pos,
     932             :         loff_t                  length,
     933             :         ssize_t                 written,
     934             :         unsigned                flags,
     935             :         struct iomap            *iomap)
     936             : {
     937           0 :         struct xfs_inode        *ip = XFS_I(inode);
     938             : 
     939           0 :         if (!xfs_is_cow_inode(ip))
     940             :                 return 0;
     941             : 
     942           0 :         if (!written) {
     943           0 :                 xfs_reflink_cancel_cow_range(ip, pos, length, true);
     944           0 :                 return 0;
     945             :         }
     946             : 
     947           0 :         return xfs_reflink_end_cow(ip, pos, written);
     948             : }
     949             : 
     950             : const struct iomap_ops xfs_dax_write_iomap_ops = {
     951             :         .iomap_begin    = xfs_direct_write_iomap_begin,
     952             :         .iomap_end      = xfs_dax_write_iomap_end,
     953             : };
     954             : 
     955             : static int
     956   124824303 : xfs_buffered_write_iomap_begin(
     957             :         struct inode            *inode,
     958             :         loff_t                  offset,
     959             :         loff_t                  count,
     960             :         unsigned                flags,
     961             :         struct iomap            *iomap,
     962             :         struct iomap            *srcmap)
     963             : {
     964   124824303 :         struct xfs_inode        *ip = XFS_I(inode);
     965   124824303 :         struct xfs_mount        *mp = ip->i_mount;
     966   124824303 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
     967   124824303 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, count);
     968   124847811 :         struct xfs_bmbt_irec    imap, cmap;
     969   124847811 :         struct xfs_iext_cursor  icur, ccur;
     970   124847811 :         xfs_fsblock_t           prealloc_blocks = 0;
     971   124847811 :         bool                    eof = false, cow_eof = false, shared = false;
     972   124847811 :         int                     allocfork = XFS_DATA_FORK;
     973   124847811 :         int                     error = 0;
     974   124847811 :         unsigned int            lockmode = XFS_ILOCK_EXCL;
     975   124847811 :         u64                     seq;
     976             : 
     977   249695622 :         if (xfs_is_shutdown(mp))
     978             :                 return -EIO;
     979             : 
     980             :         /* we can't use delayed allocations when using extent size hints */
     981   124847732 :         if (xfs_get_extsz_hint(ip))
     982    33048840 :                 return xfs_direct_write_iomap_begin(inode, offset, count,
     983             :                                 flags, iomap, srcmap);
     984             : 
     985    91843041 :         ASSERT(!XFS_IS_REALTIME_INODE(ip));
     986             : 
     987    91843041 :         error = xfs_qm_dqattach(ip);
     988    91858379 :         if (error)
     989             :                 return error;
     990             : 
     991    91859639 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
     992    91886952 :         if (error)
     993             :                 return error;
     994             : 
     995   183774294 :         if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
     996    91886952 :             XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
     997         441 :                 xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
     998           0 :                 error = -EFSCORRUPTED;
     999           0 :                 goto out_unlock;
    1000             :         }
    1001             : 
    1002    91886901 :         XFS_STATS_INC(mp, xs_blk_mapw);
    1003             : 
    1004    91886901 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1005    91890099 :         if (error)
    1006           2 :                 goto out_unlock;
    1007             : 
    1008             :         /*
    1009             :          * Search the data fork first to look up our source mapping.  We
    1010             :          * always need the data fork map, as we have to return it to the
    1011             :          * iomap code so that the higher level write code can read data in to
    1012             :          * perform read-modify-write cycles for unaligned writes.
    1013             :          */
    1014    91890097 :         eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
    1015    91907017 :         if (eof)
    1016    20587875 :                 imap.br_startoff = end_fsb; /* fake hole until the end */
    1017             : 
    1018             :         /* We never need to allocate blocks for zeroing or unsharing a hole. */
    1019    91907017 :         if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
    1020    37340384 :             imap.br_startoff > offset_fsb) {
    1021    19663753 :                 xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
    1022    19663781 :                 goto out_unlock;
    1023             :         }
    1024             : 
    1025             :         /*
    1026             :          * Search the COW fork extent list even if we did not find a data fork
     1027             :          * extent.  This serves two purposes: first, this implements the
     1028             :          * speculative preallocation using cowextsize, so that we also unshare
     1029             :          * blocks adjacent to shared blocks instead of just the shared blocks
     1030             :          * themselves.  Second, the lookup in the extent list is generally faster
    1031             :          * than going out to the shared extent tree.
    1032             :          */
    1033    72243264 :         if (xfs_is_cow_inode(ip)) {
    1034    24371827 :                 if (!ip->i_cowfp) {
    1035           0 :                         ASSERT(!xfs_is_reflink_inode(ip));
    1036           0 :                         xfs_ifork_init_cow(ip);
    1037             :                 }
    1038    24371827 :                 cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
    1039    24371634 :                                 &ccur, &cmap);
    1040    24371634 :                 if (!cow_eof && cmap.br_startoff <= offset_fsb) {
    1041      971918 :                         trace_xfs_reflink_cow_found(ip, &cmap);
    1042      972013 :                         goto found_cow;
    1043             :                 }
    1044             :         }
    1045             : 
    1046    71259984 :         if (imap.br_startoff <= offset_fsb) {
    1047             :                 /*
    1048             :                  * For reflink files we may need a delalloc reservation when
     1049             :                  * overwriting shared extents.  This includes zeroing of
    1050             :                  * existing extents that contain data.
    1051             :                  */
    1052    50773577 :                 if (!xfs_is_cow_inode(ip) ||
    1053     9488446 :                     ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
    1054    43606818 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1055             :                                         &imap);
    1056    43608870 :                         goto found_imap;
    1057             :                 }
    1058             : 
    1059     7169994 :                 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
    1060             : 
    1061             :                 /* Trim the mapping to the nearest shared extent boundary. */
    1062     7169984 :                 error = xfs_bmap_trim_cow(ip, &imap, &shared);
    1063     7170006 :                 if (error)
    1064          85 :                         goto out_unlock;
    1065             : 
    1066             :                 /* Not shared?  Just report the (potentially capped) extent. */
    1067     7169921 :                 if (!shared) {
    1068     6998209 :                         trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
    1069             :                                         &imap);
    1070     6998208 :                         goto found_imap;
    1071             :                 }
    1072             : 
    1073             :                 /*
    1074             :                  * Fork all the shared blocks from our write offset until the
    1075             :                  * end of the extent.
    1076             :                  */
    1077      171712 :                 allocfork = XFS_COW_FORK;
    1078      171712 :                 end_fsb = imap.br_startoff + imap.br_blockcount;
    1079             :         } else {
    1080             :                 /*
    1081             :                  * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
     1082             :                  * pages to keep the chunks of work done here somewhat
    1083             :                  * symmetric with the work writeback does.  This is a completely
    1084             :                  * arbitrary number pulled out of thin air.
    1085             :                  *
     1086             :                  * Note that the value needs to be less than 32 bits wide until
    1087             :                  * the lower level functions are updated.
    1088             :                  */
    1089    20486407 :                 count = min_t(loff_t, count, 1024 * PAGE_SIZE);
    1090    20486407 :                 end_fsb = xfs_iomap_end_fsb(mp, offset, count);
    1091             : 
    1092    20486026 :                 if (xfs_is_always_cow_inode(ip))
    1093           0 :                         allocfork = XFS_COW_FORK;
    1094             :         }
    1095             : 
    1096    31348233 :         if (eof && offset + count > XFS_ISIZE(ip)) {
    1097             :                 /*
    1098             :                  * Determine the initial size of the preallocation.
    1099             :                  * We clean up any extra preallocation when the file is closed.
    1100             :                  */
    1101     8020312 :                 if (xfs_has_allocsize(mp))
    1102       10061 :                         prealloc_blocks = mp->m_allocsize_blocks;
    1103     8010251 :                 else if (allocfork == XFS_DATA_FORK)
    1104     8010251 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1105             :                                                 offset, count, &icur);
    1106             :                 else
    1107           0 :                         prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
    1108             :                                                 offset, count, &ccur);
    1109     8018111 :                 if (prealloc_blocks) {
    1110     3861000 :                         xfs_extlen_t    align;
    1111     3861000 :                         xfs_off_t       end_offset;
    1112     3861000 :                         xfs_fileoff_t   p_end_fsb;
    1113             : 
    1114     3861000 :                         end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
    1115     3861000 :                         p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
    1116             :                                         prealloc_blocks;
    1117             : 
    1118     3861000 :                         align = xfs_eof_alignment(ip);
    1119     3861000 :                         if (align)
    1120        6175 :                                 p_end_fsb = roundup_64(p_end_fsb, align);
    1121             : 
    1122     3861000 :                         p_end_fsb = min(p_end_fsb,
    1123             :                                 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
    1124     3861000 :                         ASSERT(p_end_fsb > offset_fsb);
    1125     3861000 :                         prealloc_blocks = p_end_fsb - end_fsb;
    1126             :                 }
    1127             :         }
    1128             : 
    1129    16795029 : retry:
    1130    21358497 :         error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
    1131             :                         end_fsb - offset_fsb, prealloc_blocks,
    1132             :                         allocfork == XFS_DATA_FORK ? &imap : &cmap,
    1133             :                         allocfork == XFS_DATA_FORK ? &icur : &ccur,
    1134             :                         allocfork == XFS_DATA_FORK ? eof : cow_eof);
    1135    20840579 :         switch (error) {
    1136             :         case 0:
    1137    19814000 :                 break;
    1138     1026572 :         case -ENOSPC:
    1139             :         case -EDQUOT:
    1140             :                 /* retry without any preallocation */
    1141     1026572 :                 trace_xfs_delalloc_enospc(ip, offset, count);
    1142     1026985 :                 if (prealloc_blocks) {
    1143      187332 :                         prealloc_blocks = 0;
    1144      187332 :                         goto retry;
    1145             :                 }
    1146      839660 :                 fallthrough;
    1147             :         default:
    1148      839660 :                 goto out_unlock;
    1149             :         }
    1150             : 
    1151    19814000 :         if (allocfork == XFS_COW_FORK) {
    1152      171176 :                 trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
    1153      171176 :                 goto found_cow;
    1154             :         }
    1155             : 
    1156             :         /*
    1157             :          * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
    1158             :          * them out if the write happens to fail.
    1159             :          */
    1160    19642824 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW);
    1161    19642824 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1162    19646624 :         trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
    1163    19646872 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq);
    1164             : 
    1165    50607078 : found_imap:
    1166    50607078 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1167    50607078 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1168    50606080 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1169             : 
    1170     1143189 : found_cow:
    1171     1143189 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1172     1143189 :         if (imap.br_startoff <= offset_fsb) {
    1173     1084477 :                 error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
    1174     1084352 :                 if (error)
    1175           0 :                         goto out_unlock;
    1176     1084352 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1177     1084352 :                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1178     1084283 :                 return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1179             :                                          IOMAP_F_SHARED, seq);
    1180             :         }
    1181             : 
    1182       58712 :         xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
    1183       58712 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1184       58712 :         return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq);
    1185             : 
    1186    20503528 : out_unlock:
    1187    20503528 :         xfs_iunlock(ip, XFS_ILOCK_EXCL);
    1188    20503528 :         return error;
    1189             : }
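
The function above is the mapping side of ordinary buffered writes. The read-modify-write case mentioned in its opening comment is easy to reproduce with a write that is not block aligned; a minimal sketch, with arbitrary path, offset and length:

/* Sketch: a small unaligned buffered write. Neither the offset nor the
 * length is block aligned, so the surrounding block must be read in and
 * modified, and the new range is covered by a delayed allocation. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[100];
        int fd = open("/mnt/xfs/testfile", O_WRONLY | O_CREAT, 0644);

        if (fd < 0) { perror("open"); return 1; }
        memset(buf, 'x', sizeof(buf));

        /* 100 bytes at offset 250: lands in the middle of a block. */
        if (pwrite(fd, buf, sizeof(buf), 250) != (ssize_t)sizeof(buf))
                perror("pwrite");

        close(fd);
        return 0;
}

Nothing forces the data out immediately, so the blocks reserved here stay as delalloc extents until writeback converts them to real allocations.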
    1190             : 
    1191             : static int
    1192        6415 : xfs_buffered_write_delalloc_punch(
    1193             :         struct inode            *inode,
    1194             :         loff_t                  offset,
    1195             :         loff_t                  length)
    1196             : {
    1197        6415 :         return xfs_bmap_punch_delalloc_range(XFS_I(inode), offset,
    1198             :                         offset + length);
    1199             : }
    1200             : 
    1201             : static int
    1202   117114941 : xfs_buffered_write_iomap_end(
    1203             :         struct inode            *inode,
    1204             :         loff_t                  offset,
    1205             :         loff_t                  length,
    1206             :         ssize_t                 written,
    1207             :         unsigned                flags,
    1208             :         struct iomap            *iomap)
    1209             : {
    1210             : 
    1211   117114941 :         struct xfs_mount        *mp = XFS_M(inode->i_sb);
    1212   117114941 :         int                     error;
    1213             : 
    1214   117114941 :         error = iomap_file_buffered_write_punch_delalloc(inode, iomap, offset,
    1215             :                         length, written, &xfs_buffered_write_delalloc_punch);
    1216   117139353 :         if (error && !xfs_is_shutdown(mp)) {
    1217           0 :                 xfs_alert(mp, "%s: unable to clean up ino 0x%llx",
    1218             :                         __func__, XFS_I(inode)->i_ino);
    1219           0 :                 return error;
    1220             :         }
    1221             :         return 0;
    1222             : }
    1223             : 
    1224             : const struct iomap_ops xfs_buffered_write_iomap_ops = {
    1225             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1226             :         .iomap_end              = xfs_buffered_write_iomap_end,
    1227             : };
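
The COW fork branches in xfs_buffered_write_iomap_begin() only come into play once a file actually shares blocks. Reflinking two files and then overwriting part of one is the shortest route there; a minimal sketch using the FICLONE ioctl, with placeholder paths and assuming both files sit on the same reflink-capable XFS filesystem:

/* Sketch: share blocks between two files with FICLONE, then overwrite
 * part of the clone so the write has to go through the COW fork. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
        int src = open("/mnt/xfs/orig", O_RDONLY);
        int dst = open("/mnt/xfs/clone", O_WRONLY | O_CREAT, 0644);

        if (src < 0 || dst < 0) { perror("open"); return 1; }

        /* All of dst now references the same extents as src. */
        if (ioctl(dst, FICLONE, src)) { perror("FICLONE"); return 1; }

        /* Overwriting shared blocks forces a copy-on-write allocation. */
        if (pwrite(dst, "new data", 8, 0) != 8)
                perror("pwrite");

        close(src);
        close(dst);
        return 0;
}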
    1228             : 
    1229             : /*
    1230             :  * iomap_page_mkwrite() will never fail in a way that requires delalloc extents
    1231             :  * that it allocated to be revoked. Hence we do not need an .iomap_end method
    1232             :  * for this operation.
    1233             :  */
    1234             : const struct iomap_ops xfs_page_mkwrite_iomap_ops = {
    1235             :         .iomap_begin            = xfs_buffered_write_iomap_begin,
    1236             : };
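
iomap_page_mkwrite() is invoked when a clean page in a shared, writable file mapping is first dirtied. A minimal sketch of that sequence, with a placeholder path and an assumed 4096-byte length:

/* Sketch: dirty a MAP_SHARED mapping. The first store into a clean page
 * faults it writable, which is the page_mkwrite path. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        char *p;
        int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) { perror("open"); return 1; }
        if (ftruncate(fd, 4096)) { perror("ftruncate"); return 1; }

        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) { perror("mmap"); return 1; }

        /* This store takes a write fault on a clean page. */
        memcpy(p, "hello", 5);

        msync(p, 4096, MS_SYNC);
        munmap(p, 4096);
        close(fd);
        return 0;
}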
    1237             : 
    1238             : static int
    1239   378182404 : xfs_read_iomap_begin(
    1240             :         struct inode            *inode,
    1241             :         loff_t                  offset,
    1242             :         loff_t                  length,
    1243             :         unsigned                flags,
    1244             :         struct iomap            *iomap,
    1245             :         struct iomap            *srcmap)
    1246             : {
    1247   378182404 :         struct xfs_inode        *ip = XFS_I(inode);
    1248   378182404 :         struct xfs_mount        *mp = ip->i_mount;
    1249   378182404 :         struct xfs_bmbt_irec    imap;
    1250   378182404 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1251   378182404 :         xfs_fileoff_t           end_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1252   378182625 :         int                     nimaps = 1, error = 0;
    1253   378182625 :         bool                    shared = false;
    1254   378182625 :         unsigned int            lockmode = XFS_ILOCK_SHARED;
    1255   378182625 :         u64                     seq;
    1256             : 
    1257   378182625 :         ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));
    1258             : 
    1259   756365250 :         if (xfs_is_shutdown(mp))
    1260             :                 return -EIO;
    1261             : 
    1262   378182219 :         error = xfs_ilock_for_iomap(ip, flags, &lockmode);
    1263   378189049 :         if (error)
    1264             :                 return error;
    1265   378188201 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1266             :                                &nimaps, 0);
    1267   378187131 :         if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
    1268     3396272 :                 error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
    1269   756177665 :         seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
    1270   378187887 :         xfs_iunlock(ip, lockmode);
    1271             : 
    1272   378191299 :         if (error)
    1273             :                 return error;
    1274   378189952 :         trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
    1275   378192379 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
    1276   378192379 :                                  shared ? IOMAP_F_SHARED : 0, seq);
    1277             : }
    1278             : 
    1279             : const struct iomap_ops xfs_read_iomap_ops = {
    1280             :         .iomap_begin            = xfs_read_iomap_begin,
    1281             : };
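
xfs_read_iomap_begin() supports non-blocking callers through xfs_ilock_for_iomap(), which can decline to sleep on the inode lock. From userspace the usual way to ask for that behaviour is preadv2() with RWF_NOWAIT; a minimal sketch with a placeholder path:

/* Sketch: a non-blocking read. If the lock or the data is not immediately
 * available, the call fails with EAGAIN instead of sleeping. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        int fd = open("/mnt/xfs/testfile", O_RDONLY);

        if (fd < 0) { perror("open"); return 1; }

        if (preadv2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
                fprintf(stderr, "would block, try again later\n");

        close(fd);
        return 0;
}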
    1282             : 
    1283             : static int
    1284      440275 : xfs_seek_iomap_begin(
    1285             :         struct inode            *inode,
    1286             :         loff_t                  offset,
    1287             :         loff_t                  length,
    1288             :         unsigned                flags,
    1289             :         struct iomap            *iomap,
    1290             :         struct iomap            *srcmap)
    1291             : {
    1292      440275 :         struct xfs_inode        *ip = XFS_I(inode);
    1293      440275 :         struct xfs_mount        *mp = ip->i_mount;
    1294      440275 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1295      440275 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1296      440275 :         xfs_fileoff_t           cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
    1297      440275 :         struct xfs_iext_cursor  icur;
    1298      440275 :         struct xfs_bmbt_irec    imap, cmap;
    1299      440275 :         int                     error = 0;
    1300      440275 :         unsigned                lockmode;
    1301      440275 :         u64                     seq;
    1302             : 
    1303      880550 :         if (xfs_is_shutdown(mp))
    1304             :                 return -EIO;
    1305             : 
    1306      440275 :         lockmode = xfs_ilock_data_map_shared(ip);
    1307      440275 :         error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
    1308      440275 :         if (error)
    1309           0 :                 goto out_unlock;
    1310             : 
    1311      440275 :         if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
    1312             :                 /*
    1313             :                  * If we found a data extent we are done.
    1314             :                  */
    1315      430088 :                 if (imap.br_startoff <= offset_fsb)
    1316      227551 :                         goto done;
    1317             :                 data_fsb = imap.br_startoff;
    1318             :         } else {
    1319             :                 /*
    1320             :                  * Fake a hole until the end of the file.
    1321             :                  */
    1322       10187 :                 data_fsb = xfs_iomap_end_fsb(mp, offset, length);
    1323             :         }
    1324             : 
    1325             :         /*
     1326             :          * If a COW fork extent covers the hole, report it, capped to the next
    1327             :          * data fork extent:
    1328             :          */
    1329      430741 :         if (xfs_inode_has_cow_data(ip) &&
    1330        5293 :             xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
    1331        5242 :                 cow_fsb = cmap.br_startoff;
    1332      212724 :         if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
    1333         664 :                 if (data_fsb < cow_fsb + cmap.br_blockcount)
    1334         212 :                         end_fsb = min(end_fsb, data_fsb);
    1335         664 :                 xfs_trim_extent(&cmap, offset_fsb, end_fsb);
    1336         664 :                 seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
    1337         664 :                 error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
    1338             :                                 IOMAP_F_SHARED, seq);
    1339             :                 /*
    1340             :                  * This is a COW extent, so we must probe the page cache
     1341             :                  * because there could be dirty page cache pages backed
    1342             :                  * by this extent.
    1343             :                  */
    1344         664 :                 iomap->type = IOMAP_UNWRITTEN;
    1345         664 :                 goto out_unlock;
    1346             :         }
    1347             : 
    1348             :         /*
    1349             :          * Else report a hole, capped to the next found data or COW extent.
    1350             :          */
    1351      212060 :         if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
    1352         340 :                 imap.br_blockcount = cow_fsb - offset_fsb;
    1353             :         else
    1354      211720 :                 imap.br_blockcount = data_fsb - offset_fsb;
    1355      212060 :         imap.br_startoff = offset_fsb;
    1356      212060 :         imap.br_startblock = HOLESTARTBLOCK;
    1357      212060 :         imap.br_state = XFS_EXT_NORM;
    1358      439611 : done:
    1359      439611 :         seq = xfs_iomap_inode_sequence(ip, 0);
    1360      439611 :         xfs_trim_extent(&imap, offset_fsb, end_fsb);
    1361      439611 :         error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
    1362      440275 : out_unlock:
    1363      440275 :         xfs_iunlock(ip, lockmode);
    1364      440275 :         return error;
    1365             : }
    1366             : 
    1367             : const struct iomap_ops xfs_seek_iomap_ops = {
    1368             :         .iomap_begin            = xfs_seek_iomap_begin,
    1369             : };
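
xfs_seek_iomap_begin() serves lseek(2)'s SEEK_HOLE/SEEK_DATA queries, including the dirty-COW-extent case handled above. A minimal sketch that walks the data segments of a sparse file (the path is a placeholder):

/* Sketch: enumerate the data segments of a possibly sparse file with
 * SEEK_DATA / SEEK_HOLE. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/xfs/sparsefile", O_RDONLY);
        off_t data, hole = 0;

        if (fd < 0) { perror("open"); return 1; }

        /* SEEK_DATA fails with ENXIO once we are past the last data. */
        while ((data = lseek(fd, hole, SEEK_DATA)) >= 0) {
                hole = lseek(fd, data, SEEK_HOLE);
                if (hole < 0)
                        break;
                printf("data: [%lld, %lld)\n",
                       (long long)data, (long long)hole);
        }

        close(fd);
        return 0;
}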
    1370             : 
    1371             : static int
    1372      333788 : xfs_xattr_iomap_begin(
    1373             :         struct inode            *inode,
    1374             :         loff_t                  offset,
    1375             :         loff_t                  length,
    1376             :         unsigned                flags,
    1377             :         struct iomap            *iomap,
    1378             :         struct iomap            *srcmap)
    1379             : {
    1380      333788 :         struct xfs_inode        *ip = XFS_I(inode);
    1381      333788 :         struct xfs_mount        *mp = ip->i_mount;
    1382      333788 :         xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
    1383      333788 :         xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + length);
    1384      333788 :         struct xfs_bmbt_irec    imap;
    1385      333788 :         int                     nimaps = 1, error = 0;
    1386      333788 :         unsigned                lockmode;
    1387      333788 :         int                     seq;
    1388             : 
    1389      667576 :         if (xfs_is_shutdown(mp))
    1390             :                 return -EIO;
    1391             : 
    1392      333788 :         lockmode = xfs_ilock_attr_map_shared(ip);
    1393             : 
     1394             :         /* if there is no attribute fork or no extents, return ENOENT */
    1395      333789 :         if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
    1396      326107 :                 error = -ENOENT;
    1397      326107 :                 goto out_unlock;
    1398             :         }
    1399             : 
    1400        7682 :         ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
    1401        7682 :         error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
    1402             :                                &nimaps, XFS_BMAPI_ATTRFORK);
    1403      333789 : out_unlock:
    1404             : 
    1405      333789 :         seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
    1406      333789 :         xfs_iunlock(ip, lockmode);
    1407             : 
    1408      333789 :         if (error)
    1409             :                 return error;
    1410        7682 :         ASSERT(nimaps);
    1411        7682 :         return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
    1412             : }
    1413             : 
    1414             : const struct iomap_ops xfs_xattr_iomap_ops = {
    1415             :         .iomap_begin            = xfs_xattr_iomap_begin,
    1416             : };
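
The xattr variant above maps the attribute fork rather than file data, which is what the FIEMAP ioctl requests with FIEMAP_FLAG_XATTR. A minimal sketch with a placeholder path and an arbitrary 16-entry extent buffer:

/* Sketch: fetch the extent map of a file's extended attribute fork. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(void)
{
        struct fiemap *fm;
        int fd = open("/mnt/xfs/testfile", O_RDONLY);

        if (fd < 0) { perror("open"); return 1; }

        fm = calloc(1, sizeof(*fm) + 16 * sizeof(struct fiemap_extent));
        if (!fm) { close(fd); return 1; }
        fm->fm_flags = FIEMAP_FLAG_XATTR;       /* map the attr fork */
        fm->fm_length = ~0ULL;                  /* whole fork */
        fm->fm_extent_count = 16;

        if (ioctl(fd, FS_IOC_FIEMAP, fm))
                perror("FIEMAP");
        else
                printf("attr fork extents: %u\n", fm->fm_mapped_extents);

        free(fm);
        close(fd);
        return 0;
}

On a file with no attribute fork (or one with no extents) the call fails with ENOENT, matching the early return in xfs_xattr_iomap_begin().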
    1417             : 
    1418             : int
    1419    34538577 : xfs_zero_range(
    1420             :         struct xfs_inode        *ip,
    1421             :         loff_t                  pos,
    1422             :         loff_t                  len,
    1423             :         bool                    *did_zero)
    1424             : {
    1425    34538577 :         struct inode            *inode = VFS_I(ip);
    1426             : 
    1427    34538577 :         if (IS_DAX(inode))
    1428             :                 return dax_zero_range(inode, pos, len, did_zero,
    1429             :                                       &xfs_dax_write_iomap_ops);
    1430    34538577 :         return iomap_zero_range(inode, pos, len, did_zero,
    1431             :                                 &xfs_buffered_write_iomap_ops);
    1432             : }
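
xfs_zero_range() zeroes a byte range through the buffered-write (or DAX) iomap ops; one common trigger is a write that starts beyond the current EOF, where the gap between the old EOF and the new write offset has to read back as zeroes. A minimal sketch, with arbitrary path and offsets:

/* Sketch: create a short file, then write far beyond EOF. The range
 * between the old EOF and the new data must read back as zeroes; the
 * tail of the old EOF block is zeroed explicitly, the rest stays a hole. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char byte = 0x7f;
        int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) { perror("open"); return 1; }

        if (pwrite(fd, "abc", 3, 0) != 3)               /* old EOF = 3 */
                perror("pwrite");
        if (pwrite(fd, "z", 1, 1000000) != 1)           /* new EOF = 1000001 */
                perror("pwrite");

        /* Everything in [3, 1000000) now reads back as zero. */
        if (pread(fd, &byte, 1, 500000) == 1)
                printf("byte at 500000 = %d\n", byte);

        close(fd);
        return 0;
}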
    1433             : 
    1434             : int
    1435     3072446 : xfs_truncate_page(
    1436             :         struct xfs_inode        *ip,
    1437             :         loff_t                  pos,
    1438             :         bool                    *did_zero)
    1439             : {
    1440     3072446 :         struct inode            *inode = VFS_I(ip);
    1441             : 
    1442     3072446 :         if (IS_DAX(inode))
    1443             :                 return dax_truncate_page(inode, pos, did_zero,
    1444             :                                         &xfs_dax_write_iomap_ops);
    1445     3072446 :         return iomap_truncate_page(inode, pos, did_zero,
    1446             :                                    &xfs_buffered_write_iomap_ops);
    1447             : }
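
xfs_truncate_page() zeroes the part of the block that straddles a new, unaligned EOF so that a later extension cannot expose stale data. A minimal sketch of such a truncate sequence, assuming a 4096-byte block size and a placeholder path:

/* Sketch: shrink a file to an unaligned size, then grow it again; the
 * bytes between the two sizes must read back as zeroes. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/xfs/testfile", O_RDWR | O_CREAT, 0644);

        if (fd < 0) { perror("open"); return 1; }

        if (ftruncate(fd, 8192))        /* two 4k blocks */
                perror("ftruncate");
        if (ftruncate(fd, 5000))        /* new EOF mid-block */
                perror("ftruncate");
        if (ftruncate(fd, 8192))        /* [5000, 8192) must read as zero */
                perror("ftruncate");

        close(fd);
        return 0;
}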

Generated by: LCOV version 1.14