LCOV - code coverage report
Current view: top level - fs/iomap - buffered-io.c (source / functions)
Test:         fstests of 6.5.0-rc3-djwa @ Mon Jul 31 20:08:17 PDT 2023
Date:         2023-07-31 20:08:17
Lines:        788 hit / 861 total (91.5 %)
Functions:    62 hit / 66 total (93.9 %)

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 2010 Red Hat, Inc.
       4             :  * Copyright (C) 2016-2019 Christoph Hellwig.
       5             :  */
       6             : #include <linux/module.h>
       7             : #include <linux/compiler.h>
       8             : #include <linux/fs.h>
       9             : #include <linux/iomap.h>
      10             : #include <linux/pagemap.h>
      11             : #include <linux/uio.h>
      12             : #include <linux/buffer_head.h>
      13             : #include <linux/dax.h>
      14             : #include <linux/writeback.h>
      15             : #include <linux/list_sort.h>
      16             : #include <linux/swap.h>
      17             : #include <linux/bio.h>
      18             : #include <linux/sched/signal.h>
      19             : #include <linux/migrate.h>
      20             : #include "trace.h"
      21             : 
      22             : #include "../internal.h"
      23             : 
      24             : #define IOEND_BATCH_SIZE        4096
      25             : 
      26             : typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
      27             : /*
       28             :  * Structure allocated for each folio to track per-block uptodate and
       29             :  * dirty state, and I/O completions.
      30             :  */
      31             : struct iomap_folio_state {
      32             :         atomic_t                read_bytes_pending;
      33             :         atomic_t                write_bytes_pending;
      34             :         spinlock_t              state_lock;
      35             : 
      36             :         /*
      37             :          * Each block has two bits in this bitmap:
       38             :          * Bits [0..blocks_per_folio) have the uptodate status.
       39             :          * Bits [blocks_per_folio..2*blocks_per_folio) have the dirty status.
      40             :          */
      41             :         unsigned long           state[];
      42             : };
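
    A worked example of the layout above: with a 16KiB folio and 4KiB
    filesystem blocks, i_blocks_per_folio() returns 4, so bits 0..3 of
    ifs->state carry per-block uptodate state and bits 4..7 carry per-block
    dirty state. A minimal sketch of the index arithmetic (the helper names
    are hypothetical; only the layout comes from the comment above):

        /* Bit index of the uptodate state for block @blk. */
        static inline unsigned int ifs_uptodate_bit(unsigned int blk)
        {
                return blk;
        }

        /* Bit index of the dirty state for block @blk. */
        static inline unsigned int ifs_dirty_bit(unsigned int blk,
                        unsigned int blks_per_folio)
        {
                return blks_per_folio + blk;
        }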
      43             : 
      44             : static struct bio_set iomap_ioend_bioset;
      45             : 
      46   375187255 : static inline bool ifs_is_fully_uptodate(struct folio *folio,
      47             :                 struct iomap_folio_state *ifs)
      48             : {
      49   375187255 :         struct inode *inode = folio->mapping->host;
      50             : 
      51   375187255 :         return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
      52             : }
      53             : 
      54  2504060084 : static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
      55             :                 unsigned int block)
      56             : {
      57  2504060084 :         return test_bit(block, ifs->state);
      58             : }
      59             : 
      60   232403133 : static void ifs_set_range_uptodate(struct folio *folio,
      61             :                 struct iomap_folio_state *ifs, size_t off, size_t len)
      62             : {
      63   232403133 :         struct inode *inode = folio->mapping->host;
      64   232403133 :         unsigned int first_blk = off >> inode->i_blkbits;
      65   232403133 :         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
      66   232403133 :         unsigned int nr_blks = last_blk - first_blk + 1;
      67   232403133 :         unsigned long flags;
      68             : 
      69   232403133 :         spin_lock_irqsave(&ifs->state_lock, flags);
      70   232424738 :         bitmap_set(ifs->state, first_blk, nr_blks);
      71   232423501 :         if (ifs_is_fully_uptodate(folio, ifs))
      72   125500484 :                 folio_mark_uptodate(folio);
      73   232421421 :         spin_unlock_irqrestore(&ifs->state_lock, flags);
      74   232378377 : }
      75             : 
      76   236565432 : static void iomap_set_range_uptodate(struct folio *folio, size_t off,
      77             :                 size_t len)
      78             : {
      79   236565432 :         struct iomap_folio_state *ifs = folio->private;
      80             : 
      81   236565432 :         if (ifs)
      82   232311670 :                 ifs_set_range_uptodate(folio, ifs, off, len);
      83             :         else
      84     4253762 :                 folio_mark_uptodate(folio);
      85   236619677 : }
      86             : 
      87   537456826 : static inline bool ifs_block_is_dirty(struct folio *folio,
      88             :                 struct iomap_folio_state *ifs, int block)
      89             : {
      90   537456826 :         struct inode *inode = folio->mapping->host;
      91   537456826 :         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
      92             : 
      93   537458742 :         return test_bit(block + blks_per_folio, ifs->state);
      94             : }
      95             : 
      96    36043617 : static void ifs_clear_range_dirty(struct folio *folio,
      97             :                 struct iomap_folio_state *ifs, size_t off, size_t len)
      98             : {
      99    36043617 :         struct inode *inode = folio->mapping->host;
     100    36043617 :         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
     101    36043578 :         unsigned int first_blk = (off >> inode->i_blkbits);
     102    36043578 :         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
     103    36043578 :         unsigned int nr_blks = last_blk - first_blk + 1;
     104    36043578 :         unsigned long flags;
     105             : 
     106    36043578 :         spin_lock_irqsave(&ifs->state_lock, flags);
     107    36043646 :         bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
     108    36043660 :         spin_unlock_irqrestore(&ifs->state_lock, flags);
     109    36043622 : }
     110             : 
     111    36043872 : static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
     112             : {
     113    36043872 :         struct iomap_folio_state *ifs = folio->private;
     114             : 
     115    36043872 :         if (ifs)
     116    36043608 :                 ifs_clear_range_dirty(folio, ifs, off, len);
     117    36043850 : }
     118             : 
     119    91259294 : static void ifs_set_range_dirty(struct folio *folio,
     120             :                 struct iomap_folio_state *ifs, size_t off, size_t len)
     121             : {
     122    91259294 :         struct inode *inode = folio->mapping->host;
     123    91259294 :         unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
     124    91263610 :         unsigned int first_blk = (off >> inode->i_blkbits);
     125    91263610 :         unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
     126    91263610 :         unsigned int nr_blks = last_blk - first_blk + 1;
     127    91263610 :         unsigned long flags;
     128             : 
     129    91263610 :         spin_lock_irqsave(&ifs->state_lock, flags);
     130    91268508 :         bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
     131    91268534 :         spin_unlock_irqrestore(&ifs->state_lock, flags);
     132    91267942 : }
     133             : 
     134    95514808 : static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
     135             : {
     136    95514808 :         struct iomap_folio_state *ifs = folio->private;
     137             : 
     138    95514808 :         if (ifs)
     139    91261605 :                 ifs_set_range_dirty(folio, ifs, off, len);
     140    95516396 : }
     141             : 
     142   222873188 : static struct iomap_folio_state *ifs_alloc(struct inode *inode,
     143             :                 struct folio *folio, unsigned int flags)
     144             : {
     145   222873188 :         struct iomap_folio_state *ifs = folio->private;
     146   222873188 :         unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
     147   222871437 :         gfp_t gfp;
     148             : 
     149   222871437 :         if (ifs || nr_blocks <= 1)
     150             :                 return ifs;
     151             : 
     152   142802398 :         if (flags & IOMAP_NOWAIT)
     153             :                 gfp = GFP_NOWAIT;
     154             :         else
     155   142804148 :                 gfp = GFP_NOFS | __GFP_NOFAIL;
     156             : 
     157             :         /*
     158             :          * ifs->state tracks two sets of state flags when the
     159             :          * filesystem block size is smaller than the folio size.
      160             :          * The first set tracks per-block uptodate state and the
      161             :          * second tracks per-block dirty state.
     162             :          */
     163   142802398 :         ifs = kzalloc(struct_size(ifs, state,
     164             :                       BITS_TO_LONGS(2 * nr_blocks)), gfp);
     165   142805822 :         if (!ifs)
     166             :                 return ifs;
     167             : 
     168   142805822 :         spin_lock_init(&ifs->state_lock);
     169   142805955 :         if (folio_test_uptodate(folio))
     170     3560842 :                 bitmap_set(ifs->state, 0, nr_blocks);
     171   142805791 :         if (folio_test_dirty(folio))
     172        9280 :                 bitmap_set(ifs->state, nr_blocks, nr_blocks);
     173   142805527 :         folio_attach_private(folio, ifs);
     174             : 
     175   142805527 :         return ifs;
     176             : }
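
    The struct_size()/BITS_TO_LONGS() arithmetic above sizes the flexible
    state[] array for both bitmaps at once. A worked example, assuming a
    64-bit unsigned long:

        /*
         * 2MiB folio, 4KiB blocks: nr_blocks = 512, so 2 * 512 = 1024
         * state bits are needed.  BITS_TO_LONGS(1024) = 16, giving
         *
         *   struct_size(ifs, state, 16)
         *     = sizeof(struct iomap_folio_state) + 16 * sizeof(unsigned long)
         *
         * with bits 0..511 tracking uptodate and 512..1023 tracking dirty.
         */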
     177             : 
     178   142798185 : static void ifs_free(struct folio *folio)
     179             : {
     180   142798185 :         struct iomap_folio_state *ifs = folio_detach_private(folio);
     181             : 
     182   142767704 :         if (!ifs)
     183             :                 return;
     184   142767704 :         WARN_ON_ONCE(atomic_read(&ifs->read_bytes_pending));
     185   142767704 :         WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
     186   142767704 :         WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
     187             :                         folio_test_uptodate(folio));
     188   142808414 :         kfree(ifs);
     189             : }
     190             : 
     191             : /*
     192             :  * Calculate the range inside the folio that we actually need to read.
     193             :  */
     194   200146120 : static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
     195             :                 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
     196             : {
     197   200146120 :         struct iomap_folio_state *ifs = folio->private;
     198   200146120 :         loff_t orig_pos = *pos;
     199   200146120 :         loff_t isize = i_size_read(inode);
     200   200146120 :         unsigned block_bits = inode->i_blkbits;
     201   200146120 :         unsigned block_size = (1 << block_bits);
     202   200146120 :         size_t poff = offset_in_folio(folio, *pos);
     203   200134462 :         size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
     204   199976301 :         unsigned first = poff >> block_bits;
     205   199976301 :         unsigned last = (poff + plen - 1) >> block_bits;
     206             : 
     207             :         /*
      208             :          * If the block size is smaller than the folio size, we need to check the
     209             :          * per-block uptodate status and adjust the offset and length if needed
     210             :          * to avoid reading in already uptodate ranges.
     211             :          */
     212   199976301 :         if (ifs) {
     213             :                 unsigned int i;
     214             : 
     215             :                 /* move forward for each leading block marked uptodate */
     216   239642630 :                 for (i = first; i <= last; i++) {
     217   208457721 :                         if (!ifs_block_is_uptodate(ifs, i))
     218             :                                 break;
     219    39666872 :                         *pos += block_size;
     220    39666872 :                         poff += block_size;
     221    39666872 :                         plen -= block_size;
     222    39666872 :                         first++;
     223             :                 }
     224             : 
     225             :                 /* truncate len if we find any trailing uptodate block(s) */
     226  2488016746 :                 for ( ; i <= last; i++) {
     227  2288424298 :                         if (ifs_block_is_uptodate(ifs, i)) {
     228      562507 :                                 plen -= (last - i + 1) * block_size;
     229      562507 :                                 last = i - 1;
     230      562507 :                                 break;
     231             :                         }
     232             :                 }
     233             :         }
     234             : 
     235             :         /*
     236             :          * If the extent spans the block that contains the i_size, we need to
     237             :          * handle both halves separately so that we properly zero data in the
     238             :          * page cache for blocks that are entirely outside of i_size.
     239             :          */
     240   200155498 :         if (orig_pos <= isize && orig_pos + length > isize) {
     241    51427155 :                 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
     242             : 
     243    51427922 :                 if (first <= end && last > end)
     244     1462156 :                         plen -= (last - end) * block_size;
     245             :         }
     246             : 
     247   200156265 :         *offp = poff;
     248   200156265 :         *lenp = plen;
     249   200156265 : }
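
    A worked example of the trimming above: for a 16KiB folio with 4KiB
    blocks and a 16KiB read in which only blocks 0 and 3 are already
    uptodate, the first loop advances *pos and poff past block 0 (plen drops
    to 12KiB), and the second loop trims block 3 off the tail (plen drops to
    8KiB), so only blocks 1..2 are read. Any non-uptodate blocks past a
    trailing uptodate one are deliberately left for the next iteration.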
     250             : 
     251     8572954 : static void iomap_finish_folio_read(struct folio *folio, size_t offset,
     252             :                 size_t len, int error)
     253             : {
     254     8572954 :         struct iomap_folio_state *ifs = folio->private;
     255             : 
     256     8572954 :         if (unlikely(error)) {
     257        1201 :                 folio_clear_uptodate(folio);
     258        1201 :                 folio_set_error(folio);
     259             :         } else {
     260     8571753 :                 iomap_set_range_uptodate(folio, offset, len);
     261             :         }
     262             : 
     263    17145905 :         if (!ifs || atomic_sub_and_test(len, &ifs->read_bytes_pending))
     264     6684111 :                 folio_unlock(folio);
     265     8572954 : }
     266             : 
     267     7694053 : static void iomap_read_end_io(struct bio *bio)
     268             : {
     269     7694053 :         int error = blk_status_to_errno(bio->bi_status);
     270     7694053 :         struct folio_iter fi;
     271             : 
     272    16267007 :         bio_for_each_folio_all(fi, bio)
     273     8572954 :                 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
     274     7694053 :         bio_put(bio);
     275     7694053 : }
     276             : 
     277             : struct iomap_readpage_ctx {
     278             :         struct folio            *cur_folio;
     279             :         bool                    cur_folio_in_bio;
     280             :         struct bio              *bio;
     281             :         struct readahead_control *rac;
     282             : };
     283             : 
     284             : /**
     285             :  * iomap_read_inline_data - copy inline data into the page cache
     286             :  * @iter: iteration structure
     287             :  * @folio: folio to copy to
     288             :  *
     289             :  * Copy the inline data in @iter into @folio and zero out the rest of the folio.
     290             :  * Only a single IOMAP_INLINE extent is allowed at the end of each file.
     291             :  * Returns zero for success to complete the read, or the usual negative errno.
     292             :  */
     293           0 : static int iomap_read_inline_data(const struct iomap_iter *iter,
     294             :                 struct folio *folio)
     295             : {
     296           0 :         const struct iomap *iomap = iomap_iter_srcmap(iter);
     297           0 :         size_t size = i_size_read(iter->inode) - iomap->offset;
     298           0 :         size_t poff = offset_in_page(iomap->offset);
     299           0 :         size_t offset = offset_in_folio(folio, iomap->offset);
     300           0 :         void *addr;
     301             : 
     302           0 :         if (folio_test_uptodate(folio))
     303             :                 return 0;
     304             : 
     305           0 :         if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
     306             :                 return -EIO;
     307           0 :         if (WARN_ON_ONCE(size > PAGE_SIZE -
     308             :                          offset_in_page(iomap->inline_data)))
     309             :                 return -EIO;
     310           0 :         if (WARN_ON_ONCE(size > iomap->length))
     311             :                 return -EIO;
     312           0 :         if (offset > 0)
     313           0 :                 ifs_alloc(iter->inode, folio, iter->flags);
     314             : 
     315           0 :         addr = kmap_local_folio(folio, offset);
     316           0 :         memcpy(addr, iomap->inline_data, size);
     317           0 :         memset(addr + size, 0, PAGE_SIZE - poff - size);
     318           0 :         kunmap_local(addr);
     319           0 :         iomap_set_range_uptodate(folio, offset, PAGE_SIZE - poff);
     320           0 :         return 0;
     321             : }
     322             : 
     323   156897205 : static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
     324             :                 loff_t pos)
     325             : {
     326   156897205 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
     327             : 
     328   169307311 :         return srcmap->type != IOMAP_MAPPED ||
     329   156897205 :                 (srcmap->flags & IOMAP_F_NEW) ||
     330    12410110 :                 pos >= i_size_read(iter->inode);
     331             : }
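
    In other words, three cases skip reading from the device entirely: the
    source extent is not IOMAP_MAPPED (a hole or unwritten extent), it was
    freshly allocated (IOMAP_F_NEW, so the on-disk contents are stale), or
    the position is at or beyond EOF. In all three, the callers below simply
    zero the folio range and mark it uptodate.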
     332             : 
     333   131723197 : static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
     334             :                 struct iomap_readpage_ctx *ctx, loff_t offset)
     335             : {
     336   131723197 :         const struct iomap *iomap = &iter->iomap;
     337   131723197 :         loff_t pos = iter->pos + offset;
     338   131723197 :         loff_t length = iomap_length(iter) - offset;
     339   131723197 :         struct folio *folio = ctx->cur_folio;
     340   131723197 :         struct iomap_folio_state *ifs;
     341   131723197 :         loff_t orig_pos = pos;
     342   131723197 :         size_t poff, plen;
     343   131723197 :         sector_t sector;
     344             : 
     345   131723197 :         if (iomap->type == IOMAP_INLINE)
     346           0 :                 return iomap_read_inline_data(iter, folio);
     347             : 
     348             :         /* zero post-eof blocks as the page may be mapped */
     349   131723197 :         ifs = ifs_alloc(iter->inode, folio, iter->flags);
     350   131721311 :         iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
     351   131724462 :         if (plen == 0)
     352     3645484 :                 goto done;
     353             : 
     354   128078978 :         if (iomap_block_needs_zeroing(iter, pos)) {
     355   119415106 :                 folio_zero_range(folio, poff, plen);
     356   119412606 :                 iomap_set_range_uptodate(folio, poff, plen);
     357   119415354 :                 goto done;
     358             :         }
     359             : 
     360     8663872 :         ctx->cur_folio_in_bio = true;
     361     8663872 :         if (ifs)
     362     8663869 :                 atomic_add(plen, &ifs->read_bytes_pending);
     363             : 
     364     8663869 :         sector = iomap_sector(iomap, pos);
     365     8663869 :         if (!ctx->bio ||
     366     4299434 :             bio_end_sector(ctx->bio) != sector ||
     367      970201 :             !bio_add_folio(ctx->bio, folio, plen, poff)) {
     368     7694029 :                 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
     369     7694029 :                 gfp_t orig_gfp = gfp;
     370     7694029 :                 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
     371             : 
     372     7694029 :                 if (ctx->bio)
     373     2359393 :                         submit_bio(ctx->bio);
     374             : 
     375     7694033 :                 if (ctx->rac) /* same as readahead_gfp_mask */
     376     3689357 :                         gfp |= __GFP_NORETRY | __GFP_NOWARN;
     377     7694033 :                 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
     378             :                                      REQ_OP_READ, gfp);
     379             :                 /*
     380             :                  * If the bio_alloc fails, try it again for a single page to
     381             :                  * avoid having to deal with partial page reads.  This emulates
     382             :                  * what do_mpage_read_folio does.
     383             :                  */
     384     7694043 :                 if (!ctx->bio) {
     385           0 :                         ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
     386             :                                              orig_gfp);
     387             :                 }
     388     7694043 :                 if (ctx->rac)
     389     3689367 :                         ctx->bio->bi_opf |= REQ_RAHEAD;
     390     7694043 :                 ctx->bio->bi_iter.bi_sector = sector;
     391     7694043 :                 ctx->bio->bi_end_io = iomap_read_end_io;
     392     7694043 :                 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
     393             :         }
     394             : 
     395      969840 : done:
     396             :         /*
     397             :          * Move the caller beyond our range so that it keeps making progress.
     398             :          * For that, we have to include any leading non-uptodate ranges, but
     399             :          * we can skip trailing ones as they will be handled in the next
     400             :          * iteration.
     401             :          */
     402   131724716 :         return pos - orig_pos + plen;
     403             : }
     404             : 
     405    86340086 : int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
     406             : {
     407   172680558 :         struct iomap_iter iter = {
     408    86340086 :                 .inode          = folio->mapping->host,
     409             :                 .pos            = folio_pos(folio),
     410    86340086 :                 .len            = folio_size(folio),
     411             :         };
     412    86340472 :         struct iomap_readpage_ctx ctx = {
     413             :                 .cur_folio      = folio,
     414             :         };
     415    86340472 :         int ret;
     416             : 
     417    86340472 :         trace_iomap_readpage(iter.inode, 1);
     418             : 
     419   181149427 :         while ((ret = iomap_iter(&iter, ops)) > 0)
     420    94807968 :                 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
     421             : 
     422    86340957 :         if (ret < 0)
     423          88 :                 folio_set_error(folio);
     424             : 
     425    86340957 :         if (ctx.bio) {
     426     3581347 :                 submit_bio(ctx.bio);
     427     3581353 :                 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
     428             :         } else {
     429    82759610 :                 WARN_ON_ONCE(ctx.cur_folio_in_bio);
     430    82759610 :                 folio_unlock(folio);
     431             :         }
     432             : 
     433             :         /*
     434             :          * Just like mpage_readahead and block_read_full_folio, we always
     435             :          * return 0 and just set the folio error flag on errors.  This
     436             :          * should be cleaned up throughout the stack eventually.
     437             :          */
     438    86338845 :         return 0;
     439             : }
     440             : EXPORT_SYMBOL_GPL(iomap_read_folio);
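
    A filesystem adopts these helpers directly in its address_space
    operations, passing its own iomap_ops for the mapping lookups. A
    minimal sketch in the style of the XFS wiring; the myfs_* names are
    hypothetical, while the iomap_* symbols are the ones exported from this
    file:

        static int myfs_read_folio(struct file *file, struct folio *folio)
        {
                return iomap_read_folio(folio, &myfs_iomap_ops);
        }

        static const struct address_space_operations myfs_aops = {
                .read_folio             = myfs_read_folio,
                .release_folio          = iomap_release_folio,
                .invalidate_folio       = iomap_invalidate_folio,
                .dirty_folio            = iomap_dirty_folio,
                .is_partially_uptodate  = iomap_is_partially_uptodate,
                /* ... readahead and writeback ops ... */
        };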
     441             : 
     442    15926224 : static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
     443             :                 struct iomap_readpage_ctx *ctx)
     444             : {
     445    15926224 :         loff_t length = iomap_length(iter);
     446    15926224 :         loff_t done, ret;
     447             : 
     448    52844225 :         for (done = 0; done < length; done += ret) {
     449    36917984 :                 if (ctx->cur_folio &&
     450    28362938 :                     offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
     451    20176483 :                         if (!ctx->cur_folio_in_bio)
     452    18378425 :                                 folio_unlock(ctx->cur_folio);
     453    20175992 :                         ctx->cur_folio = NULL;
     454             :                 }
     455    36917525 :                 if (!ctx->cur_folio) {
     456    28730509 :                         ctx->cur_folio = readahead_folio(ctx->rac);
     457    28731445 :                         ctx->cur_folio_in_bio = false;
     458             :                 }
     459    36918461 :                 ret = iomap_readpage_iter(iter, ctx, done);
     460    36918001 :                 if (ret <= 0)
     461           0 :                         return ret;
     462             :         }
     463             : 
     464             :         return done;
     465             : }
     466             : 
     467             : /**
     468             :  * iomap_readahead - Attempt to read pages from a file.
     469             :  * @rac: Describes the pages to be read.
     470             :  * @ops: The operations vector for the filesystem.
     471             :  *
     472             :  * This function is for filesystems to call to implement their readahead
     473             :  * address_space operation.
     474             :  *
      475             :  * Context: The @ops callbacks may submit I/O (e.g. to read the addresses of
     476             :  * blocks from disc), and may wait for it.  The caller may be trying to
     477             :  * access a different page, and so sleeping excessively should be avoided.
     478             :  * It may allocate memory, but should avoid costly allocations.  This
     479             :  * function is called with memalloc_nofs set, so allocations will not cause
     480             :  * the filesystem to be reentered.
     481             :  */
     482     8555096 : void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
     483             : {
     484     8555096 :         struct iomap_iter iter = {
     485     8555096 :                 .inode  = rac->mapping->host,
     486             :                 .pos    = readahead_pos(rac),
     487             :                 .len    = readahead_length(rac),
     488             :         };
     489     8555096 :         struct iomap_readpage_ctx ctx = {
     490             :                 .rac    = rac,
     491             :         };
     492             : 
     493     8555096 :         trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
     494             : 
     495    24481336 :         while (iomap_iter(&iter, ops) > 0)
     496    15926205 :                 iter.processed = iomap_readahead_iter(&iter, &ctx);
     497             : 
     498     8555088 :         if (ctx.bio)
     499     1753300 :                 submit_bio(ctx.bio);
     500     8555107 :         if (ctx.cur_folio) {
     501     8555080 :                 if (!ctx.cur_folio_in_bio)
     502     7250356 :                         folio_unlock(ctx.cur_folio);
     503             :         }
     504     8555065 : }
     505             : EXPORT_SYMBOL_GPL(iomap_readahead);
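
    The matching ->readahead hook is equally thin; a sketch under the same
    hypothetical myfs naming:

        static void myfs_readahead(struct readahead_control *rac)
        {
                iomap_readahead(rac, &myfs_iomap_ops);
        }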
     506             : 
     507             : /*
     508             :  * iomap_is_partially_uptodate checks whether blocks within a folio are
     509             :  * uptodate or not.
     510             :  *
     511             :  * Returns true if all blocks which correspond to the specified part
     512             :  * of the folio are uptodate.
     513             :  */
     514     3573904 : bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
     515             : {
     516     3573904 :         struct iomap_folio_state *ifs = folio->private;
     517     3573904 :         struct inode *inode = folio->mapping->host;
     518     3573904 :         unsigned first, last, i;
     519             : 
     520     3573904 :         if (!ifs)
     521             :                 return false;
     522             : 
     523             :         /* Caller's range may extend past the end of this folio */
     524     3552005 :         count = min(folio_size(folio) - from, count);
     525             : 
     526             :         /* First and last blocks in range within folio */
     527     3552006 :         first = from >> inode->i_blkbits;
     528     3552006 :         last = (from + count - 1) >> inode->i_blkbits;
     529             : 
     530     7920251 :         for (i = first; i <= last; i++)
     531     6998105 :                 if (!ifs_block_is_uptodate(ifs, i))
     532             :                         return false;
     533             :         return true;
     534             : }
     535             : EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
     536             : 
     537             : /**
     538             :  * iomap_get_folio - get a folio reference for writing
     539             :  * @iter: iteration structure
     540             :  * @pos: start offset of write
     541             :  * @len: Suggested size of folio to create.
     542             :  *
     543             :  * Returns a locked reference to the folio at @pos, or an error pointer if the
     544             :  * folio could not be obtained.
     545             :  */
     546    79873282 : struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
     547             : {
     548    79873282 :         fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
     549             : 
     550    79873282 :         if (iter->flags & IOMAP_NOWAIT)
     551           0 :                 fgp |= FGP_NOWAIT;
     552    79873282 :         fgp |= fgf_set_order(len);
     553             : 
     554    79873282 :         return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
     555             :                         fgp, mapping_gfp_mask(iter->inode->i_mapping));
     556             : }
     557             : EXPORT_SYMBOL_GPL(iomap_get_folio);
     558             : 
     559    35401698 : bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
     560             : {
     561    35401698 :         trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
     562    35401698 :                         folio_size(folio));
     563             : 
     564             :         /*
     565             :          * If the folio is dirty, we refuse to release our metadata because
     566             :          * it may be partially dirty.  Once we track per-block dirty state,
     567             :          * we can release the metadata if every block is dirty.
     568             :          */
     569    35401582 :         if (folio_test_dirty(folio))
     570             :                 return false;
     571    35373258 :         ifs_free(folio);
     572    35373258 :         return true;
     573             : }
     574             : EXPORT_SYMBOL_GPL(iomap_release_folio);
     575             : 
     576   109752099 : void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
     577             : {
     578   109752099 :         trace_iomap_invalidate_folio(folio->mapping->host,
     579   109752099 :                                         folio_pos(folio) + offset, len);
     580             : 
     581             :         /*
     582             :          * If we're invalidating the entire folio, clear the dirty state
     583             :          * from it and release it to avoid unnecessary buildup of the LRU.
     584             :          */
     585   109753936 :         if (offset == 0 && len == folio_size(folio)) {
     586   107433764 :                 WARN_ON_ONCE(folio_test_writeback(folio));
     587   107433796 :                 folio_cancel_dirty(folio);
     588   107433827 :                 ifs_free(folio);
     589             :         }
     590   109756442 : }
     591             : EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
     592             : 
     593    12247066 : bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
     594             : {
     595    12247066 :         struct inode *inode = mapping->host;
     596    12247066 :         size_t len = folio_size(folio);
     597             : 
     598    12246506 :         ifs_alloc(inode, folio, 0);
     599    12250676 :         iomap_set_range_dirty(folio, 0, len);
     600    12252266 :         return filemap_dirty_folio(mapping, folio);
     601             : }
     602             : EXPORT_SYMBOL_GPL(iomap_dirty_folio);
     603             : 
     604             : static void
     605        9689 : iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
     606             : {
     607        9689 :         loff_t i_size = i_size_read(inode);
     608             : 
     609             :         /*
      610             :          * Only truncate newly allocated pages beyond EOF, even if the
     611             :          * write started inside the existing inode size.
     612             :          */
     613        9689 :         if (pos + len > i_size)
     614        3669 :                 truncate_pagecache_range(inode, max(pos, i_size),
     615             :                                          pos + len - 1);
     616        9689 : }
     617             : 
     618     3738789 : static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
     619             :                 size_t poff, size_t plen, const struct iomap *iomap)
     620             : {
     621     3738789 :         struct bio_vec bvec;
     622     3738789 :         struct bio bio;
     623             : 
     624     3738789 :         bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
     625     3738777 :         bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
     626     3738777 :         bio_add_folio_nofail(&bio, folio, plen, poff);
     627     3738789 :         return submit_bio_wait(&bio);
     628             : }
     629             : 
     630    79877077 : static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
     631             :                 size_t len, struct folio *folio)
     632             : {
     633    79877077 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
     634    79877077 :         struct iomap_folio_state *ifs;
     635    79877077 :         loff_t block_size = i_blocksize(iter->inode);
     636    79876975 :         loff_t block_start = round_down(pos, block_size);
     637    79876975 :         loff_t block_end = round_up(pos + len, block_size);
     638    79876975 :         unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
     639    79873875 :         size_t from = offset_in_folio(folio, pos), to = from + len;
     640    79878475 :         size_t poff, plen;
     641             : 
     642             :         /*
     643             :          * If the write completely overlaps the current folio, then
     644             :          * entire folio will be dirtied so there is no need for
     645             :          * per-block state tracking structures to be attached to this folio.
     646             :          */
     647    79878475 :         if (pos <= folio_pos(folio) &&
     648    18063091 :             pos + len >= folio_pos(folio) + folio_size(folio))
     649             :                 return 0;
     650             : 
     651    75514486 :         ifs = ifs_alloc(iter->inode, folio, iter->flags);
     652    75471778 :         if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
     653             :                 return -EAGAIN;
     654             : 
     655    75471778 :         if (folio_test_uptodate(folio))
     656             :                 return 0;
     657    68387447 :         folio_clear_error(folio);
     658             : 
     659    68430771 :         do {
     660    68430771 :                 iomap_adjust_read_range(iter->inode, folio, &block_start,
     661             :                                 block_end - block_start, &poff, &plen);
     662    68421333 :                 if (plen == 0)
     663             :                         break;
     664             : 
     665    40890922 :                 if (!(iter->flags & IOMAP_UNSHARE) &&
     666    40886752 :                     (from <= poff || from >= poff + plen) &&
     667    26177917 :                     (to <= poff || to >= poff + plen))
     668    12071013 :                         continue;
     669             : 
     670    28819909 :                 if (iomap_block_needs_zeroing(iter, block_start)) {
     671    25081117 :                         if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
     672             :                                 return -EIO;
     673    25081117 :                         folio_zero_segments(folio, poff, from, to, poff + plen);
     674             :                 } else {
     675     3738792 :                         int status;
     676             : 
     677     3738792 :                         if (iter->flags & IOMAP_NOWAIT)
     678             :                                 return -EAGAIN;
     679             : 
     680     3738792 :                         status = iomap_read_folio_sync(block_start, folio,
     681             :                                         poff, plen, srcmap);
     682     3738722 :                         if (status)
     683        1227 :                                 return status;
     684             :                 }
     685    28818248 :                 iomap_set_range_uptodate(folio, poff, plen);
     686    40889737 :         } while ((block_start += plen) < block_end);
     687             : 
     688             :         return 0;
     689             : }
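
    A worked example of the loop above: for a 4KiB-block filesystem, an
    unaligned write of bytes [1024, 5120) into a non-uptodate 8KiB folio
    walks blocks 0 and 1; neither is fully overwritten, so the head
    [0, 1024) and tail [5120, 8192) are either zeroed in place (when
    iomap_block_needs_zeroing() says the extent is a hole, freshly
    allocated, or post-EOF) or read in synchronously, after which both
    blocks are marked uptodate. A block-aligned write skips its blocks via
    the continue above, and a write covering the whole folio returns early
    without ever allocating an ifs.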
     690             : 
     691    79873929 : static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
     692             :                 size_t len)
     693             : {
     694    79873929 :         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
     695             : 
     696    79873929 :         if (folio_ops && folio_ops->get_folio)
     697           0 :                 return folio_ops->get_folio(iter, pos, len);
     698             :         else
     699    79873929 :                 return iomap_get_folio(iter, pos, len);
     700             : }
     701             : 
     702    79882860 : static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
     703             :                 struct folio *folio)
     704             : {
     705    79882860 :         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
     706             : 
     707    79882860 :         if (folio_ops && folio_ops->put_folio) {
     708           0 :                 folio_ops->put_folio(iter->inode, pos, ret, folio);
     709             :         } else {
     710    79882860 :                 folio_unlock(folio);
     711    79868028 :                 folio_put(folio);
     712             :         }
     713    79882850 : }
     714             : 
     715           0 : static int iomap_write_begin_inline(const struct iomap_iter *iter,
     716             :                 struct folio *folio)
     717             : {
     718             :         /* needs more work for the tailpacking case; disable for now */
     719           0 :         if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
     720             :                 return -EIO;
     721           0 :         return iomap_read_inline_data(iter, folio);
     722             : }
     723             : 
     724    79865146 : static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
     725             :                 size_t len, struct folio **foliop)
     726             : {
     727    79865146 :         const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
     728    79865146 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
     729    79865146 :         struct folio *folio;
     730    79865146 :         int status = 0;
     731             : 
     732    79865146 :         BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
     733    79865146 :         if (srcmap != &iter->iomap)
     734      945063 :                 BUG_ON(pos + len > srcmap->offset + srcmap->length);
     735             : 
     736    79865146 :         if (fatal_signal_pending(current))
     737             :                 return -EINTR;
     738             : 
     739   159757772 :         if (!mapping_large_folio_support(iter->inode->i_mapping))
     740           0 :                 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
     741             : 
     742    79878886 :         folio = __iomap_get_folio(iter, pos, len);
     743    79884932 :         if (IS_ERR(folio))
     744           0 :                 return PTR_ERR(folio);
     745             : 
     746             :         /*
     747             :          * Now we have a locked folio, before we do anything with it we need to
     748             :          * check that the iomap we have cached is not stale. The inode extent
     749             :          * mapping can change due to concurrent IO in flight (e.g.
     750             :          * IOMAP_UNWRITTEN state can change and memory reclaim could have
     751             :          * reclaimed a previously partially written page at this index after IO
     752             :          * completion before this write reaches this file offset) and hence we
     753             :          * could do the wrong thing here (zero a page range incorrectly or fail
     754             :          * to zero) and corrupt data.
     755             :          */
     756    79884932 :         if (folio_ops && folio_ops->iomap_valid) {
     757    79884932 :                 bool iomap_valid = folio_ops->iomap_valid(iter->inode,
     758             :                                                          &iter->iomap);
     759    79885870 :                 if (!iomap_valid) {
     760        8457 :                         iter->iomap.flags |= IOMAP_F_STALE;
     761        8457 :                         status = 0;
     762        8457 :                         goto out_unlock;
     763             :                 }
     764             :         }
     765             : 
     766    79877413 :         if (pos + len > folio_pos(folio) + folio_size(folio))
     767    12619696 :                 len = folio_pos(folio) + folio_size(folio) - pos;
     768             : 
     769    79807672 :         if (srcmap->type == IOMAP_INLINE)
     770           0 :                 status = iomap_write_begin_inline(iter, folio);
     771    79807672 :         else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
     772           0 :                 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
     773             :         else
     774    79807672 :                 status = __iomap_write_begin(iter, pos, len, folio);
     775             : 
     776    79862316 :         if (unlikely(status))
     777        1227 :                 goto out_unlock;
     778             : 
     779    79861089 :         *foliop = folio;
     780    79861089 :         return 0;
     781             : 
     782        9684 : out_unlock:
     783        9684 :         __iomap_put_folio(iter, pos, 0, folio);
     784        9684 :         iomap_write_failed(iter->inode, pos, len);
     785             : 
     786        9684 :         return status;
     787             : }
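
    The ->iomap_valid hook above is how the filesystem detects that race:
    it typically stamps a per-inode sequence count into
    iter->iomap.validity_cookie when it builds the mapping, then re-samples
    the counter here under the folio lock. A minimal sketch, assuming a
    hypothetical myfs that bumps mapping_seq whenever its extent map
    changes (XFS does essentially this with its fork sequence numbers):

        static bool myfs_iomap_valid(struct inode *inode,
                        const struct iomap *iomap)
        {
                /* Cookie was sampled when the iomap was handed out. */
                return iomap->validity_cookie ==
                        READ_ONCE(MYFS_I(inode)->mapping_seq);
        }

        static const struct iomap_folio_ops myfs_folio_ops = {
                .iomap_valid    = myfs_iomap_valid,
        };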
     788             : 
     789    79869867 : static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
     790             :                 size_t copied, struct folio *folio)
     791             : {
     792    79869867 :         flush_dcache_folio(folio);
     793             : 
     794             :         /*
     795             :          * The blocks that were entirely written will now be uptodate, so we
     796             :          * don't have to worry about a read_folio reading them and overwriting a
     797             :          * partial write.  However, if we've encountered a short write and only
     798             :          * partially written into a block, it will not be marked uptodate, so a
     799             :          * read_folio might come in and destroy our partial write.
     800             :          *
     801             :          * Do the simplest thing and just treat any short write to a
     802             :          * non-uptodate page as a zero-length write, and force the caller to
     803             :          * redo the whole thing.
     804             :          */
     805    79870941 :         if (unlikely(copied < len && !folio_test_uptodate(folio)))
     806             :                 return 0;
     807    79870936 :         iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
     808    79869020 :         iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
     809    79870891 :         filemap_dirty_folio(inode->i_mapping, folio);
     810    79870891 :         return copied;
     811             : }
     812             : 
     813           0 : static size_t iomap_write_end_inline(const struct iomap_iter *iter,
     814             :                 struct folio *folio, loff_t pos, size_t copied)
     815             : {
     816           0 :         const struct iomap *iomap = &iter->iomap;
     817           0 :         void *addr;
     818             : 
     819           0 :         WARN_ON_ONCE(!folio_test_uptodate(folio));
     820           0 :         BUG_ON(!iomap_inline_data_valid(iomap));
     821             : 
     822           0 :         flush_dcache_folio(folio);
     823           0 :         addr = kmap_local_folio(folio, pos);
     824           0 :         memcpy(iomap_inline_data(iomap, pos), addr, copied);
     825           0 :         kunmap_local(addr);
     826             : 
     827           0 :         mark_inode_dirty(iter->inode);
     828           0 :         return copied;
     829             : }
     830             : 
     831             : /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
     832    79841188 : static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
     833             :                 size_t copied, struct folio *folio)
     834             : {
     835    79841188 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
     836    79841188 :         loff_t old_size = iter->inode->i_size;
     837    79841188 :         size_t ret;
     838             : 
     839    79841188 :         if (srcmap->type == IOMAP_INLINE) {
     840           0 :                 ret = iomap_write_end_inline(iter, folio, pos, copied);
     841    79841188 :         } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
     842           0 :                 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
     843             :                                 copied, &folio->page, NULL);
     844             :         } else {
     845    79841188 :                 ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
     846             :         }
     847             : 
     848             :         /*
     849             :          * Update the in-memory inode size after copying the data into the page
     850             :          * cache.  It's up to the file system to write the updated size to disk,
     851             :          * preferably after I/O completion so that no stale data is exposed.
     852             :          */
     853    79861021 :         if (pos + ret > old_size) {
     854    56363196 :                 i_size_write(iter->inode, pos + ret);
     855    56363196 :                 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
     856             :         }
     857    79861021 :         __iomap_put_folio(iter, pos, ret, folio);
     858             : 
     859    79877799 :         if (old_size < pos)
     860     4949746 :                 pagecache_isize_extended(iter->inode, old_size, pos);
     861    79877790 :         if (ret < len)
     862           5 :                 iomap_write_failed(iter->inode, pos + ret, len - ret);
     863    79877790 :         return ret;
     864             : }
     865             : 
     866    52000229 : static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
     867             : {
     868    52000229 :         loff_t length = iomap_length(iter);
     869    52000229 :         size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
     870    52000229 :         loff_t pos = iter->pos;
     871    52000229 :         ssize_t written = 0;
     872    52000229 :         long status = 0;
     873    52000229 :         struct address_space *mapping = iter->inode->i_mapping;
     874    52000229 :         unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
     875             : 
     876    64606738 :         do {
     877    64606738 :                 struct folio *folio;
     878    64606738 :                 size_t offset;          /* Offset into folio */
     879    64606738 :                 size_t bytes;           /* Bytes to write to folio */
     880    64606738 :                 size_t copied;          /* Bytes copied from user */
     881             : 
     882    64606738 :                 offset = pos & (chunk - 1);
     883    64606738 :                 bytes = min(chunk - offset, iov_iter_count(i));
     884    64606738 :                 status = balance_dirty_pages_ratelimited_flags(mapping,
     885             :                                                                bdp_flags);
     886    64608647 :                 if (unlikely(status))
     887             :                         break;
     888             : 
     889    64608647 :                 if (bytes > length)
     890             :                         bytes = length;
     891             : 
     892             :                 /*
     893             :                  * Bring in the user page that we'll copy from _first_.
     894             :                  * Otherwise there's a nasty deadlock on copying from the
     895             :                  * same page as we're writing to, without it being marked
     896             :                  * up-to-date.
     897             :                  *
     898             :                  * For async buffered writes the assumption is that the user
     899             :                  * page has already been faulted in. This can be optimized by
     900             :                  * faulting the user page.
     901             :                  */
     902    64608647 :                 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
     903             :                         status = -EFAULT;
     904             :                         break;
     905             :                 }
     906             : 
     907    64587338 :                 status = iomap_write_begin(iter, pos, bytes, &folio);
     908    64603451 :                 if (unlikely(status))
     909             :                         break;
     910    64603038 :                 if (iter->iomap.flags & IOMAP_F_STALE)
     911             :                         break;
     912             : 
     913    64595189 :                 offset = offset_in_folio(folio, pos);
     914    64598517 :                 if (bytes > folio_size(folio) - offset)
     915    12606782 :                         bytes = folio_size(folio) - offset;
     916             : 
     917    64598405 :                 if (mapping_writably_mapped(mapping))
     918       57875 :                         flush_dcache_folio(folio);
     919             : 
     920    64598405 :                 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
     921    64533883 :                 status = iomap_write_end(iter, pos, bytes, copied, folio);
     922             : 
     923    64600821 :                 if (unlikely(copied != status))
     924           3 :                         iov_iter_revert(i, copied - status);
     925             : 
     926    64600821 :                 cond_resched();
     927    64600521 :                 if (unlikely(status == 0)) {
     928             :                         /*
     929             :                          * A short copy made iomap_write_end() reject the
     930             :                          * thing entirely.  Might be memory poisoning
     931             :                          * halfway through, might be a race with munmap,
     932             :                          * might be severe memory pressure.
     933             :                          */
     934           5 :                         if (copied)
     935             :                                 bytes = copied;
     936           5 :                         if (chunk > PAGE_SIZE)
     937           5 :                                 chunk /= 2;
     938             :                 } else {
     939    64600516 :                         pos += status;
     940    64600516 :                         written += status;
     941    64600516 :                         length -= status;
     942             :                 }
     943    64600521 :         } while (iov_iter_count(i) && length);
     944             : 
     945    52002274 :         if (status == -EAGAIN) {
     946           0 :                 iov_iter_revert(i, written);
     947           0 :                 return -EAGAIN;
     948             :         }
     949    52002274 :         return written ? written : status;
     950             : }
     951             : 
     952             : ssize_t
     953    50339188 : iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
     954             :                 const struct iomap_ops *ops)
     955             : {
     956    50339188 :         struct iomap_iter iter = {
     957    50339188 :                 .inode          = iocb->ki_filp->f_mapping->host,
     958    50339188 :                 .pos            = iocb->ki_pos,
     959             :                 .len            = iov_iter_count(i),
     960             :                 .flags          = IOMAP_WRITE,
     961             :         };
     962    50339188 :         ssize_t ret;
     963             : 
     964    50339188 :         if (iocb->ki_flags & IOCB_NOWAIT)
     965           0 :                 iter.flags |= IOMAP_NOWAIT;
     966             : 
     967   102341465 :         while ((ret = iomap_iter(&iter, ops)) > 0)
     968    51994753 :                 iter.processed = iomap_write_iter(&iter, i);
     969             : 
     970    50339575 :         if (unlikely(iter.pos == iocb->ki_pos))
     971             :                 return ret;
     972    49562697 :         ret = iter.pos - iocb->ki_pos;
     973    49562697 :         iocb->ki_pos = iter.pos;
     974    49562697 :         return ret;
     975             : }
     976             : EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
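                      : 
                      : /*
                      :  * Editorial example (not part of this file): a minimal sketch of a
                      :  * filesystem ->write_iter method driving iomap_file_buffered_write().
                      :  * "example_iomap_ops" is a placeholder; real callers (e.g. XFS) also
                      :  * handle IOCB_NOWAIT locking, zeroing beyond EOF, and error fallout.
                      :  */
                      : static ssize_t example_buffered_write_iter(struct kiocb *iocb,
                      :                 struct iov_iter *from)
                      : {
                      :         struct inode *inode = file_inode(iocb->ki_filp);
                      :         ssize_t ret;
                      : 
                      :         inode_lock(inode);
                      :         ret = generic_write_checks(iocb, from);
                      :         if (ret > 0)
                      :                 ret = iomap_file_buffered_write(iocb, from,
                      :                                 &example_iomap_ops);
                      :         inode_unlock(inode);
                      : 
                      :         if (ret > 0)
                      :                 ret = generic_write_sync(iocb, ret);
                      :         return ret;
                      : }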
     977             : 
     978          13 : static int iomap_write_delalloc_ifs_punch(struct inode *inode,
     979             :                 struct folio *folio, loff_t start_byte, loff_t end_byte,
     980             :                 iomap_punch_t punch)
     981             : {
     982          13 :         unsigned int first_blk, last_blk, i;
     983          13 :         loff_t last_byte;
     984          13 :         u8 blkbits = inode->i_blkbits;
     985          13 :         struct iomap_folio_state *ifs;
     986          13 :         int ret = 0;
     987             : 
     988             :         /*
     989             :          * When we have per-block dirty tracking, there can be
     990             :          * blocks within a folio which are marked uptodate
     991             :          * but not dirty. In that case it is necessary to punch
     992             :          * out such blocks to avoid leaking any delalloc blocks.
     993             :          */
     994          13 :         ifs = folio->private;
     995          13 :         if (!ifs)
     996             :                 return ret;
     997             : 
     998          13 :         last_byte = min_t(loff_t, end_byte - 1,
     999             :                         folio_pos(folio) + folio_size(folio) - 1);
    1000          13 :         first_blk = offset_in_folio(folio, start_byte) >> blkbits;
    1001          13 :         last_blk = offset_in_folio(folio, last_byte) >> blkbits;
    1002          38 :         for (i = first_blk; i <= last_blk; i++) {
    1003          25 :                 if (!ifs_block_is_dirty(folio, ifs, i)) {
    1004          20 :                         ret = punch(inode, folio_pos(folio) + (i << blkbits),
    1005          20 :                                     1 << blkbits);
    1006          20 :                         if (ret)
    1007           0 :                                 return ret;
    1008             :                 }
    1009             :         }
    1010             : 
    1011             :         return ret;
    1012             : }
    1013             : 
    1014             : 
    1015         185 : static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
    1016             :                 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
    1017             :                 iomap_punch_t punch)
    1018             : {
    1019         185 :         int ret = 0;
    1020             : 
    1021         185 :         if (!folio_test_dirty(folio))
    1022             :                 return ret;
    1023             : 
    1024             :         /* if dirty, punch up to offset */
    1025          13 :         if (start_byte > *punch_start_byte) {
    1026           0 :                 ret = punch(inode, *punch_start_byte,
    1027             :                                 start_byte - *punch_start_byte);
    1028           0 :                 if (ret)
    1029             :                         return ret;
    1030             :         }
    1031             : 
    1032             :         /* Punch non-dirty blocks within folio */
    1033          13 :         ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
    1034             :                         end_byte, punch);
    1035          13 :         if (ret)
    1036             :                 return ret;
    1037             : 
    1038             :         /*
    1039             :          * Make sure the next punch start is correctly bound to
    1040             :          * the end of this data range, not the end of the folio.
    1041             :          */
    1042          13 :         *punch_start_byte = min_t(loff_t, end_byte,
    1043             :                                 folio_pos(folio) + folio_size(folio));
    1044             : 
    1045          13 :         return ret;
    1046             : }
    1047             : 
    1048             : /*
    1049             :  * Scan the data range passed to us for dirty page cache folios. If we find a
     1050             :  * dirty folio, punch out the preceding range and update the offset from which
     1051             :  * the next punch will start.
    1052             :  *
     1053             :  * We can punch out storage reservations under clean pages because they either
     1054             :  * contain data that has been written back - in which case the delalloc punch
     1055             :  * over that range is a no-op - or they were instantiated by read faults, in
     1056             :  * which case they contain zeroes and we can remove the delalloc backing range;
     1057             :  * any new writes to those pages will then do the normal hole filling operation.
     1058             :  *
     1059             :  * This keeps the logic simple: we only need to retain delalloc extents over
     1060             :  * the dirty ranges of the page cache.
    1061             :  *
    1062             :  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
    1063             :  * simplify range iterations.
    1064             :  */
    1065         155 : static int iomap_write_delalloc_scan(struct inode *inode,
    1066             :                 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
    1067             :                 iomap_punch_t punch)
    1068             : {
    1069         341 :         while (start_byte < end_byte) {
    1070         186 :                 struct folio    *folio;
    1071         186 :                 int ret;
    1072             : 
    1073             :                 /* grab locked page */
    1074         186 :                 folio = filemap_lock_folio(inode->i_mapping,
    1075         186 :                                 start_byte >> PAGE_SHIFT);
    1076         186 :                 if (IS_ERR(folio)) {
    1077           1 :                         start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
    1078             :                                         PAGE_SIZE;
    1079           1 :                         continue;
    1080             :                 }
    1081             : 
    1082         185 :                 ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
    1083             :                                                  start_byte, end_byte, punch);
    1084         185 :                 if (ret) {
    1085           0 :                         folio_unlock(folio);
    1086           0 :                         folio_put(folio);
    1087           0 :                         return ret;
    1088             :                 }
    1089             : 
    1090             :                 /* move offset to start of next folio in range */
    1091         185 :                 start_byte = folio_next_index(folio) << PAGE_SHIFT;
    1092         185 :                 folio_unlock(folio);
    1093         185 :                 folio_put(folio);
    1094             :         }
    1095             :         return 0;
    1096             : }
    1097             : 
    1098             : /*
    1099             :  * Punch out all the delalloc blocks in the range given except for those that
    1100             :  * have dirty data still pending in the page cache - those are going to be
    1101             :  * written and so must still retain the delalloc backing for writeback.
    1102             :  *
     1103             :  * As we are scanning the page cache for data, we don't need to reinvent the
    1104             :  * wheel - mapping_seek_hole_data() does exactly what we need to identify the
    1105             :  * start and end of data ranges correctly even for sub-folio block sizes. This
    1106             :  * byte range based iteration is especially convenient because it means we
     1107             :  * don't have to care about variable size folios, nor where the start or end of
     1108             :  * the data range lies within a folio, whether they lie within the same folio,
     1109             :  * or even whether there are multiple discontiguous data ranges within the folio.
    1110             :  *
    1111             :  * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
    1112             :  * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
    1113             :  * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
    1114             :  * date. A write page fault can then mark it dirty. If we then fail a write()
    1115             :  * beyond EOF into that up to date cached range, we allocate a delalloc block
    1116             :  * beyond EOF and then have to punch it out. Because the range is up to date,
    1117             :  * mapping_seek_hole_data() will return it, and we will skip the punch because
     1118             :  * the folio is dirty. This is incorrect - we always need to punch out delalloc
     1119             :  * beyond EOF in this case as writeback will never write back and convert that
    1120             :  * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
    1121             :  * resulting in always punching out the range from the EOF to the end of the
    1122             :  * range the iomap spans.
    1123             :  *
     1124             :  * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because
     1125             :  * that matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
     1126             :  * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
     1127             :  * returns the end of the data range (data_end). Using closed intervals would
     1128             :  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and would
     1129             :  * expose the code to subtle off-by-one bugs....
    1130             :  */
    1131        3081 : static int iomap_write_delalloc_release(struct inode *inode,
    1132             :                 loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
    1133             : {
    1134        3081 :         loff_t punch_start_byte = start_byte;
    1135        3081 :         loff_t scan_end_byte = min(i_size_read(inode), end_byte);
    1136        3081 :         int error = 0;
    1137             : 
    1138             :         /*
    1139             :          * Lock the mapping to avoid races with page faults re-instantiating
    1140             :          * folios and dirtying them via ->page_mkwrite whilst we walk the
    1141             :          * cache and perform delalloc extent removal. Failing to do this can
    1142             :          * leave dirty pages with no space reservation in the cache.
    1143             :          */
    1144        3081 :         filemap_invalidate_lock(inode->i_mapping);
    1145        3236 :         while (start_byte < scan_end_byte) {
    1146        2393 :                 loff_t          data_end;
    1147             : 
    1148        2393 :                 start_byte = mapping_seek_hole_data(inode->i_mapping,
    1149             :                                 start_byte, scan_end_byte, SEEK_DATA);
    1150             :                 /*
    1151             :                  * If there is no more data to scan, all that is left is to
    1152             :                  * punch out the remaining range.
    1153             :                  */
    1154        2393 :                 if (start_byte == -ENXIO || start_byte == scan_end_byte)
    1155             :                         break;
    1156         155 :                 if (start_byte < 0) {
    1157           0 :                         error = start_byte;
    1158           0 :                         goto out_unlock;
    1159             :                 }
    1160         155 :                 WARN_ON_ONCE(start_byte < punch_start_byte);
    1161         155 :                 WARN_ON_ONCE(start_byte > scan_end_byte);
    1162             : 
    1163             :                 /*
    1164             :                  * We find the end of this contiguous cached data range by
    1165             :                  * seeking from start_byte to the beginning of the next hole.
    1166             :                  */
    1167         155 :                 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
    1168             :                                 scan_end_byte, SEEK_HOLE);
    1169         155 :                 if (data_end < 0) {
    1170           0 :                         error = data_end;
    1171           0 :                         goto out_unlock;
    1172             :                 }
    1173         155 :                 WARN_ON_ONCE(data_end <= start_byte);
    1174         155 :                 WARN_ON_ONCE(data_end > scan_end_byte);
    1175             : 
    1176         155 :                 error = iomap_write_delalloc_scan(inode, &punch_start_byte,
    1177             :                                 start_byte, data_end, punch);
    1178         155 :                 if (error)
    1179           0 :                         goto out_unlock;
    1180             : 
    1181             :                 /* The next data search starts at the end of this one. */
    1182             :                 start_byte = data_end;
    1183             :         }
    1184             : 
    1185        3081 :         if (punch_start_byte < end_byte)
    1186        3070 :                 error = punch(inode, punch_start_byte,
    1187             :                                 end_byte - punch_start_byte);
    1188          11 : out_unlock:
    1189        3081 :         filemap_invalidate_unlock(inode->i_mapping);
    1190        3081 :         return error;
    1191             : }
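                      : 
                      : /*
                      :  * Worked example (editorial): assume a 4k block and page size, and a
                      :  * delalloc extent reserved over [0, 16384) by a write that then failed
                      :  * entirely.  If a racing page fault dirtied only the page at [4096, 8192),
                      :  * the scan above finds that page via SEEK_DATA/SEEK_HOLE, punches the
                      :  * preceding clean range [0, 4096), advances punch_start_byte to 8192, and
                      :  * the final punch removes [8192, 16384).  Delalloc survives only under
                      :  * the dirty page, which writeback will later convert.
                      :  */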
    1192             : 
    1193             : /*
    1194             :  * When a short write occurs, the filesystem may need to remove reserved space
     1195             :  * that was allocated in ->iomap_begin from its ->iomap_end method. For
     1196             :  * filesystems that use delayed allocation, we need to punch out delalloc
     1197             :  * extents in the range that are not dirty in the page cache. As the write can
    1198             :  * race with page faults, there can be dirty pages over the delalloc extent
    1199             :  * outside the range of a short write but still within the delalloc extent
    1200             :  * allocated for this iomap.
    1201             :  *
    1202             :  * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
    1203             :  * simplify range iterations.
    1204             :  *
    1205             :  * The punch() callback *must* only punch delalloc extents in the range passed
    1206             :  * to it. It must skip over all other types of extents in the range and leave
    1207             :  * them completely unchanged. It must do this punch atomically with respect to
    1208             :  * other extent modifications.
    1209             :  *
    1210             :  * The punch() callback may be called with a folio locked to prevent writeback
    1211             :  * extent allocation racing at the edge of the range we are currently punching.
    1212             :  * The locked folio may or may not cover the range being punched, so it is not
    1213             :  * safe for the punch() callback to lock folios itself.
    1214             :  *
    1215             :  * Lock order is:
    1216             :  *
    1217             :  * inode->i_rwsem (shared or exclusive)
    1218             :  *   inode->i_mapping->invalidate_lock (exclusive)
    1219             :  *     folio_lock()
    1220             :  *       ->punch
    1221             :  *         internal filesystem allocation lock
    1222             :  */
    1223    90093733 : int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
    1224             :                 struct iomap *iomap, loff_t pos, loff_t length,
    1225             :                 ssize_t written, iomap_punch_t punch)
    1226             : {
    1227    90093733 :         loff_t                  start_byte;
    1228    90093733 :         loff_t                  end_byte;
    1229    90093733 :         unsigned int            blocksize = i_blocksize(inode);
    1230             : 
    1231    90103108 :         if (iomap->type != IOMAP_DELALLOC)
    1232             :                 return 0;
    1233             : 
    1234             :         /* If we didn't reserve the blocks, we're not allowed to punch them. */
    1235    51551643 :         if (!(iomap->flags & IOMAP_F_NEW))
    1236             :                 return 0;
    1237             : 
    1238             :         /*
    1239             :          * start_byte refers to the first unused block after a short write. If
    1240             :          * nothing was written, round offset down to point at the first block in
    1241             :          * the range.
    1242             :          */
    1243    14068849 :         if (unlikely(!written))
    1244        1510 :                 start_byte = round_down(pos, blocksize);
    1245             :         else
    1246    14067339 :                 start_byte = round_up(pos + written, blocksize);
    1247    14068849 :         end_byte = round_up(pos + length, blocksize);
    1248             : 
    1249             :         /* Nothing to do if we've written the entire delalloc extent */
    1250    14068849 :         if (start_byte >= end_byte)
    1251             :                 return 0;
    1252             : 
    1253        3081 :         return iomap_write_delalloc_release(inode, start_byte, end_byte,
    1254             :                                         punch);
    1255             : }
    1256             : EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
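                      : 
                      : /*
                      :  * Editorial example: the shape of a punch callback a filesystem might
                      :  * pass in from its ->iomap_end method.  "example_remove_delalloc" is a
                      :  * placeholder for the filesystem-specific extent removal; it must obey
                      :  * the rules documented above (delalloc extents only, done atomically).
                      :  */
                      : static int example_punch(struct inode *inode, loff_t offset, loff_t length)
                      : {
                      :         /* Remove only delalloc extents in [offset, offset + length). */
                      :         return example_remove_delalloc(inode, offset, length);
                      : }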
    1257             : 
    1258       17365 : static loff_t iomap_unshare_iter(struct iomap_iter *iter)
    1259             : {
    1260       17365 :         struct iomap *iomap = &iter->iomap;
    1261       17365 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
    1262       17365 :         loff_t pos = iter->pos;
    1263       17365 :         loff_t length = iomap_length(iter);
    1264       17365 :         long status = 0;
    1265       17365 :         loff_t written = 0;
    1266             : 
    1267             :         /* don't bother with blocks that are not shared to start with */
    1268       17365 :         if (!(iomap->flags & IOMAP_F_SHARED))
    1269             :                 return length;
    1270             :         /* don't bother with holes or unwritten extents */
    1271        7311 :         if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
    1272             :                 return length;
    1273             : 
    1274        8699 :         do {
    1275        8699 :                 unsigned long offset = offset_in_page(pos);
    1276        8699 :                 unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
    1277        8699 :                 struct folio *folio;
    1278             : 
    1279        8699 :                 status = iomap_write_begin(iter, pos, bytes, &folio);
    1280        8699 :                 if (unlikely(status))
    1281           0 :                         return status;
    1282        8699 :                 if (iter->iomap.flags & IOMAP_F_STALE)
    1283             :                         break;
    1284             : 
    1285        8699 :                 status = iomap_write_end(iter, pos, bytes, bytes, folio);
    1286        8699 :                 if (WARN_ON_ONCE(status == 0))
    1287             :                         return -EIO;
    1288             : 
    1289        8699 :                 cond_resched();
    1290             : 
    1291        8699 :                 pos += status;
    1292        8699 :                 written += status;
    1293        8699 :                 length -= status;
    1294             : 
    1295        8699 :                 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
    1296        8699 :         } while (length);
    1297             : 
    1298             :         return written;
    1299             : }
    1300             : 
    1301             : int
    1302          40 : iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
    1303             :                 const struct iomap_ops *ops)
    1304             : {
    1305          40 :         struct iomap_iter iter = {
    1306             :                 .inode          = inode,
    1307             :                 .pos            = pos,
    1308             :                 .len            = len,
    1309             :                 .flags          = IOMAP_WRITE | IOMAP_UNSHARE,
    1310             :         };
    1311          40 :         int ret;
    1312             : 
    1313       17405 :         while ((ret = iomap_iter(&iter, ops)) > 0)
    1314       17365 :                 iter.processed = iomap_unshare_iter(&iter);
    1315          40 :         return ret;
    1316             : }
    1317             : EXPORT_SYMBOL_GPL(iomap_file_unshare);
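                      : 
                      : /*
                      :  * Editorial example: iomap_file_unshare() is typically driven from an
                      :  * fallocate(FALLOC_FL_UNSHARE_RANGE) implementation to break extent
                      :  * sharing.  A hedged sketch, again assuming example_iomap_ops:
                      :  */
                      : static int example_unshare(struct inode *inode, loff_t pos, loff_t len)
                      : {
                      :         int error;
                      : 
                      :         error = iomap_file_unshare(inode, pos, len, &example_iomap_ops);
                      :         if (error)
                      :                 return error;
                      :         /* Persist the newly written copies before reporting success. */
                      :         return filemap_write_and_wait_range(inode->i_mapping, pos,
                      :                                             pos + len - 1);
                      : }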
    1318             : 
    1319    38084296 : static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
    1320             : {
    1321    38084296 :         const struct iomap *srcmap = iomap_iter_srcmap(iter);
    1322    38084296 :         loff_t pos = iter->pos;
    1323    38084296 :         loff_t length = iomap_length(iter);
    1324    38084296 :         loff_t written = 0;
    1325             : 
    1326             :         /* already zeroed?  we're done. */
    1327    38084296 :         if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
    1328             :                 return length;
    1329             : 
    1330    15270090 :         do {
    1331    15270090 :                 struct folio *folio;
    1332    15270090 :                 int status;
    1333    15270090 :                 size_t offset;
    1334    15270090 :                 size_t bytes = min_t(u64, SIZE_MAX, length);
    1335             : 
    1336    15270090 :                 status = iomap_write_begin(iter, pos, bytes, &folio);
    1337    15270018 :                 if (status)
    1338         904 :                         return status;
    1339    15269114 :                 if (iter->iomap.flags & IOMAP_F_STALE)
    1340             :                         break;
    1341             : 
    1342    15268506 :                 offset = offset_in_folio(folio, pos);
    1343    15264826 :                 if (bytes > folio_size(folio) - offset)
    1344       12923 :                         bytes = folio_size(folio) - offset;
    1345             : 
    1346    15268579 :                 folio_zero_range(folio, offset, bytes);
    1347    15262024 :                 folio_mark_accessed(folio);
    1348             : 
    1349    15268699 :                 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
    1350    15268815 :                 if (WARN_ON_ONCE(bytes == 0))
    1351             :                         return -EIO;
    1352             : 
    1353    15268815 :                 pos += bytes;
    1354    15268815 :                 length -= bytes;
    1355    15268815 :                 written += bytes;
    1356    15268815 :         } while (length > 0);
    1357             : 
    1358    15256482 :         if (did_zero)
    1359     1665040 :                 *did_zero = true;
    1360             :         return written;
    1361             : }
    1362             : 
    1363             : int
    1364    29966293 : iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
    1365             :                 const struct iomap_ops *ops)
    1366             : {
    1367    29966293 :         struct iomap_iter iter = {
    1368             :                 .inode          = inode,
    1369             :                 .pos            = pos,
    1370             :                 .len            = len,
    1371             :                 .flags          = IOMAP_ZERO,
    1372             :         };
    1373    29966293 :         int ret;
    1374             : 
    1375    68050622 :         while ((ret = iomap_iter(&iter, ops)) > 0)
    1376    38084273 :                 iter.processed = iomap_zero_iter(&iter, did_zero);
    1377    29964550 :         return ret;
    1378             : }
    1379             : EXPORT_SYMBOL_GPL(iomap_zero_range);
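                      : 
                      : /*
                      :  * Editorial example: zeroing the range exposed when a file grows, as a
                      :  * truncate-up path might do.  Assumes the hypothetical example_iomap_ops.
                      :  */
                      : static int example_zero_eof_range(struct inode *inode, loff_t old_size,
                      :                 loff_t new_size, bool *did_zero)
                      : {
                      :         return iomap_zero_range(inode, old_size, new_size - old_size,
                      :                                 did_zero, &example_iomap_ops);
                      : }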
    1380             : 
    1381             : int
    1382     2395903 : iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
    1383             :                 const struct iomap_ops *ops)
    1384             : {
    1385     2395903 :         unsigned int blocksize = i_blocksize(inode);
    1386     2395904 :         unsigned int off = pos & (blocksize - 1);
    1387             : 
    1388             :         /* Block boundary? Nothing to do */
    1389     2395904 :         if (!off)
    1390             :                 return 0;
    1391     1886069 :         return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
    1392             : }
    1393             : EXPORT_SYMBOL_GPL(iomap_truncate_page);
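                      : 
                      : /*
                      :  * Editorial example: a truncate-down path typically zeroes the partial
                      :  * block at the new EOF before updating i_size, so stale data never
                      :  * reappears if the file later grows.  Sketch, same assumptions as above:
                      :  */
                      : static int example_truncate_block(struct inode *inode, loff_t newsize,
                      :                 bool *did_zero)
                      : {
                      :         return iomap_truncate_page(inode, newsize, did_zero,
                      :                                    &example_iomap_ops);
                      : }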
    1394             : 
    1395     5468387 : static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
    1396             :                 struct folio *folio)
    1397             : {
    1398     5468387 :         loff_t length = iomap_length(iter);
    1399     5468387 :         int ret;
    1400             : 
    1401     5468387 :         if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
    1402           0 :                 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
    1403           0 :                                               &iter->iomap);
    1404           0 :                 if (ret)
    1405           0 :                         return ret;
    1406           0 :                 block_commit_write(&folio->page, 0, length);
    1407             :         } else {
    1408     5468387 :                 WARN_ON_ONCE(!folio_test_uptodate(folio));
    1409     5468402 :                 folio_mark_dirty(folio);
    1410             :         }
    1411             : 
    1412             :         return length;
    1413             : }
    1414             : 
    1415     3446569 : vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
    1416             : {
    1417     3446569 :         struct iomap_iter iter = {
    1418     3446569 :                 .inode          = file_inode(vmf->vma->vm_file),
    1419             :                 .flags          = IOMAP_WRITE | IOMAP_FAULT,
    1420             :         };
    1421     3446569 :         struct folio *folio = page_folio(vmf->page);
    1422     3446569 :         ssize_t ret;
    1423             : 
    1424     3446569 :         folio_lock(folio);
    1425     3446610 :         ret = folio_mkwrite_check_truncate(folio, iter.inode);
    1426     3446599 :         if (ret < 0)
    1427          30 :                 goto out_unlock;
    1428     3446569 :         iter.pos = folio_pos(folio);
    1429     3446569 :         iter.len = ret;
    1430     8914902 :         while ((ret = iomap_iter(&iter, ops)) > 0)
    1431     5468373 :                 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
    1432             : 
    1433     3446533 :         if (ret < 0)
    1434       26858 :                 goto out_unlock;
    1435     3419675 :         folio_wait_stable(folio);
    1436     3419675 :         return VM_FAULT_LOCKED;
    1437       26888 : out_unlock:
    1438       26888 :         folio_unlock(folio);
    1439       26888 :         return block_page_mkwrite_return(ret);
    1440             : }
    1441             : EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
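                      : 
                      : /*
                      :  * Editorial example: wiring iomap_page_mkwrite() into a filesystem's
                      :  * vm_operations_struct.  The freeze protection shown is conventional;
                      :  * example_iomap_ops remains an assumption.
                      :  */
                      : static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
                      : {
                      :         struct inode *inode = file_inode(vmf->vma->vm_file);
                      :         vm_fault_t ret;
                      : 
                      :         sb_start_pagefault(inode->i_sb);
                      :         file_update_time(vmf->vma->vm_file);
                      :         ret = iomap_page_mkwrite(vmf, &example_iomap_ops);
                      :         sb_end_pagefault(inode->i_sb);
                      :         return ret;
                      : }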
    1442             : 
    1443    39729593 : static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
    1444             :                 size_t len, int error)
    1445             : {
    1446    39729593 :         struct iomap_folio_state *ifs = folio->private;
    1447             : 
    1448    39729593 :         if (error) {
    1449      128945 :                 folio_set_error(folio);
    1450      128946 :                 mapping_set_error(inode->i_mapping, error);
    1451             :         }
    1452             : 
    1453    79459222 :         WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
    1454    79458990 :         WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
    1455             : 
    1456    79458996 :         if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
    1457    36043958 :                 folio_end_writeback(folio);
    1458    39729632 : }
    1459             : 
    1460             : /*
    1461             :  * We're now finished for good with this ioend structure.  Update the page
    1462             :  * state, release holds on bios, and finally free up memory.  Do not use the
    1463             :  * ioend after this.
    1464             :  */
    1465             : static u32
    1466    26083102 : iomap_finish_ioend(struct iomap_ioend *ioend, int error)
    1467             : {
    1468    26083102 :         struct inode *inode = ioend->io_inode;
    1469    26083102 :         struct bio *bio = &ioend->io_inline_bio;
    1470    26083102 :         struct bio *last = ioend->io_bio, *next;
    1471    26083102 :         u64 start = bio->bi_iter.bi_sector;
    1472    26083102 :         loff_t offset = ioend->io_offset;
    1473    26083102 :         bool quiet = bio_flagged(bio, BIO_QUIET);
    1474    26083102 :         u32 folio_count = 0;
    1475             : 
    1476    52167155 :         for (bio = &ioend->io_inline_bio; bio; bio = next) {
    1477    26084005 :                 struct folio_iter fi;
    1478             : 
    1479             :                 /*
    1480             :                  * For the last bio, bi_private points to the ioend, so we
    1481             :                  * need to explicitly end the iteration here.
    1482             :                  */
    1483    26084005 :                 if (bio == last)
    1484             :                         next = NULL;
    1485             :                 else
    1486         865 :                         next = bio->bi_private;
    1487             : 
    1488             :                 /* walk all folios in bio, ending page IO on them */
    1489    65813595 :                 bio_for_each_folio_all(fi, bio) {
    1490    39729591 :                         iomap_finish_folio_write(inode, fi.folio, fi.length,
    1491             :                                         error);
    1492    39729632 :                         folio_count++;
    1493             :                 }
    1494    26084021 :                 bio_put(bio);
    1495             :         }
    1496             :         /* The ioend has been freed by bio_put() */
    1497             : 
    1498    26083150 :         if (unlikely(error && !quiet)) {
    1499       42960 :                 printk_ratelimited(KERN_ERR
    1500             : "%s: writeback error on inode %lu, offset %lld, sector %llu",
    1501             :                         inode->i_sb->s_id, inode->i_ino, offset, start);
    1502             :         }
    1503    26083149 :         return folio_count;
    1504             : }
    1505             : 
    1506             : /*
    1507             :  * Ioend completion routine for merged bios. This can only be called from task
     1508             :  * contexts as merged ioends can be of unbounded length. Hence we have to break up
    1509             :  * the writeback completions into manageable chunks to avoid long scheduler
    1510             :  * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
    1511             :  * good batch processing throughput without creating adverse scheduler latency
    1512             :  * conditions.
    1513             :  */
    1514             : void
    1515    22549867 : iomap_finish_ioends(struct iomap_ioend *ioend, int error)
    1516             : {
    1517    22549867 :         struct list_head tmp;
    1518    22549867 :         u32 completions;
    1519             : 
    1520    22549867 :         might_sleep();
    1521             : 
    1522    22549867 :         list_replace_init(&ioend->io_list, &tmp);
    1523    22549867 :         completions = iomap_finish_ioend(ioend, error);
    1524             : 
    1525    22556335 :         while (!list_empty(&tmp)) {
    1526        6461 :                 if (completions > IOEND_BATCH_SIZE * 8) {
    1527           0 :                         cond_resched();
    1528           0 :                         completions = 0;
    1529             :                 }
    1530        6461 :                 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
    1531        6461 :                 list_del_init(&ioend->io_list);
    1532        6461 :                 completions += iomap_finish_ioend(ioend, error);
    1533             :         }
    1534    22549882 : }
    1535             : EXPORT_SYMBOL_GPL(iomap_finish_ioends);
    1536             : 
    1537             : /*
    1538             :  * We can merge two adjacent ioends if they have the same set of work to do.
    1539             :  */
    1540             : static bool
    1541     1682201 : iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
    1542             : {
    1543     1682201 :         if (ioend->io_bio->bi_status != next->io_bio->bi_status)
    1544             :                 return false;
    1545     1682188 :         if ((ioend->io_flags & IOMAP_F_SHARED) ^
    1546     1682188 :             (next->io_flags & IOMAP_F_SHARED))
    1547             :                 return false;
    1548     1660675 :         if ((ioend->io_type == IOMAP_UNWRITTEN) ^
    1549     1660675 :             (next->io_type == IOMAP_UNWRITTEN))
    1550             :                 return false;
    1551     1319984 :         if (ioend->io_offset + ioend->io_size != next->io_offset)
    1552             :                 return false;
    1553             :         /*
    1554             :          * Do not merge physically discontiguous ioends. The filesystem
    1555             :          * completion functions will have to iterate the physical
    1556             :          * discontiguities even if we merge the ioends at a logical level, so
    1557             :          * we don't gain anything by merging physical discontiguities here.
    1558             :          *
    1559             :          * We cannot use bio->bi_iter.bi_sector here as it is modified during
    1560             :          * submission so does not point to the start sector of the bio at
    1561             :          * completion.
    1562             :          */
    1563      426412 :         if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
    1564      419951 :                 return false;
    1565             :         return true;
    1566             : }
    1567             : 
    1568             : void
    1569    22549881 : iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
    1570             : {
    1571    22549881 :         struct iomap_ioend *next;
    1572             : 
    1573    22549881 :         INIT_LIST_HEAD(&ioend->io_list);
    1574             : 
    1575    22556341 :         while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
    1576             :                         io_list))) {
    1577     1682201 :                 if (!iomap_ioend_can_merge(ioend, next))
    1578             :                         break;
    1579        6461 :                 list_move_tail(&next->io_list, &ioend->io_list);
    1580        6460 :                 ioend->io_size += next->io_size;
    1581             :         }
    1582    22549880 : }
    1583             : EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
    1584             : 
    1585             : static int
    1586     2720882 : iomap_ioend_compare(void *priv, const struct list_head *a,
    1587             :                 const struct list_head *b)
    1588             : {
    1589     2720882 :         struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
    1590     2720882 :         struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
    1591             : 
    1592     2720882 :         if (ia->io_offset < ib->io_offset)
    1593             :                 return -1;
    1594      949054 :         if (ia->io_offset > ib->io_offset)
    1595      949043 :                 return 1;
    1596             :         return 0;
    1597             : }
    1598             : 
    1599             : void
    1600    20874141 : iomap_sort_ioends(struct list_head *ioend_list)
    1601             : {
    1602    20874141 :         list_sort(NULL, ioend_list, iomap_ioend_compare);
    1603    20874141 : }
    1604             : EXPORT_SYMBOL_GPL(iomap_sort_ioends);
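                      : 
                      : /*
                      :  * Editorial example: a completion worker combining the three exported
                      :  * helpers above.  This mirrors the sort/merge/finish pattern XFS uses;
                      :  * the ioend list is assumed to have been collected by the caller.
                      :  */
                      : static void example_finish_ioend_list(struct list_head *ioends, int error)
                      : {
                      :         struct iomap_ioend *ioend;
                      : 
                      :         iomap_sort_ioends(ioends);
                      :         while ((ioend = list_first_entry_or_null(ioends,
                      :                         struct iomap_ioend, io_list))) {
                      :                 list_del_init(&ioend->io_list);
                      :                 iomap_ioend_try_merge(ioend, ioends);
                      :                 iomap_finish_ioends(ioend, error);
                      :         }
                      : }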
    1605             : 
    1606     3526815 : static void iomap_writepage_end_bio(struct bio *bio)
    1607             : {
    1608     3526815 :         struct iomap_ioend *ioend = bio->bi_private;
    1609             : 
    1610     3526815 :         iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
    1611     3526815 : }
    1612             : 
    1613             : /*
    1614             :  * Submit the final bio for an ioend.
    1615             :  *
     1616             :  * If @error is non-zero, some part of the submission process failed after we
     1617             :  * marked pages for writeback and unlocked them.  In this situation, we need
     1618             :  * to fail the bio instead of submitting it.  This typically only happens on
     1619             :  * a filesystem shutdown.
    1620             :  */
    1621             : static int
    1622    26083073 : iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
    1623             :                 int error)
    1624             : {
    1625    26083073 :         ioend->io_bio->bi_private = ioend;
    1626    26083073 :         ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
    1627             : 
    1628    26083073 :         if (wpc->ops->prepare_ioend)
    1629    26083073 :                 error = wpc->ops->prepare_ioend(ioend, error);
    1630    26083055 :         if (error) {
    1631             :                 /*
    1632             :                  * If we're failing the IO now, just mark the ioend with an
    1633             :                  * error and finish it.  This will run IO completion immediately
    1634             :                  * as there is only one reference to the ioend at this point in
    1635             :                  * time.
    1636             :                  */
    1637         155 :                 ioend->io_bio->bi_status = errno_to_blk_status(error);
    1638         155 :                 bio_endio(ioend->io_bio);
    1639         155 :                 return error;
    1640             :         }
    1641             : 
    1642    26082900 :         submit_bio(ioend->io_bio);
    1643    26082900 :         return 0;
    1644             : }
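                      : 
                      : /*
                      :  * Editorial example: shape of a ->prepare_ioend hook.  A filesystem may
                      :  * override bi_end_io here to defer completion work (e.g. unwritten
                      :  * extent conversion) to task context; "example_end_bio" is a placeholder.
                      :  */
                      : static int example_prepare_ioend(struct iomap_ioend *ioend, int status)
                      : {
                      :         if (!status && ioend->io_type == IOMAP_UNWRITTEN)
                      :                 ioend->io_bio->bi_end_io = example_end_bio;
                      :         return status;
                      : }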
    1645             : 
    1646             : static struct iomap_ioend *
    1647    26083096 : iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
    1648             :                 loff_t offset, sector_t sector, struct writeback_control *wbc)
    1649             : {
    1650    26083096 :         struct iomap_ioend *ioend;
    1651    26083096 :         struct bio *bio;
    1652             : 
    1653    44266025 :         bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
    1654             :                                REQ_OP_WRITE | wbc_to_write_flags(wbc),
    1655             :                                GFP_NOFS, &iomap_ioend_bioset);
    1656    26083079 :         bio->bi_iter.bi_sector = sector;
    1657    26083079 :         wbc_init_bio(wbc, bio);
    1658             : 
    1659    26083027 :         ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
    1660    26083027 :         INIT_LIST_HEAD(&ioend->io_list);
    1661    26083027 :         ioend->io_type = wpc->iomap.type;
    1662    26083027 :         ioend->io_flags = wpc->iomap.flags;
    1663    26083027 :         ioend->io_inode = inode;
    1664    26083027 :         ioend->io_size = 0;
    1665    26083027 :         ioend->io_folios = 0;
    1666    26083027 :         ioend->io_offset = offset;
    1667    26083027 :         ioend->io_bio = bio;
    1668    26083027 :         ioend->io_sector = sector;
    1669    26083027 :         return ioend;
    1670             : }
    1671             : 
    1672             : /*
    1673             :  * Allocate a new bio, and chain the old bio to the new one.
    1674             :  *
    1675             :  * Note that we have to perform the chaining in this unintuitive order
    1676             :  * so that the bi_private linkage is set up in the right direction for the
    1677             :  * traversal in iomap_finish_ioend().
    1678             :  */
    1679             : static struct bio *
    1680         865 : iomap_chain_bio(struct bio *prev)
    1681             : {
    1682         865 :         struct bio *new;
    1683             : 
    1684         865 :         new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
    1685         865 :         bio_clone_blkg_association(new, prev);
    1686         865 :         new->bi_iter.bi_sector = bio_end_sector(prev);
    1687             : 
    1688         865 :         bio_chain(prev, new);
    1689         865 :         bio_get(prev);          /* for iomap_finish_ioend */
    1690         865 :         submit_bio(prev);
    1691         865 :         return new;
    1692             : }
    1693             : 
    1694             : static bool
    1695   300718185 : iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
    1696             :                 sector_t sector)
    1697             : {
    1698   300718185 :         if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
    1699   300718185 :             (wpc->ioend->io_flags & IOMAP_F_SHARED))
    1700             :                 return false;
    1701   300594376 :         if (wpc->iomap.type != wpc->ioend->io_type)
    1702             :                 return false;
    1703   296905919 :         if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
    1704             :                 return false;
    1705   295423395 :         if (sector != bio_end_sector(wpc->ioend->io_bio))
    1706             :                 return false;
    1707             :         /*
    1708             :          * Limit ioend bio chain lengths to minimise IO completion latency. This
    1709             :          * also prevents long tight loops ending page writeback on all the
    1710             :          * folios in the ioend.
    1711             :          */
    1712   293197628 :         if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
    1713          23 :                 return false;
    1714             :         return true;
    1715             : }
    1716             : 
    1717             : /*
    1718             :  * Test to see if we have an existing ioend structure that we could append to
    1719             :  * first; otherwise finish off the current ioend and start another.
    1720             :  */
    1721             : static void
    1722   319278145 : iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
    1723             :                 struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
    1724             :                 struct writeback_control *wbc, struct list_head *iolist)
    1725             : {
    1726   319278145 :         sector_t sector = iomap_sector(&wpc->iomap, pos);
    1727   319278145 :         unsigned len = i_blocksize(inode);
    1728   319279292 :         size_t poff = offset_in_folio(folio, pos);
    1729             : 
    1730   319281684 :         if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
    1731    26083116 :                 if (wpc->ioend)
    1732     7527137 :                         list_add(&wpc->ioend->io_list, iolist);
    1733    26083117 :                 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
    1734             :         }
    1735             : 
    1736   319281512 :         if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
    1737         865 :                 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
    1738         865 :                 bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
    1739             :         }
    1740             : 
    1741   319281278 :         if (ifs)
    1742   319281014 :                 atomic_add(len, &ifs->write_bytes_pending);
    1743   319281617 :         wpc->ioend->io_size += len;
    1744   319281617 :         wbc_account_cgroup_owner(wbc, &folio->page, len);
    1745   319280063 : }
    1746             : 
    1747             : /*
    1748             :  * We implement an immediate ioend submission policy here to avoid needing to
    1749             :  * chain multiple ioends and hence nest mempool allocations which can violate
    1750             :  * the forward progress guarantees we need to provide. The current ioend we're
    1751             :  * adding blocks to is cached in the writepage context, and if the new block
    1752             :  * doesn't append to the cached ioend, it will create a new ioend and cache that
    1753             :  * instead.
    1754             :  *
    1755             :  * If a new ioend is created and cached, the old ioend is returned and queued
    1756             :  * locally for submission once the entire page is processed or an error has been
     1757             :  * detected.  While ioends are submitted as soon as they are complete,
    1758             :  * batching optimisations are provided by higher level block plugging.
    1759             :  *
    1760             :  * At the end of a writeback pass, there will be a cached ioend remaining on the
    1761             :  * writepage context that the caller will need to submit.
    1762             :  */
    1763             : static int
    1764    36197842 : iomap_writepage_map(struct iomap_writepage_ctx *wpc,
    1765             :                 struct writeback_control *wbc, struct inode *inode,
    1766             :                 struct folio *folio, u64 end_pos)
    1767             : {
    1768    36197842 :         struct iomap_folio_state *ifs = folio->private;
    1769    36197842 :         struct iomap_ioend *ioend, *next;
    1770    36197842 :         unsigned len = i_blocksize(inode);
    1771    36197866 :         unsigned nblocks = i_blocks_per_folio(inode, folio);
    1772    36197821 :         u64 pos = folio_pos(folio);
    1773    36197821 :         int error = 0, count = 0, i;
    1774    36197821 :         LIST_HEAD(submit_list);
    1775             : 
    1776    36197821 :         WARN_ON_ONCE(end_pos <= pos);
    1777             : 
    1778    36197821 :         if (!ifs && nblocks > 1) {
    1779     3393176 :                 ifs = ifs_alloc(inode, folio, 0);
    1780     3393178 :                 iomap_set_range_dirty(folio, 0, end_pos - pos);
    1781             :         }
    1782             : 
    1783    72395419 :         WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
    1784             : 
    1785             :         /*
    1786             :          * Walk through the folio to find areas to write back. If we
    1787             :          * run off the end of the current map or find the current map
    1788             :          * invalid, grab a new one.
    1789             :          */
    1790   573500579 :         for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
    1791   537456768 :                 if (ifs && !ifs_block_is_dirty(folio, ifs, i))
    1792   218010706 :                         continue;
    1793             : 
    1794   319450097 :                 error = wpc->ops->map_blocks(wpc, inode, pos);
    1795   319450478 :                 if (error)
    1796             :                         break;
    1797   319296377 :                 trace_iomap_writepage_map(inode, &wpc->iomap);
    1798   319295248 :                 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
    1799           0 :                         continue;
    1800   319295248 :                 if (wpc->iomap.type == IOMAP_HOLE)
    1801       16988 :                         continue;
    1802   319278260 :                 iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
    1803             :                                  &submit_list);
    1804   319275062 :                 count++;
    1805             :         }
    1806    36197912 :         if (count)
    1807    36043838 :                 wpc->ioend->io_folios++;
    1808             : 
    1809    36351757 :         WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
    1810    36197912 :         WARN_ON_ONCE(!folio_test_locked(folio));
    1811    36197934 :         WARN_ON_ONCE(folio_test_writeback(folio));
    1812    36197962 :         WARN_ON_ONCE(folio_test_dirty(folio));
    1813             : 
    1814             :         /*
    1815             :          * We cannot cancel the ioend directly here on error.  We may have
    1816             :          * already set other pages under writeback and hence we have to run I/O
    1817             :          * completion to mark the error state of the pages under writeback
    1818             :          * appropriately.
    1819             :          */
    1820    36197965 :         if (unlikely(error)) {
    1821             :                 /*
    1822             :                  * Let the filesystem know what portion of the current page
    1823             :                  * failed to map. If the page hasn't been added to ioend, it
    1824             :                  * won't be affected by I/O completion and we must unlock it
    1825             :                  * now.
    1826             :                  */
    1827      154101 :                 if (wpc->ops->discard_folio)
    1828      154101 :                         wpc->ops->discard_folio(folio, pos);
    1829      154101 :                 if (!count) {
    1830      154074 :                         folio_unlock(folio);
    1831      154074 :                         goto done;
    1832             :                 }
    1833             :         }
    1834             : 
    1835             :         /*
    1836             :          * We can have dirty bits set past end of file in page_mkwrite path
    1837             :          * while mapping the last partial folio. Hence it's better to clear
    1838             :          * all the dirty bits in the folio here.
    1839             :          */
    1840    36043891 :         iomap_clear_range_dirty(folio, 0, folio_size(folio));
    1841    36043854 :         folio_start_writeback(folio);
    1842    36043899 :         folio_unlock(folio);
    1843             : 
    1844             :         /*
    1845             :          * Preserve the original error if there was one; catch
    1846             :          * submission errors here and propagate into subsequent ioend
    1847             :          * submissions.
    1848             :          */
    1849    43570901 :         list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
    1850     7527109 :                 int error2;
    1851             : 
    1852     7527109 :                 list_del_init(&ioend->io_list);
    1853     7527124 :                 error2 = iomap_submit_ioend(wpc, ioend, error);
    1854     7527121 :                 if (error2 && !error)
    1855           0 :                         error = error2;
    1856             :         }
    1857             : 
    1858             :         /*
    1859             :          * We can end up here with no error and nothing to write only if we race
    1860             :          * with a partial page truncate on a sub-page block sized filesystem.
    1861             :          */
    1862    36043792 :         if (!count)
    1863           0 :                 folio_end_writeback(folio);
    1864    36043792 : done:
    1865    36197866 :         mapping_set_error(inode->i_mapping, error);
    1866    36197867 :         return error;
    1867             : }
    1868             : 
    1869             : /*
    1870             :  * Write out a dirty page.
    1871             :  *
    1872             :  * For delalloc space on the page, we need to allocate space and flush it.
    1873             :  * For unwritten space on the page, we need to start the conversion to
    1874             :  * regular allocated space.
    1875             :  */
    1876    36198267 : static int iomap_do_writepage(struct folio *folio,
    1877             :                 struct writeback_control *wbc, void *data)
    1878             : {
    1879    36198267 :         struct iomap_writepage_ctx *wpc = data;
    1880    36198267 :         struct inode *inode = folio->mapping->host;
    1881    36198267 :         u64 end_pos, isize;
    1882             : 
    1883    36198267 :         trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
    1884             : 
    1885             :         /*
    1886             :          * Refuse to write the folio out if we're called from reclaim context.
    1887             :          *
     1888             :  * This avoids stack overflows when called from the deep call stacks of
     1889             :  * random callers doing direct reclaim or memcg reclaim.  We explicitly
    1890             :          * allow reclaim from kswapd as the stack usage there is relatively low.
    1891             :          *
    1892             :          * This should never happen except in the case of a VM regression so
    1893             :          * warn about it.
    1894             :          */
    1895    36198276 :         if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
    1896             :                         PF_MEMALLOC))
    1897           0 :                 goto redirty;
    1898             : 
    1899             :         /*
    1900             :          * Is this folio beyond the end of the file?
    1901             :          *
     1902             :  * If the folio index is less than end_index, adjust end_pos to
     1903             :  * the highest offset that this folio should represent.
    1904             :          * -----------------------------------------------------
    1905             :          * |                    file mapping           | <EOF> |
    1906             :          * -----------------------------------------------------
    1907             :          * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
    1908             :          * ^--------------------------------^----------|--------
    1909             :          * |     desired writeback range    |      see else    |
    1910             :          * ---------------------------------^------------------|
    1911             :          */
    1912    36198276 :         isize = i_size_read(inode);
    1913    36198276 :         end_pos = folio_pos(folio) + folio_size(folio);
    1914    36198300 :         if (end_pos > isize) {
    1915             :                 /*
    1916             :                  * Check whether the page to write out is beyond or straddles
    1917             :                  * i_size or not.
    1918             :                  * -------------------------------------------------------
    1919             :                  * |            file mapping                    | <EOF>  |
    1920             :                  * -------------------------------------------------------
    1921             :                  * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
    1922             :                  * ^--------------------------------^-----------|---------
    1923             :                  * |                                |      Straddles     |
    1924             :                  * ---------------------------------^-----------|--------|
    1925             :                  */
    1926     8737267 :                 size_t poff = offset_in_folio(folio, isize);
    1927     8737259 :                 pgoff_t end_index = isize >> PAGE_SHIFT;
    1928             : 
    1929             :                 /*
    1930             :                  * Skip the folio if it's fully outside i_size, e.g.
    1931             :                  * due to a truncate operation that's in progress.  We've
    1932             :                  * cleaned this folio and truncate will finish things off
    1933             :                  * for us.
    1934             :                  *
    1935             :                  * Note that end_index is an unsigned long.  If the given
    1936             :                  * offset is greater than 16TB on a 32-bit system, and we
    1937             :                  * checked whether the folio is fully outside i_size with
    1938             :                  * "if (folio->index >= end_index + 1)", then "end_index + 1"
    1939             :                  * would overflow and evaluate to 0.  The folio would then be
    1940             :                  * redirtied and written out repeatedly, resulting in an
    1941             :                  * infinite loop; the user program performing this operation
    1942             :                  * would hang.  Instead, we detect this situation by checking
    1943             :                  * whether the folio is totally beyond i_size or whether its
    1944             :                  * offset is exactly equal to the EOF.
    1945             :                  */
    1946     8737259 :                 if (folio->index > end_index ||
    1947     8691265 :                     (folio->index == end_index && poff == 0))
    1948         400 :                         goto unlock;
    1949             : 
    1950             :                 /*
    1951             :                  * The folio straddles i_size.  It must be zeroed out on each
    1952             :                  * and every writepage invocation because it may be mmapped:
    1953             :                  * "A file is mapped in multiples of the page size.  For a file
    1954             :                  * that is not a multiple of the page size, the remaining
    1955             :                  * memory is zeroed when mapped, and writes to that region are
    1956             :                  * not written out to the file."
    1957             :                  */
    1958     8736859 :                 folio_zero_segment(folio, poff, folio_size(folio));
    1959     8736853 :                 end_pos = isize;
    1960             :         }
    1961             : 
    1962    36197900 :         return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
    1963             : 
    1964             : redirty:
    1965           0 :         folio_redirty_for_writepage(wbc, folio);
    1966         400 : unlock:
    1967         400 :         folio_unlock(folio);
    1968         400 :         return 0;
    1969             : }
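The wraparound hazard described in the comment above is easy to reproduce in isolation. A standalone userspace sketch, assuming 4 KiB pages and modelling a 32-bit pgoff_t with uint32_t (values are illustrative, not from this file):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* i_size just under 16 TiB, the limit the comment refers to. */
        uint64_t isize = ((uint64_t)1 << 44) - 1;
        /* end_index = isize >> PAGE_SHIFT, truncated to 32 bits. */
        uint32_t end_index = isize >> 12;               /* 0xffffffff */

        /* "end_index + 1" wraps to 0, so "index >= end_index + 1" is
         * never true and the folio would be written out forever. */
        printf("end_index     = 0x%08x\n", end_index);
        printf("end_index + 1 = 0x%08x\n", (uint32_t)(end_index + 1));
        return 0;
}

The robust check used above compares folio->index > end_index (and tests the in-folio offset separately), which never needs the overflowing "+ 1".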
    1970             : 
    1971             : int
    1972    31119469 : iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
    1973             :                 struct iomap_writepage_ctx *wpc,
    1974             :                 const struct iomap_writeback_ops *ops)
    1975             : {
    1976    31119469 :         int                     ret;
    1977             : 
    1978    31119469 :         wpc->ops = ops;
    1979    31119469 :         ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
    1980    31119617 :         if (!wpc->ioend)
    1981             :                 return ret;
    1982    18555985 :         return iomap_submit_ioend(wpc, wpc->ioend, ret);
    1983             : }
    1984             : EXPORT_SYMBOL_GPL(iomap_writepages);
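A filesystem consumes this entry point from its ->writepages address_space operation, embedding a struct iomap_writepage_ctx in its own context and supplying the iomap_writeback_ops callbacks. A sketch of such glue for a hypothetical filesystem, modelled on in-tree iomap users (all myfs_* names are illustrative):

#include <linux/iomap.h>
#include <linux/writeback.h>

/* Hypothetical per-writeback context embedding the iomap one. */
struct myfs_writepage_ctx {
        struct iomap_writepage_ctx      ctx;
        /* filesystem-private mapping state could live here */
};

/* Hypothetical callbacks; a real filesystem maps blocks to extents here. */
static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
                struct inode *inode, loff_t offset);
static int myfs_prepare_ioend(struct iomap_ioend *ioend, int status);
static void myfs_discard_folio(struct folio *folio, loff_t pos);

static const struct iomap_writeback_ops myfs_writeback_ops = {
        .map_blocks     = myfs_map_blocks,
        .prepare_ioend  = myfs_prepare_ioend,
        .discard_folio  = myfs_discard_folio,
};

/* Wired into myfs's address_space_operations as ->writepages. */
static int myfs_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct myfs_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}

As the source above shows, iomap_writepages() then walks the dirty folios via write_cache_pages(), calling iomap_do_writepage() for each, and submits any still-open ioend once the walk completes.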
    1985             : 
    1986           0 : static int __init iomap_init(void)
    1987             : {
    1988           0 :         return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
    1989             :                            offsetof(struct iomap_ioend, io_inline_bio),
    1990             :                            BIOSET_NEED_BVECS);
    1991             : }
    1992             : fs_initcall(iomap_init);

Generated by: LCOV version 1.14