LCOV - code coverage report
Current view: top level - fs/ext4 - page-io.c (source / functions)
Test:         fstests of 6.5.0-rc4-xfsa @ Mon Jul 31 20:08:27 PDT 2023
Date:         2023-07-31 20:08:27

                   Hit    Total    Coverage
Lines:               5      248       2.0 %
Functions:           1       22       4.5 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * linux/fs/ext4/page-io.c
       4             :  *
       5             :  * This contains the new page_io functions for ext4
       6             :  *
       7             :  * Written by Theodore Ts'o, 2010.
       8             :  */
       9             : 
      10             : #include <linux/fs.h>
      11             : #include <linux/time.h>
      12             : #include <linux/highuid.h>
      13             : #include <linux/pagemap.h>
      14             : #include <linux/quotaops.h>
      15             : #include <linux/string.h>
      16             : #include <linux/buffer_head.h>
      17             : #include <linux/writeback.h>
      18             : #include <linux/pagevec.h>
      19             : #include <linux/mpage.h>
      20             : #include <linux/namei.h>
      21             : #include <linux/uio.h>
      22             : #include <linux/bio.h>
      23             : #include <linux/workqueue.h>
      24             : #include <linux/kernel.h>
      25             : #include <linux/slab.h>
      26             : #include <linux/mm.h>
      27             : #include <linux/sched/mm.h>
      28             : 
      29             : #include "ext4_jbd2.h"
      30             : #include "xattr.h"
      31             : #include "acl.h"
      32             : 
      33             : static struct kmem_cache *io_end_cachep;
      34             : static struct kmem_cache *io_end_vec_cachep;
      35             : 
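                     : /*
                     :  * Create the slab caches backing ext4_io_end and ext4_io_end_vec
                     :  * allocations.  Called once at module init; returns -ENOMEM if
                     :  * either cache cannot be created.
                     :  */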
      36           2 : int __init ext4_init_pageio(void)
      37             : {
      38           2 :         io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
      39           2 :         if (io_end_cachep == NULL)
      40             :                 return -ENOMEM;
      41             : 
      42           2 :         io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
      43           2 :         if (io_end_vec_cachep == NULL) {
      44           0 :                 kmem_cache_destroy(io_end_cachep);
      45           0 :                 return -ENOMEM;
      46             :         }
      47             :         return 0;
      48             : }
      49             : 
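                     : /* Tear down the caches created by ext4_init_pageio(). */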
      50           0 : void ext4_exit_pageio(void)
      51             : {
      52           0 :         kmem_cache_destroy(io_end_cachep);
      53           0 :         kmem_cache_destroy(io_end_vec_cachep);
      54           0 : }
      55             : 
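                     : /*
                     :  * Allocate one extent-range descriptor and queue it on @io_end.  Each
                     :  * vec records a file range whose unwritten extents must be converted
                     :  * to written once the IO covering it completes.
                     :  */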
      56           0 : struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
      57             : {
      58           0 :         struct ext4_io_end_vec *io_end_vec;
      59             : 
      60           0 :         io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
      61           0 :         if (!io_end_vec)
      62             :                 return ERR_PTR(-ENOMEM);
      63           0 :         INIT_LIST_HEAD(&io_end_vec->list);
      64           0 :         list_add_tail(&io_end_vec->list, &io_end->list_vec);
      65           0 :         return io_end_vec;
      66             : }
      67             : 
      68           0 : static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
      69             : {
      70           0 :         struct ext4_io_end_vec *io_end_vec, *tmp;
      71             : 
      72           0 :         if (list_empty(&io_end->list_vec))
      73             :                 return;
      74           0 :         list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
      75           0 :                 list_del(&io_end_vec->list);
      76           0 :                 kmem_cache_free(io_end_vec_cachep, io_end_vec);
      77             :         }
      78             : }
      79             : 
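                     : /* Return the most recently allocated vec; the list must be non-empty. */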
      80           0 : struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
      81             : {
      82           0 :         BUG_ON(list_empty(&io_end->list_vec));
      83           0 :         return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
      84             : }
      85             : 
      86             : /*
      87             :  * Print a buffer I/O error compatible with fs/buffer.c.  This
      88             :  * provides compatibility with dmesg scrapers that look for a specific
      89             :  * buffer I/O error message.  We really need a unified error reporting
      90             :  * structure to userspace a la Digital Unix's uerf system, but it's
      91             :  * probably not going to happen in my lifetime, due to LKML politics...
      92             :  */
      93           0 : static void buffer_io_error(struct buffer_head *bh)
      94             : {
      95           0 :         printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
      96             :                         bh->b_bdev,
      97             :                         (unsigned long long)bh->b_blocknr);
      98           0 : }
      99             : 
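                     : /*
                     :  * Finish page writeback for the folios covered by @bio: clear the
                     :  * async_write flag on every buffer the bio wrote, record any IO error
                     :  * on the mapping, and end folio writeback once no buffer in the folio
                     :  * is still under IO.
                     :  */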
     100           0 : static void ext4_finish_bio(struct bio *bio)
     101             : {
     102           0 :         struct folio_iter fi;
     103             : 
     104           0 :         bio_for_each_folio_all(fi, bio) {
     105           0 :                 struct folio *folio = fi.folio;
     106           0 :                 struct folio *io_folio = NULL;
     107           0 :                 struct buffer_head *bh, *head;
     108           0 :                 size_t bio_start = fi.offset;
     109           0 :                 size_t bio_end = bio_start + fi.length;
     110           0 :                 unsigned under_io = 0;
     111           0 :                 unsigned long flags;
     112             : 
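                     :         /*
                     :          * Writes to encrypted files go through an fscrypt bounce
                     :          * folio; map IO status and completion back onto the
                     :          * pagecache folio it shadows.
                     :          */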
     113           0 :                 if (fscrypt_is_bounce_folio(folio)) {
     114             :                         io_folio = folio;
     115             :                         folio = fscrypt_pagecache_folio(folio);
     116             :                 }
     117             : 
     118           0 :                 if (bio->bi_status) {
     119           0 :                         int err = blk_status_to_errno(bio->bi_status);
     120           0 :                         folio_set_error(folio);
     121           0 :                         mapping_set_error(folio->mapping, err);
     122             :                 }
     123           0 :                 bh = head = folio_buffers(folio);
     124             :                 /*
     125             :                  * We check all buffers in the folio under b_uptodate_lock to
     126             :                  * avoid races with other end_io handlers clearing the
     127             :                  * async_write flag.
     128             :                  */
     128           0 :                 spin_lock_irqsave(&head->b_uptodate_lock, flags);
     129           0 :                 do {
     130           0 :                         if (bh_offset(bh) < bio_start ||
     131           0 :                             bh_offset(bh) + bh->b_size > bio_end) {
     132           0 :                                 if (buffer_async_write(bh))
     133           0 :                                         under_io++;
     134           0 :                                 continue;
     135             :                         }
     136           0 :                         clear_buffer_async_write(bh);
     137           0 :                         if (bio->bi_status) {
     138           0 :                                 set_buffer_write_io_error(bh);
     139           0 :                                 buffer_io_error(bh);
     140             :                         }
     141           0 :                 } while ((bh = bh->b_this_page) != head);
     142           0 :                 spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
     143           0 :                 if (!under_io) {
     144           0 :                         fscrypt_free_bounce_page(&io_folio->page);
     145           0 :                         folio_end_writeback(folio);
     146             :                 }
     147             :         }
     148           0 : }
     149             : 
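                     : /*
                     :  * Final teardown of an io_end: finish and free the bios that were
                     :  * chained onto it by ext4_end_bio(), release its extent-range vecs,
                     :  * and return the structure to the slab cache.
                     :  */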
     150           0 : static void ext4_release_io_end(ext4_io_end_t *io_end)
     151             : {
     152           0 :         struct bio *bio, *next_bio;
     153             : 
     154           0 :         BUG_ON(!list_empty(&io_end->list));
     155           0 :         BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
     156           0 :         WARN_ON(io_end->handle);
     157             : 
     158           0 :         for (bio = io_end->bio; bio; bio = next_bio) {
     159           0 :                 next_bio = bio->bi_private;
     160           0 :                 ext4_finish_bio(bio);
     161           0 :                 bio_put(bio);
     162             :         }
     163           0 :         ext4_free_io_end_vec(io_end);
     164           0 :         kmem_cache_free(io_end_cachep, io_end);
     165           0 : }
     166             : 
     167             : /*
     168             :  * Check a range of space and convert unwritten extents to written. Note that
     169             :  * we are protected from truncate touching the same part of the extent tree
     170             :  * by the fact that truncate code waits for all DIO to finish (thus exclusion
     171             :  * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
     172             :  * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
     173             :  * completed (this happens from ext4_release_io_end()).
     174             :  */
     175           0 : static int ext4_end_io_end(ext4_io_end_t *io_end)
     176             : {
     177           0 :         struct inode *inode = io_end->inode;
     178           0 :         handle_t *handle = io_end->handle;
     179           0 :         int ret = 0;
     180             : 
     181           0 :         ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu, list->next 0x%p, "
     182             :                    "list->prev 0x%p\n",
     183             :                    io_end, inode->i_ino, io_end->list.next, io_end->list.prev);
     184             : 
     185           0 :         io_end->handle = NULL;       /* Following call will use up the handle */
     186           0 :         ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
     187           0 :         if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
     188           0 :                 ext4_msg(inode->i_sb, KERN_EMERG,
     189             :                          "failed to convert unwritten extents to written "
     190             :                          "extents -- potential data loss!  "
     191             :                          "(inode %lu, error %d)", inode->i_ino, ret);
     192             :         }
     193           0 :         ext4_clear_io_unwritten_flag(io_end);
     194           0 :         ext4_release_io_end(io_end);
     195           0 :         return ret;
     196             : }
     197             : 
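                     : /* Debug helper: dump an inode's list of completed io_ends. */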
     198             : static void dump_completed_IO(struct inode *inode, struct list_head *head)
     199             : {
     200             : #ifdef  EXT4FS_DEBUG
     201             :         struct list_head *cur, *before, *after;
     202             :         ext4_io_end_t *io_end, *io_end0, *io_end1;
     203             : 
     204             :         if (list_empty(head))
     205             :                 return;
     206             : 
     207             :         ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
     208             :         list_for_each_entry(io_end, head, list) {
     209             :                 cur = &io_end->list;
     210             :                 before = cur->prev;
     211             :                 io_end0 = container_of(before, ext4_io_end_t, list);
     212             :                 after = cur->next;
     213             :                 io_end1 = container_of(after, ext4_io_end_t, list);
     214             : 
     215             :                 ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
     216             :                             io_end, inode->i_ino, io_end0, io_end1);
     217             :         }
     218             : #endif
     219             : }
     220             : 
     221             : /* Add the io_end to the per-inode completed end_io list. */
     222           0 : static void ext4_add_complete_io(ext4_io_end_t *io_end)
     223             : {
     224           0 :         struct ext4_inode_info *ei = EXT4_I(io_end->inode);
     225           0 :         struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
     226           0 :         struct workqueue_struct *wq;
     227           0 :         unsigned long flags;
     228             : 
     229             :         /* Only reserved conversions from writeback should enter here */
     230           0 :         WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
     231           0 :         WARN_ON(!io_end->handle && sbi->s_journal);
     232           0 :         spin_lock_irqsave(&ei->i_completed_io_lock, flags);
     233           0 :         wq = sbi->rsv_conversion_wq;
     234           0 :         if (list_empty(&ei->i_rsv_conversion_list))
     235           0 :                 queue_work(wq, &ei->i_rsv_conversion_work);
     236           0 :         list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
     237           0 :         spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
     238           0 : }
     239             : 
     240           0 : static int ext4_do_flush_completed_IO(struct inode *inode,
     241             :                                       struct list_head *head)
     242             : {
     243           0 :         ext4_io_end_t *io_end;
     244           0 :         struct list_head unwritten;
     245           0 :         unsigned long flags;
     246           0 :         struct ext4_inode_info *ei = EXT4_I(inode);
     247           0 :         int err, ret = 0;
     248             : 
     249           0 :         spin_lock_irqsave(&ei->i_completed_io_lock, flags);
     250           0 :         dump_completed_IO(inode, head);
     251           0 :         list_replace_init(head, &unwritten);
     252           0 :         spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
     253             : 
     254           0 :         while (!list_empty(&unwritten)) {
     255           0 :                 io_end = list_entry(unwritten.next, ext4_io_end_t, list);
     256           0 :                 BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
     257           0 :                 list_del_init(&io_end->list);
     258             : 
     259           0 :                 err = ext4_end_io_end(io_end);
     260           0 :                 if (unlikely(!ret && err))
     261           0 :                         ret = err;
     262             :         }
     263           0 :         return ret;
     264             : }
     265             : 
     266             : /*
     267             :  * Work on completed IO, to convert unwritten extents to written extents.
     268             :  */
     269           0 : void ext4_end_io_rsv_work(struct work_struct *work)
     270             : {
     271           0 :         struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
     272             :                                                   i_rsv_conversion_work);
     273           0 :         ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
     274           0 : }
     275             : 
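                     : /*
                     :  * Allocate a new io_end with a single reference held by the caller.
                     :  * An extra reference is taken for each bio attached to the io_end
                     :  * (see ext4_get_io_end()) and dropped again on bio completion.
                     :  */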
     276           0 : ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
     277             : {
     278           0 :         ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);
     279             : 
     280           0 :         if (io_end) {
     281           0 :                 io_end->inode = inode;
     282           0 :                 INIT_LIST_HEAD(&io_end->list);
     283           0 :                 INIT_LIST_HEAD(&io_end->list_vec);
     284           0 :                 refcount_set(&io_end->count, 1);
     285             :         }
     286           0 :         return io_end;
     287             : }
     288             : 
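                     : /*
                     :  * Drop a reference without blocking.  If this was the last reference
                     :  * and unwritten extents still need conversion, the io_end is queued
                     :  * to the per-inode list processed by ext4_end_io_rsv_work(), which
                     :  * makes this variant safe to call from bio completion context.
                     :  */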
     289           0 : void ext4_put_io_end_defer(ext4_io_end_t *io_end)
     290             : {
     291           0 :         if (refcount_dec_and_test(&io_end->count)) {
     292           0 :                 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
     293           0 :                                 list_empty(&io_end->list_vec)) {
     294           0 :                         ext4_release_io_end(io_end);
     295           0 :                         return;
     296             :                 }
     297           0 :                 ext4_add_complete_io(io_end);
     298             :         }
     299             : }
     300             : 
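                     : /*
                     :  * Drop a reference from process context.  Unlike the _defer variant,
                     :  * any pending unwritten-extent conversion is performed synchronously.
                     :  */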
     301           0 : int ext4_put_io_end(ext4_io_end_t *io_end)
     302             : {
     303           0 :         int err = 0;
     304             : 
     305           0 :         if (refcount_dec_and_test(&io_end->count)) {
     306           0 :                 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
     307           0 :                         err = ext4_convert_unwritten_io_end_vec(io_end->handle,
     308             :                                                                 io_end);
     309           0 :                         io_end->handle = NULL;
     310           0 :                         ext4_clear_io_unwritten_flag(io_end);
     311             :                 }
     312           0 :                 ext4_release_io_end(io_end);
     313             :         }
     314           0 :         return err;
     315             : }
     316             : 
     317           0 : ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
     318             : {
     319           0 :         refcount_inc(&io_end->count);
     320           0 :         return io_end;
     321             : }
     322             : 
     323             : /* BIO completion function for page writeback */
     324           0 : static void ext4_end_bio(struct bio *bio)
     325             : {
     326           0 :         ext4_io_end_t *io_end = bio->bi_private;
     327           0 :         sector_t bi_sector = bio->bi_iter.bi_sector;
     328             : 
     329           0 :         if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
     330             :                       bio->bi_bdev,
     331             :                       (long long) bio->bi_iter.bi_sector,
     332             :                       (unsigned) bio_sectors(bio),
     333             :                       bio->bi_status)) {
     334           0 :                 ext4_finish_bio(bio);
     335           0 :                 bio_put(bio);
     336           0 :                 return;
     337             :         }
     338           0 :         bio->bi_end_io = NULL;
     339             : 
     340           0 :         if (bio->bi_status) {
     341           0 :                 struct inode *inode = io_end->inode;
     342             : 
     343           0 :                 ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
     344             :                      "(starting block %llu)",
     345             :                              bio->bi_status, inode->i_ino,
     346             :                              (unsigned long long)
     347             :                              bi_sector >> (inode->i_blkbits - 9));
     348           0 :                 mapping_set_error(inode->i_mapping,
     349             :                                 blk_status_to_errno(bio->bi_status));
     350             :         }
     351             : 
     352           0 :         if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
     353             :                 /*
     354             :                  * Link bio into list hanging from io_end. We have to do it
     355             :                  * atomically as bio completions can be racing against each
     356             :                  * other.
     357             :                  */
     358           0 :                 bio->bi_private = xchg(&io_end->bio, bio);
     359           0 :                 ext4_put_io_end_defer(io_end);
     360             :         } else {
     361             :                 /*
     362             :                  * Drop io_end reference early. Inode can get freed once
     363             :                  * we finish the bio.
     364             :                  */
     365           0 :                 ext4_put_io_end_defer(io_end);
     366           0 :                 ext4_finish_bio(bio);
     367           0 :                 bio_put(bio);
     368             :         }
     369             : }
     370             : 
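                     : /*
                     :  * Submit the bio cached in @io, if any.  Integrity writeback
                     :  * (WB_SYNC_ALL) marks the bio REQ_SYNC so the block layer treats
                     :  * it as synchronous.
                     :  */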
     371           0 : void ext4_io_submit(struct ext4_io_submit *io)
     372             : {
     373           0 :         struct bio *bio = io->io_bio;
     374             : 
     375           0 :         if (bio) {
     376           0 :                 if (io->io_wbc->sync_mode == WB_SYNC_ALL)
     377           0 :                         io->io_bio->bi_opf |= REQ_SYNC;
     378           0 :                 submit_bio(io->io_bio);
     379             :         }
     380           0 :         io->io_bio = NULL;
     381           0 : }
     382             : 
     383           0 : void ext4_io_submit_init(struct ext4_io_submit *io,
     384             :                          struct writeback_control *wbc)
     385             : {
     386           0 :         io->io_wbc = wbc;
     387           0 :         io->io_bio = NULL;
     388           0 :         io->io_end = NULL;
     389           0 : }
     390             : 
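                     : /*
                     :  * Start a new write bio positioned at @bh and attach the current
                     :  * io_end to it, taking an extra io_end reference that is dropped
                     :  * from ext4_end_bio().
                     :  */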
     391           0 : static void io_submit_init_bio(struct ext4_io_submit *io,
     392             :                                struct buffer_head *bh)
     393             : {
     394           0 :         struct bio *bio;
     395             : 
     396             :         /*
     397             :          * bio_alloc will _always_ be able to allocate a bio if
     398             :          * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
     399             :          */
     400           0 :         bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
     401           0 :         fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
     402           0 :         bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
     403           0 :         bio->bi_end_io = ext4_end_bio;
     404           0 :         bio->bi_private = ext4_get_io_end(io->io_end);
     405           0 :         io->io_bio = bio;
     406           0 :         io->io_next_block = bh->b_blocknr;
     407           0 :         wbc_init_bio(io->io_wbc, bio);
     408           0 : }
     409             : 
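                     : /*
                     :  * Add @bh to the bio under construction.  The cached bio is flushed
                     :  * and a fresh one started whenever the buffer is not physically
                     :  * contiguous with the previous block, the fscrypt contexts cannot be
                     :  * merged, or the bio is already full (bio_add_folio() failing takes
                     :  * the same submit-and-retry path).
                     :  */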
     410           0 : static void io_submit_add_bh(struct ext4_io_submit *io,
     411             :                              struct inode *inode,
     412             :                              struct folio *folio,
     413             :                              struct folio *io_folio,
     414             :                              struct buffer_head *bh)
     415             : {
     416           0 :         if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
     417             :                            !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
     418           0 : submit_and_retry:
     419           0 :                 ext4_io_submit(io);
     420             :         }
     421           0 :         if (io->io_bio == NULL)
     422           0 :                 io_submit_init_bio(io, bh);
     423           0 :         if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
     424           0 :                 goto submit_and_retry;
     425           0 :         wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
     426           0 :         io->io_next_block++;
     427           0 : }
     428             : 
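                     : /*
                     :  * Write back the first @len bytes of @folio.  Buffers are marked
                     :  * async_write in a first pass before any bio is submitted so that a
                     :  * fast-completing bio cannot end folio writeback while later buffers
                     :  * are still being queued; a second pass then adds the marked buffers
                     :  * to bios.
                     :  *
                     :  * Callers are expected to follow the pattern used by the ext4
                     :  * writeback code; roughly (a sketch, not verbatim kernel code):
                     :  *
                     :  *      ext4_io_submit_init(&io, wbc);
                     :  *      io.io_end = ext4_init_io_end(inode, GFP_NOFS);
                     :  *      ret = ext4_bio_write_folio(&io, folio, len);
                     :  *      ext4_io_submit(&io);
                     :  *      ext4_put_io_end(io.io_end);
                     :  */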
     429           0 : int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
     430             :                 size_t len)
     431             : {
     432           0 :         struct folio *io_folio = folio;
     433           0 :         struct inode *inode = folio->mapping->host;
     434           0 :         unsigned block_start;
     435           0 :         struct buffer_head *bh, *head;
     436           0 :         int ret = 0;
     437           0 :         int nr_to_submit = 0;
     438           0 :         struct writeback_control *wbc = io->io_wbc;
     439           0 :         bool keep_towrite = false;
     440             : 
     441           0 :         BUG_ON(!folio_test_locked(folio));
     442           0 :         BUG_ON(folio_test_writeback(folio));
     443             : 
     444           0 :         folio_clear_error(folio);
     445             : 
     446             :         /*
     447             :          * Comments copied from block_write_full_page:
     448             :          *
     449             :          * The folio straddles i_size.  It must be zeroed out on each and every
     450             :          * writepage invocation because it may be mmapped.  "A file is mapped
     451             :          * in multiples of the page size.  For a file that is not a multiple of
     452             :          * the page size, the remaining memory is zeroed when mapped, and
     453             :          * writes to that region are not written out to the file."
     454             :          */
     455           0 :         if (len < folio_size(folio))
     456           0 :                 folio_zero_segment(folio, len, folio_size(folio));
     457             :         /*
     458             :          * In the first loop we prepare and mark buffers to submit. We have to
     459             :          * mark all buffers in the folio before submitting so that
     460             :          * folio_end_writeback() cannot be called from ext4_end_bio() when IO
     461             :          * on the first buffer finishes and we are still working on submitting
     462             :          * the second buffer.
     463             :          */
     464           0 :         bh = head = folio_buffers(folio);
     465           0 :         do {
     466           0 :                 block_start = bh_offset(bh);
     467           0 :                 if (block_start >= len) {
     468           0 :                         clear_buffer_dirty(bh);
     469           0 :                         set_buffer_uptodate(bh);
     470           0 :                         continue;
     471             :                 }
     472           0 :                 if (!buffer_dirty(bh) || buffer_delay(bh) ||
     473           0 :                     !buffer_mapped(bh) || buffer_unwritten(bh)) {
     474             :                         /* A hole? We can safely clear the dirty bit */
     475           0 :                         if (!buffer_mapped(bh))
     476           0 :                                 clear_buffer_dirty(bh);
     477             :                         /*
     478             :                          * Keeping a buffer dirty that we cannot write? Make
     479             :                          * sure to redirty the folio and keep the TOWRITE tag
     480             :                          * so racing WB_SYNC_ALL writeback does not skip the folio.
     481             :                          * This happens e.g. when doing writeout for
     482             :                          * transaction commit or when journalled data is not
     483             :                          * yet committed.
     484             :                          */
     485           0 :                         if (buffer_dirty(bh) ||
     486           0 :                             (buffer_jbd(bh) && buffer_jbddirty(bh))) {
     487           0 :                                 if (!folio_test_dirty(folio))
     488           0 :                                         folio_redirty_for_writepage(wbc, folio);
     489             :                                 keep_towrite = true;
     490             :                         }
     491           0 :                         continue;
     492             :                 }
     493           0 :                 if (buffer_new(bh))
     494           0 :                         clear_buffer_new(bh);
     495           0 :                 set_buffer_async_write(bh);
     496           0 :                 clear_buffer_dirty(bh);
     497           0 :                 nr_to_submit++;
     498           0 :         } while ((bh = bh->b_this_page) != head);
     499             : 
     500             :         /* Nothing to submit? Just unlock the folio... */
     501           0 :         if (!nr_to_submit)
     502             :                 return 0;
     503             : 
     504           0 :         bh = head = folio_buffers(folio);
     505             : 
     506             :         /*
     507             :          * If any blocks are being written to an encrypted file, encrypt them
     508             :          * into a bounce page.  For simplicity, just encrypt until the last
     509             :          * block which might be needed.  This may cause some unneeded blocks
     510             :          * (e.g. holes) to be unnecessarily encrypted, but this is rare and
     511             :          * can't happen in the common case of blocksize == PAGE_SIZE.
     512             :          */
     513           0 :         if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
     514             :                 gfp_t gfp_flags = GFP_NOFS;
     515             :                 unsigned int enc_bytes = round_up(len, i_blocksize(inode));
     516             :                 struct page *bounce_page;
     517             : 
     518             :                 /*
     519             :                  * Since bounce page allocation uses a mempool, we can only use
     520             :                  * a waiting mask (i.e. request guaranteed allocation) on the
     521             :                  * first page of the bio.  Otherwise it can deadlock.
     522             :                  */
     523             :                 if (io->io_bio)
     524             :                         gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
     525             :         retry_encrypt:
     526             :                 bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
     527             :                                         enc_bytes, 0, gfp_flags);
     528             :                 if (IS_ERR(bounce_page)) {
     529             :                         ret = PTR_ERR(bounce_page);
     530             :                         if (ret == -ENOMEM &&
     531             :                             (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
     532             :                                 gfp_t new_gfp_flags = GFP_NOFS;
     533             :                                 if (io->io_bio)
     534             :                                         ext4_io_submit(io);
     535             :                                 else
     536             :                                         new_gfp_flags |= __GFP_NOFAIL;
     537             :                                 memalloc_retry_wait(gfp_flags);
     538             :                                 gfp_flags = new_gfp_flags;
     539             :                                 goto retry_encrypt;
     540             :                         }
     541             : 
     542             :                         printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
     543             :                         folio_redirty_for_writepage(wbc, folio);
     544             :                         do {
     545             :                                 if (buffer_async_write(bh)) {
     546             :                                         clear_buffer_async_write(bh);
     547             :                                         set_buffer_dirty(bh);
     548             :                                 }
     549             :                                 bh = bh->b_this_page;
     550             :                         } while (bh != head);
     551             : 
     552             :                         return ret;
     553             :                 }
     554             :                 io_folio = page_folio(bounce_page);
     555             :         }
     556             : 
     557           0 :         __folio_start_writeback(folio, keep_towrite);
     558             : 
     559             :         /* Now submit buffers to write */
     560           0 :         do {
     561           0 :                 if (!buffer_async_write(bh))
     562           0 :                         continue;
     563           0 :                 io_submit_add_bh(io, inode, folio, io_folio, bh);
     564           0 :         } while ((bh = bh->b_this_page) != head);
     565             : 
     566             :         return 0;
     567             : }

Generated by: LCOV version 1.14