LCOV - code coverage report
Current view: top level - fs/ext4 - readpage.c (source / functions)
Test: fstests of 6.5.0-rc3-djwa @ Mon Jul 31 20:08:17 PDT 2023
Date: 2023-07-31 20:08:17
Coverage:   Lines: 5 of 189 hit (2.6 %)   Functions: 1 of 8 hit (12.5 %)

// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_folio(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS     128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;
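
/*
 * Note (editorial): read bios that need post-processing get their context
 * from the mempool above, which preallocates NUM_PREALLOC_POST_READ_CTXS
 * entries in ext4_init_post_read_processing().  That reservation is why
 * ext4_set_bio_post_read_ctx() can rely on mempool_alloc(..., GFP_NOFS)
 * never failing.
 */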

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
        STEP_MAX,
};

struct bio_post_read_ctx {
        struct bio *bio;
        struct work_struct work;
        unsigned int cur_step;
        unsigned int enabled_steps;
};
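
/*
 * Note (editorial): enabled_steps is a bitmask of (1 << STEP_*) values set up
 * by ext4_set_bio_post_read_ctx(), and cur_step tracks how far this bio has
 * progressed through the pipeline.  A file that is both encrypted and
 * verity-protected has both the STEP_DECRYPT and STEP_VERITY bits set, so
 * decryption always runs before verification.
 */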

static void __read_end_io(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                struct folio *folio = fi.folio;

                if (bio->bi_status)
                        folio_clear_uptodate(folio);
                else
                        folio_mark_uptodate(folio);
                folio_unlock(folio);
        }
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

static void decrypt_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        if (fscrypt_decrypt_bio(bio))
                bio_post_read_processing(ctx);
        else
                __read_end_io(bio);
}

static void verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        /*
         * fsverity_verify_bio() may call readahead() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
         * This is safe because verity is the last post-read step.
         */
        BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        fsverity_verify_bio(bio);

        __read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                        INIT_WORK(&ctx->work, decrypt_work);
                        fscrypt_enqueue_decrypt_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
                        fsverity_enqueue_verify_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                fallthrough;
        default:
                __read_end_io(ctx->bio);
        }
}
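
/*
 * Note (editorial): a worked example of the state machine above for a file
 * with both encryption and verity enabled.  mpage_end_io() resets cur_step
 * to STEP_INITIAL and calls bio_post_read_processing():
 *
 *   ++cur_step == STEP_DECRYPT  -> decrypt_work() queued on the fscrypt queue
 *   decrypt_work()              -> calls bio_post_read_processing() again
 *   ++cur_step == STEP_VERITY   -> verity_work() queued on the fsverity queue
 *   verity_work()               -> frees the ctx, verifies, __read_end_io()
 *
 * For a decrypt-only file the second pass falls through the STEP_VERITY case
 * to the default case, which completes the bio directly.
 */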

static bool bio_post_read_required(struct bio *bio)
{
        return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_folio().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        if (bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                ctx->cur_step = STEP_INITIAL;
                bio_post_read_processing(ctx);
                return;
        }
        __read_end_io(bio);
}

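/*
 * Note (editorial): ext4_need_verity() below is true only for pages that lie
 * within i_size, i.e. pages that actually contain file data.  Pages read from
 * beyond EOF of a verity file are not verified here.
 */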
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

static void ext4_set_bio_post_read_ctx(struct bio *bio,
                                       const struct inode *inode,
                                       pgoff_t first_idx)
{
        unsigned int post_read_steps = 0;

        if (fscrypt_inode_uses_fs_layer_crypto(inode))
                post_read_steps |= 1 << STEP_DECRYPT;

        if (ext4_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                struct bio_post_read_ctx *ctx =
                        mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

                ctx->bio = bio;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }
}

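/*
 * Note (editorial): for fs-verity files the limit below is s_maxbytes rather
 * than i_size, because ext4 stores the verity metadata (Merkle tree and
 * descriptor) past EOF and reads it through the page cache like ordinary
 * file data.
 */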
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
                return inode->i_sb->s_maxbytes;

        return i_size_read(inode);
}

int ext4_mpage_readpages(struct inode *inode,
                struct readahead_control *rac, struct folio *folio)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t next_block;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;
        unsigned int nr_pages = rac ? readahead_count(rac) : 1;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (; nr_pages; nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (rac)
                        folio = readahead_folio(rac);
                prefetchw(&folio->flags);

                if (folio_buffers(folio))
                        goto confused;

                block_in_file = next_block =
                        (sector_t)folio->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (ext4_readpage_limit(inode) +
                                      blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                        relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this folio.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        folio_set_error(folio);
                                        folio_zero_segment(folio, 0,
                                                          folio_size(folio));
                                        folio_unlock(folio);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
                if (first_hole != blocks_per_page) {
                        folio_zero_segment(folio, first_hole << blkbits,
                                          folio_size(folio));
                        if (first_hole == 0) {
                                if (ext4_need_verity(inode, folio->index) &&
                                    !fsverity_verify_folio(folio))
                                        goto set_error_page;
                                folio_mark_uptodate(folio);
                                folio_unlock(folio);
                                continue;
                        }
                } else if (fully_mapped) {
                        folio_set_mappedtodisk(folio);
                }

                /*
                 * This folio will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1 ||
                            !fscrypt_mergeable_bio(bio, inode, next_block))) {
                submit_and_realloc:
                        submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        /*
                         * bio_alloc will _always_ be able to allocate a bio if
                         * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                         */
                        bio = bio_alloc(bdev, bio_max_segs(nr_pages),
                                        REQ_OP_READ, GFP_KERNEL);
                        fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
                                                  GFP_KERNEL);
                        ext4_set_bio_post_read_ctx(bio, inode, folio->index);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        if (rac)
                                bio->bi_opf |= REQ_RAHEAD;
                }

                length = first_hole << blkbits;
                if (!bio_add_folio(bio, folio, length, 0))
                        goto submit_and_realloc;

                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                continue;
        confused:
                if (bio) {
                        submit_bio(bio);
                        bio = NULL;
                }
                if (!folio_test_uptodate(folio))
                        block_read_full_folio(folio, ext4_get_block);
                else
                        folio_unlock(folio);
next_page:
                ; /* A label shall be followed by a statement until C23 */
        }
        if (bio)
                submit_bio(bio);
        return 0;
}
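
/*
 * Note (editorial): a minimal usage sketch, not part of this file.  The
 * function names are illustrative; in ext4 the address_space operations in
 * fs/ext4/inode.c play this role.  ->read_folio passes a single locked folio
 * and no readahead control, while ->readahead passes the readahead control
 * and a NULL folio:
 *
 *      static int example_read_folio(struct file *file, struct folio *folio)
 *      {
 *              struct inode *inode = folio->mapping->host;
 *
 *              return ext4_mpage_readpages(inode, NULL, folio);
 *      }
 *
 *      static void example_readahead(struct readahead_control *rac)
 *      {
 *              struct inode *inode = rac->mapping->host;
 *
 *              ext4_mpage_readpages(inode, rac, NULL);
 *      }
 */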

int __init ext4_init_post_read_processing(void)
{
        bio_post_read_ctx_cache = KMEM_CACHE(bio_post_read_ctx, SLAB_RECLAIM_ACCOUNT);

        if (!bio_post_read_ctx_cache)
                goto fail;
        bio_post_read_ctx_pool =
                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
                                         bio_post_read_ctx_cache);
        if (!bio_post_read_ctx_pool)
                goto fail_free_cache;
        return 0;

fail_free_cache:
        kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
        return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
        mempool_destroy(bio_post_read_ctx_pool);
        kmem_cache_destroy(bio_post_read_ctx_cache);
}

Generated by: LCOV version 1.14