LCOV - code coverage report
Current view: top level - fs/btrfs - extent_io.c (source / functions)
Test: fstests of 6.5.0-rc3-acha @ Mon Jul 31 20:08:06 PDT 2023
Date: 2023-07-31 20:08:07

                 Hit   Total   Coverage
Lines:             3    2166      0.1 %
Functions:         1     106      0.9 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : 
       3             : #include <linux/bitops.h>
       4             : #include <linux/slab.h>
       5             : #include <linux/bio.h>
       6             : #include <linux/mm.h>
       7             : #include <linux/pagemap.h>
       8             : #include <linux/page-flags.h>
       9             : #include <linux/sched/mm.h>
      10             : #include <linux/spinlock.h>
      11             : #include <linux/blkdev.h>
      12             : #include <linux/swap.h>
      13             : #include <linux/writeback.h>
      14             : #include <linux/pagevec.h>
      15             : #include <linux/prefetch.h>
      16             : #include <linux/fsverity.h>
      17             : #include "misc.h"
      18             : #include "extent_io.h"
      19             : #include "extent-io-tree.h"
      20             : #include "extent_map.h"
      21             : #include "ctree.h"
      22             : #include "btrfs_inode.h"
      23             : #include "bio.h"
      24             : #include "check-integrity.h"
      25             : #include "locking.h"
      26             : #include "rcu-string.h"
      27             : #include "backref.h"
      28             : #include "disk-io.h"
      29             : #include "subpage.h"
      30             : #include "zoned.h"
      31             : #include "block-group.h"
      32             : #include "compression.h"
      33             : #include "fs.h"
      34             : #include "accessors.h"
      35             : #include "file-item.h"
      36             : #include "file.h"
      37             : #include "dev-replace.h"
      38             : #include "super.h"
      39             : #include "transaction.h"
      40             : 
      41             : static struct kmem_cache *extent_buffer_cache;
      42             : 
      43             : #ifdef CONFIG_BTRFS_DEBUG
      44             : static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
      45             : {
      46             :         struct btrfs_fs_info *fs_info = eb->fs_info;
      47             :         unsigned long flags;
      48             : 
      49             :         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
      50             :         list_add(&eb->leak_list, &fs_info->allocated_ebs);
      51             :         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
      52             : }
      53             : 
      54             : static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb)
      55             : {
      56             :         struct btrfs_fs_info *fs_info = eb->fs_info;
      57             :         unsigned long flags;
      58             : 
      59             :         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
      60             :         list_del(&eb->leak_list);
      61             :         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
      62             : }
      63             : 
      64             : void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
      65             : {
      66             :         struct extent_buffer *eb;
      67             :         unsigned long flags;
      68             : 
      69             :         /*
      70             :          * If we didn't get into open_ctree our allocated_ebs will not be
      71             :          * initialized, so just skip this.
      72             :          */
      73             :         if (!fs_info->allocated_ebs.next)
      74             :                 return;
      75             : 
      76             :         WARN_ON(!list_empty(&fs_info->allocated_ebs));
      77             :         spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
      78             :         while (!list_empty(&fs_info->allocated_ebs)) {
      79             :                 eb = list_first_entry(&fs_info->allocated_ebs,
      80             :                                       struct extent_buffer, leak_list);
      81             :                 pr_err(
      82             :         "BTRFS: buffer leak start %llu len %lu refs %d bflags %lu owner %llu\n",
      83             :                        eb->start, eb->len, atomic_read(&eb->refs), eb->bflags,
      84             :                        btrfs_header_owner(eb));
      85             :                 list_del(&eb->leak_list);
      86             :                 kmem_cache_free(extent_buffer_cache, eb);
      87             :         }
      88             :         spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
      89             : }
      90             : #else
      91             : #define btrfs_leak_debug_add_eb(eb)                     do {} while (0)
      92             : #define btrfs_leak_debug_del_eb(eb)                     do {} while (0)
      93             : #endif
      94             : 
      95             : /*
       96             :  * Structure to record info about the bio being assembled, and other info like
       97             :  * how many bytes remain before the stripe/ordered extent boundary.
      98             :  */
      99             : struct btrfs_bio_ctrl {
     100             :         struct btrfs_bio *bbio;
     101             :         enum btrfs_compression_type compress_type;
     102             :         u32 len_to_oe_boundary;
     103             :         blk_opf_t opf;
     104             :         btrfs_bio_end_io_t end_io_func;
     105             :         struct writeback_control *wbc;
     106             : };
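
A minimal usage sketch, not part of extent_io.c itself: callers in this file
stack-allocate the control structure and set opf and end_io_func up front,
letting the helpers below fill in bbio on demand. The call sequence here is
hypothetical; only the field and function names come from this file.

        /* Hedged sketch: set up a bio_ctrl for a buffered read. */
        struct btrfs_bio_ctrl bio_ctrl = {
                .opf = REQ_OP_READ,
                .end_io_func = end_bio_extent_readpage,
        };

        /* ... pages are attached via submit_extent_page() ... */
        submit_one_bio(&bio_ctrl);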
     107             : 
     108           0 : static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
     109             : {
     110           0 :         struct btrfs_bio *bbio = bio_ctrl->bbio;
     111             : 
     112           0 :         if (!bbio)
     113             :                 return;
     114             : 
     115             :         /* Caller should ensure the bio has at least some range added */
     116           0 :         ASSERT(bbio->bio.bi_iter.bi_size);
     117             : 
     118           0 :         if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
     119           0 :             bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
     120           0 :                 btrfs_submit_compressed_read(bbio);
     121             :         else
     122           0 :                 btrfs_submit_bio(bbio, 0);
     123             : 
     124             :         /* The bbio is owned by the end_io handler now */
     125           0 :         bio_ctrl->bbio = NULL;
     126             : }
     127             : 
     128             : /*
     129             :  * Submit or fail the current bio in the bio_ctrl structure.
     130             :  */
     131           0 : static void submit_write_bio(struct btrfs_bio_ctrl *bio_ctrl, int ret)
     132             : {
     133           0 :         struct btrfs_bio *bbio = bio_ctrl->bbio;
     134             : 
     135           0 :         if (!bbio)
     136             :                 return;
     137             : 
     138           0 :         if (ret) {
     139           0 :                 ASSERT(ret < 0);
     140           0 :                 btrfs_bio_end_io(bbio, errno_to_blk_status(ret));
     141             :                 /* The bio is owned by the end_io handler now */
     142           0 :                 bio_ctrl->bbio = NULL;
     143             :         } else {
     144           0 :                 submit_one_bio(bio_ctrl);
     145             :         }
     146             : }
     147             : 
     148           2 : int __init extent_buffer_init_cachep(void)
     149             : {
     150           2 :         extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
     151             :                         sizeof(struct extent_buffer), 0,
     152             :                         SLAB_MEM_SPREAD, NULL);
     153           2 :         if (!extent_buffer_cache)
     154           0 :                 return -ENOMEM;
     155             : 
     156             :         return 0;
     157             : }
     158             : 
     159           0 : void __cold extent_buffer_free_cachep(void)
     160             : {
     161             :         /*
     162             :          * Make sure all delayed rcu free are flushed before we
     163             :          * destroy caches.
     164             :          */
     165           0 :         rcu_barrier();
     166           0 :         kmem_cache_destroy(extent_buffer_cache);
     167           0 : }
     168             : 
     169           0 : void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
     170             : {
     171           0 :         unsigned long index = start >> PAGE_SHIFT;
     172           0 :         unsigned long end_index = end >> PAGE_SHIFT;
     173           0 :         struct page *page;
     174             : 
     175           0 :         while (index <= end_index) {
     176           0 :                 page = find_get_page(inode->i_mapping, index);
     177           0 :                 BUG_ON(!page); /* Pages should be in the extent_io_tree */
     178           0 :                 clear_page_dirty_for_io(page);
     179           0 :                 put_page(page);
     180           0 :                 index++;
     181             :         }
     182           0 : }
     183             : 
     184           0 : void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
     185             : {
     186           0 :         struct address_space *mapping = inode->i_mapping;
     187           0 :         unsigned long index = start >> PAGE_SHIFT;
     188           0 :         unsigned long end_index = end >> PAGE_SHIFT;
     189           0 :         struct folio *folio;
     190             : 
     191           0 :         while (index <= end_index) {
     192           0 :                 folio = filemap_get_folio(mapping, index);
     193           0 :                 filemap_dirty_folio(mapping, folio);
     194           0 :                 folio_account_redirty(folio);
     195           0 :                 index += folio_nr_pages(folio);
     196           0 :                 folio_put(folio);
     197             :         }
     198           0 : }
     199             : 
     200             : /*
     201             :  * Process one page for __process_pages_contig().
     202             :  *
     203             :  * Return >0 if we hit @page == @locked_page.
     204             :  * Return 0 if we updated the page status.
      205             :  * Return -EAGAIN if we need to try again.
      206             :  * (For the PAGE_LOCK case, when the page is dirty or no longer belongs to the mapping.)
     207             :  */
     208           0 : static int process_one_page(struct btrfs_fs_info *fs_info,
     209             :                             struct address_space *mapping,
     210             :                             struct page *page, struct page *locked_page,
     211             :                             unsigned long page_ops, u64 start, u64 end)
     212             : {
     213           0 :         u32 len;
     214             : 
     215           0 :         ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX);
     216           0 :         len = end + 1 - start;
     217             : 
     218           0 :         if (page_ops & PAGE_SET_ORDERED)
     219           0 :                 btrfs_page_clamp_set_ordered(fs_info, page, start, len);
     220           0 :         if (page_ops & PAGE_START_WRITEBACK) {
     221           0 :                 btrfs_page_clamp_clear_dirty(fs_info, page, start, len);
     222           0 :                 btrfs_page_clamp_set_writeback(fs_info, page, start, len);
     223             :         }
     224           0 :         if (page_ops & PAGE_END_WRITEBACK)
     225           0 :                 btrfs_page_clamp_clear_writeback(fs_info, page, start, len);
     226             : 
     227           0 :         if (page == locked_page)
     228             :                 return 1;
     229             : 
     230           0 :         if (page_ops & PAGE_LOCK) {
     231           0 :                 int ret;
     232             : 
     233           0 :                 ret = btrfs_page_start_writer_lock(fs_info, page, start, len);
     234           0 :                 if (ret)
     235             :                         return ret;
     236           0 :                 if (!PageDirty(page) || page->mapping != mapping) {
     237           0 :                         btrfs_page_end_writer_lock(fs_info, page, start, len);
     238           0 :                         return -EAGAIN;
     239             :                 }
     240             :         }
     241           0 :         if (page_ops & PAGE_UNLOCK)
     242           0 :                 btrfs_page_end_writer_lock(fs_info, page, start, len);
     243             :         return 0;
     244             : }
     245             : 
     246           0 : static int __process_pages_contig(struct address_space *mapping,
     247             :                                   struct page *locked_page,
     248             :                                   u64 start, u64 end, unsigned long page_ops,
     249             :                                   u64 *processed_end)
     250             : {
     251           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
     252           0 :         pgoff_t start_index = start >> PAGE_SHIFT;
     253           0 :         pgoff_t end_index = end >> PAGE_SHIFT;
     254           0 :         pgoff_t index = start_index;
     255           0 :         unsigned long pages_processed = 0;
     256           0 :         struct folio_batch fbatch;
     257           0 :         int err = 0;
     258           0 :         int i;
     259             : 
     260           0 :         if (page_ops & PAGE_LOCK) {
     261             :                 ASSERT(page_ops == PAGE_LOCK);
     262             :                 ASSERT(processed_end && *processed_end == start);
     263             :         }
     264             : 
     265           0 :         folio_batch_init(&fbatch);
     266           0 :         while (index <= end_index) {
     267           0 :                 int found_folios;
     268             : 
     269           0 :                 found_folios = filemap_get_folios_contig(mapping, &index,
     270             :                                 end_index, &fbatch);
     271             : 
     272           0 :                 if (found_folios == 0) {
     273             :                         /*
      274             :                          * Only if we're going to lock these pages can we find
      275             :                          * nothing at @index.
     276             :                          */
     277           0 :                         ASSERT(page_ops & PAGE_LOCK);
     278           0 :                         err = -EAGAIN;
     279           0 :                         goto out;
     280             :                 }
     281             : 
     282           0 :                 for (i = 0; i < found_folios; i++) {
     283           0 :                         int process_ret;
     284           0 :                         struct folio *folio = fbatch.folios[i];
     285           0 :                         process_ret = process_one_page(fs_info, mapping,
     286             :                                         &folio->page, locked_page, page_ops,
     287             :                                         start, end);
     288           0 :                         if (process_ret < 0) {
     289           0 :                                 err = -EAGAIN;
     290           0 :                                 folio_batch_release(&fbatch);
     291           0 :                                 goto out;
     292             :                         }
     293           0 :                         pages_processed += folio_nr_pages(folio);
     294             :                 }
     295           0 :                 folio_batch_release(&fbatch);
     296           0 :                 cond_resched();
     297             :         }
     298           0 : out:
     299           0 :         if (err && processed_end) {
     300             :                 /*
     301             :                  * Update @processed_end. I know this is awful since it has
     302             :                  * two different return value patterns (inclusive vs exclusive).
     303             :                  *
      304             :                  * But the exclusive pattern is necessary if @start is 0, otherwise
      305             :                  * we would underflow and the check against @processed_end wouldn't
      306             :                  * work as expected.
     307             :                  */
     308           0 :                 if (pages_processed)
     309           0 :                         *processed_end = min(end,
     310             :                         ((u64)(start_index + pages_processed) << PAGE_SHIFT) - 1);
     311             :                 else
     312           0 :                         *processed_end = start;
     313             :         }
     314           0 :         return err;
     315             : }
     316             : 
     317           0 : static noinline void __unlock_for_delalloc(struct inode *inode,
     318             :                                            struct page *locked_page,
     319             :                                            u64 start, u64 end)
     320             : {
     321           0 :         unsigned long index = start >> PAGE_SHIFT;
     322           0 :         unsigned long end_index = end >> PAGE_SHIFT;
     323             : 
     324           0 :         ASSERT(locked_page);
     325           0 :         if (index == locked_page->index && end_index == index)
     326             :                 return;
     327             : 
     328           0 :         __process_pages_contig(inode->i_mapping, locked_page, start, end,
     329             :                                PAGE_UNLOCK, NULL);
     330             : }
     331             : 
     332           0 : static noinline int lock_delalloc_pages(struct inode *inode,
     333             :                                         struct page *locked_page,
     334             :                                         u64 delalloc_start,
     335             :                                         u64 delalloc_end)
     336             : {
     337           0 :         unsigned long index = delalloc_start >> PAGE_SHIFT;
     338           0 :         unsigned long end_index = delalloc_end >> PAGE_SHIFT;
     339           0 :         u64 processed_end = delalloc_start;
     340           0 :         int ret;
     341             : 
     342           0 :         ASSERT(locked_page);
     343           0 :         if (index == locked_page->index && index == end_index)
     344             :                 return 0;
     345             : 
     346           0 :         ret = __process_pages_contig(inode->i_mapping, locked_page, delalloc_start,
     347             :                                      delalloc_end, PAGE_LOCK, &processed_end);
     348           0 :         if (ret == -EAGAIN && processed_end > delalloc_start)
     349           0 :                 __unlock_for_delalloc(inode, locked_page, delalloc_start,
     350             :                                       processed_end);
     351             :         return ret;
     352             : }
     353             : 
     354             : /*
     355             :  * Find and lock a contiguous range of bytes in the file marked as delalloc, no
     356             :  * more than @max_bytes.
     357             :  *
     358             :  * @start:      The original start bytenr to search.
     359             :  *              Will store the extent range start bytenr.
     360             :  * @end:        The original end bytenr of the search range
     361             :  *              Will store the extent range end bytenr.
     362             :  *
     363             :  * Return true if we find a delalloc range which starts inside the original
     364             :  * range, and @start/@end will store the delalloc range start/end.
     365             :  *
     366             :  * Return false if we can't find any delalloc range which starts inside the
     367             :  * original range, and @start/@end will be the non-delalloc range start/end.
     368             :  */
     369             : EXPORT_FOR_TESTS
     370           0 : noinline_for_stack bool find_lock_delalloc_range(struct inode *inode,
     371             :                                     struct page *locked_page, u64 *start,
     372             :                                     u64 *end)
     373             : {
     374           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
     375           0 :         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
     376           0 :         const u64 orig_start = *start;
     377           0 :         const u64 orig_end = *end;
     378             :         /* The sanity tests may not set a valid fs_info. */
     379           0 :         u64 max_bytes = fs_info ? fs_info->max_extent_size : BTRFS_MAX_EXTENT_SIZE;
     380           0 :         u64 delalloc_start;
     381           0 :         u64 delalloc_end;
     382           0 :         bool found;
     383           0 :         struct extent_state *cached_state = NULL;
     384           0 :         int ret;
     385           0 :         int loops = 0;
     386             : 
     387             :         /* Caller should pass a valid @end to indicate the search range end */
     388           0 :         ASSERT(orig_end > orig_start);
     389             : 
     390             :         /* The range should at least cover part of the page */
     391           0 :         ASSERT(!(orig_start >= page_offset(locked_page) + PAGE_SIZE ||
     392             :                  orig_end <= page_offset(locked_page)));
     393             : again:
     394             :         /* step one, find a bunch of delalloc bytes starting at start */
     395           0 :         delalloc_start = *start;
     396           0 :         delalloc_end = 0;
     397           0 :         found = btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
     398             :                                           max_bytes, &cached_state);
     399           0 :         if (!found || delalloc_end <= *start || delalloc_start > orig_end) {
     400           0 :                 *start = delalloc_start;
     401             : 
     402             :                 /* @delalloc_end can be -1, never go beyond @orig_end */
     403           0 :                 *end = min(delalloc_end, orig_end);
     404           0 :                 free_extent_state(cached_state);
     405           0 :                 return false;
     406             :         }
     407             : 
     408             :         /*
     409             :          * start comes from the offset of locked_page.  We have to lock
     410             :          * pages in order, so we can't process delalloc bytes before
     411             :          * locked_page
     412             :          */
     413           0 :         if (delalloc_start < *start)
     414           0 :                 delalloc_start = *start;
     415             : 
     416             :         /*
     417             :          * make sure to limit the number of pages we try to lock down
     418             :          */
     419           0 :         if (delalloc_end + 1 - delalloc_start > max_bytes)
     420           0 :                 delalloc_end = delalloc_start + max_bytes - 1;
     421             : 
     422             :         /* step two, lock all the pages after the page that has start */
     423           0 :         ret = lock_delalloc_pages(inode, locked_page,
     424             :                                   delalloc_start, delalloc_end);
     425           0 :         ASSERT(!ret || ret == -EAGAIN);
     426           0 :         if (ret == -EAGAIN) {
      427             :                 /* Some of the pages are gone, let's avoid looping by
     428             :                  * shortening the size of the delalloc range we're searching
     429             :                  */
     430           0 :                 free_extent_state(cached_state);
     431           0 :                 cached_state = NULL;
     432           0 :                 if (!loops) {
     433           0 :                         max_bytes = PAGE_SIZE;
     434           0 :                         loops = 1;
     435           0 :                         goto again;
     436             :                 } else {
     437           0 :                         found = false;
     438           0 :                         goto out_failed;
     439             :                 }
     440             :         }
     441             : 
     442             :         /* step three, lock the state bits for the whole range */
     443           0 :         lock_extent(tree, delalloc_start, delalloc_end, &cached_state);
     444             : 
     445             :         /* then test to make sure it is all still delalloc */
     446           0 :         ret = test_range_bit(tree, delalloc_start, delalloc_end,
     447             :                              EXTENT_DELALLOC, 1, cached_state);
     448           0 :         if (!ret) {
     449           0 :                 unlock_extent(tree, delalloc_start, delalloc_end,
     450             :                               &cached_state);
     451           0 :                 __unlock_for_delalloc(inode, locked_page,
     452             :                               delalloc_start, delalloc_end);
     453           0 :                 cond_resched();
     454           0 :                 goto again;
     455             :         }
     456           0 :         free_extent_state(cached_state);
     457           0 :         *start = delalloc_start;
     458           0 :         *end = delalloc_end;
     459             : out_failed:
     460             :         return found;
     461             : }
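
A hedged sketch of the caller pattern, modeled on the writepage path; the
variables here are illustrative, not taken from this file. The caller walks a
locked page, repeatedly finding and locking delalloc ranges until the whole
page is covered.

        u64 delalloc_start = page_offset(locked_page);
        const u64 page_end = delalloc_start + PAGE_SIZE - 1;
        u64 delalloc_end = 0;

        while (delalloc_start < page_end) {
                delalloc_end = page_end;
                if (!find_lock_delalloc_range(&inode->vfs_inode, locked_page,
                                              &delalloc_start, &delalloc_end)) {
                        /* No delalloc here, skip past the non-delalloc range. */
                        delalloc_start = delalloc_end + 1;
                        continue;
                }
                /* Run delalloc (reserve extents, start IO) on the locked range. */
                delalloc_start = delalloc_end + 1;
        }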
     462             : 
     463           0 : void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
     464             :                                   struct page *locked_page,
     465             :                                   u32 clear_bits, unsigned long page_ops)
     466             : {
     467           0 :         clear_extent_bit(&inode->io_tree, start, end, clear_bits, NULL);
     468             : 
     469           0 :         __process_pages_contig(inode->vfs_inode.i_mapping, locked_page,
     470             :                                start, end, page_ops, NULL);
     471           0 : }
     472             : 
     473             : static bool btrfs_verify_page(struct page *page, u64 start)
     474             : {
     475             :         if (!fsverity_active(page->mapping->host) ||
     476             :             PageUptodate(page) ||
     477             :             start >= i_size_read(page->mapping->host))
     478             :                 return true;
     479             :         return fsverity_verify_page(page);
     480             : }
     481             : 
     482           0 : static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
     483             : {
     484           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
     485             : 
     486           0 :         ASSERT(page_offset(page) <= start &&
     487             :                start + len <= page_offset(page) + PAGE_SIZE);
     488             : 
     489           0 :         if (uptodate && btrfs_verify_page(page, start))
     490           0 :                 btrfs_page_set_uptodate(fs_info, page, start, len);
     491             :         else
     492           0 :                 btrfs_page_clear_uptodate(fs_info, page, start, len);
     493             : 
     494           0 :         if (!btrfs_is_subpage(fs_info, page))
     495           0 :                 unlock_page(page);
     496             :         else
     497           0 :                 btrfs_subpage_end_reader(fs_info, page, start, len);
     498           0 : }
     499             : 
     500             : /* lots and lots of room for performance fixes in the end_bio funcs */
     501             : 
     502           0 : void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
     503             : {
     504           0 :         struct btrfs_inode *inode;
     505           0 :         const bool uptodate = (err == 0);
     506           0 :         int ret = 0;
     507             : 
     508           0 :         ASSERT(page && page->mapping);
     509           0 :         inode = BTRFS_I(page->mapping->host);
     510           0 :         btrfs_writepage_endio_finish_ordered(inode, page, start, end, uptodate);
     511             : 
     512           0 :         if (!uptodate) {
     513           0 :                 const struct btrfs_fs_info *fs_info = inode->root->fs_info;
     514           0 :                 u32 len;
     515             : 
     516           0 :                 ASSERT(end + 1 - start <= U32_MAX);
     517           0 :                 len = end + 1 - start;
     518             : 
     519           0 :                 btrfs_page_clear_uptodate(fs_info, page, start, len);
     520           0 :                 ret = err < 0 ? err : -EIO;
     521           0 :                 mapping_set_error(page->mapping, ret);
     522             :         }
     523           0 : }
     524             : 
     525             : /*
     526             :  * after a writepage IO is done, we need to:
     527             :  * clear the uptodate bits on error
     528             :  * clear the writeback bits in the extent tree for this IO
     529             :  * end_page_writeback if the page has no more pending IO
     530             :  *
     531             :  * Scheduling is not allowed, so the extent state tree is expected
     532             :  * to have one and only one object corresponding to this IO.
     533             :  */
     534           0 : static void end_bio_extent_writepage(struct btrfs_bio *bbio)
     535             : {
     536           0 :         struct bio *bio = &bbio->bio;
     537           0 :         int error = blk_status_to_errno(bio->bi_status);
     538           0 :         struct bio_vec *bvec;
     539           0 :         struct bvec_iter_all iter_all;
     540             : 
     541           0 :         ASSERT(!bio_flagged(bio, BIO_CLONED));
     542           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
     543           0 :                 struct page *page = bvec->bv_page;
     544           0 :                 struct inode *inode = page->mapping->host;
     545           0 :                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
     546           0 :                 const u32 sectorsize = fs_info->sectorsize;
     547           0 :                 u64 start = page_offset(page) + bvec->bv_offset;
     548           0 :                 u32 len = bvec->bv_len;
     549             : 
     550             :                 /* Our read/write should always be sector aligned. */
     551           0 :                 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
     552           0 :                         btrfs_err(fs_info,
     553             :                 "partial page write in btrfs with offset %u and length %u",
     554             :                                   bvec->bv_offset, bvec->bv_len);
     555           0 :                 else if (!IS_ALIGNED(bvec->bv_len, sectorsize))
     556           0 :                         btrfs_info(fs_info,
     557             :                 "incomplete page write with offset %u and length %u",
     558             :                                    bvec->bv_offset, bvec->bv_len);
     559             : 
     560           0 :                 btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);
     561           0 :                 if (error) {
     562           0 :                         btrfs_page_clear_uptodate(fs_info, page, start, len);
     563           0 :                         mapping_set_error(page->mapping, error);
     564             :                 }
     565           0 :                 btrfs_page_clear_writeback(fs_info, page, start, len);
     566             :         }
     567             : 
     568           0 :         bio_put(bio);
     569           0 : }
     570             : 
     571             : /*
     572             :  * Record previously processed extent range
     573             :  *
     574             :  * For endio_readpage_release_extent() to handle a full extent range, reducing
     575             :  * the extent io operations.
     576             :  */
     577             : struct processed_extent {
     578             :         struct btrfs_inode *inode;
     579             :         /* Start of the range in @inode */
     580             :         u64 start;
     581             :         /* End of the range in @inode */
     582             :         u64 end;
     583             :         bool uptodate;
     584             : };
     585             : 
     586             : /*
     587             :  * Try to release processed extent range
     588             :  *
      589             :  * May not release the extent range right now if the current range is
      590             :  * contiguous to the processed extent.
      591             :  *
      592             :  * Will release the processed extent when @inode or @uptodate changes, or when
      593             :  * the range is no longer contiguous to the processed range.
     594             :  *
     595             :  * Passing @inode == NULL will force processed extent to be released.
     596             :  */
     597           0 : static void endio_readpage_release_extent(struct processed_extent *processed,
     598             :                               struct btrfs_inode *inode, u64 start, u64 end,
     599             :                               bool uptodate)
     600             : {
     601           0 :         struct extent_state *cached = NULL;
     602           0 :         struct extent_io_tree *tree;
     603             : 
     604             :         /* The first extent, initialize @processed */
     605           0 :         if (!processed->inode)
     606           0 :                 goto update;
     607             : 
     608             :         /*
      609             :          * Contiguous to the processed extent, just update the end.
      610             :          *
      611             :          * Several things to notice:
      612             :          *
      613             :          * - bios can be merged as long as the on-disk bytenr is contiguous.
      614             :          *   This means we can have pages belonging to other inodes, thus we need
      615             :          *   to check that the inode still matches.
      616             :          * - a bvec can cover a range beyond the current page (multi-page bvec).
      617             :          *   Thus we need the processed->end + 1 >= start check.
     618             :          */
     619           0 :         if (processed->inode == inode && processed->uptodate == uptodate &&
     620           0 :             processed->end + 1 >= start && end >= processed->end) {
     621           0 :                 processed->end = end;
     622           0 :                 return;
     623             :         }
     624             : 
     625           0 :         tree = &processed->inode->io_tree;
     626             :         /*
     627             :          * Now we don't have range contiguous to the processed range, release
     628             :          * the processed range now.
     629             :          */
     630           0 :         unlock_extent(tree, processed->start, processed->end, &cached);
     631             : 
     632           0 : update:
     633             :         /* Update processed to current range */
     634           0 :         processed->inode = inode;
     635           0 :         processed->start = start;
     636           0 :         processed->end = end;
     637           0 :         processed->uptodate = uptodate;
     638             : }
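
A worked illustration of the merge rule above (the values are arbitrary, not
from the report): three contiguous uptodate ranges from the same inode collapse
into a single unlock_extent() call covering [0, 12287].

        endio_readpage_release_extent(&processed, inode,     0,  4095, true);
        endio_readpage_release_extent(&processed, inode,  4096,  8191, true);
        endio_readpage_release_extent(&processed, inode,  8192, 12287, true);
        /* Passing a NULL inode forces the final release. */
        endio_readpage_release_extent(&processed, NULL, 0, 0, false);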
     639             : 
     640           0 : static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page)
     641             : {
     642           0 :         ASSERT(PageLocked(page));
     643           0 :         if (!btrfs_is_subpage(fs_info, page))
     644             :                 return;
     645             : 
     646           0 :         ASSERT(PagePrivate(page));
     647           0 :         btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE);
     648             : }
     649             : 
     650             : /*
     651             :  * after a readpage IO is done, we need to:
     652             :  * clear the uptodate bits on error
     653             :  * set the uptodate bits if things worked
     654             :  * set the page up to date if all extents in the tree are uptodate
     655             :  * clear the lock bit in the extent tree
     656             :  * unlock the page if there are no other extents locked for it
     657             :  *
     658             :  * Scheduling is not allowed, so the extent state tree is expected
     659             :  * to have one and only one object corresponding to this IO.
     660             :  */
     661           0 : static void end_bio_extent_readpage(struct btrfs_bio *bbio)
     662             : {
     663           0 :         struct bio *bio = &bbio->bio;
     664           0 :         struct bio_vec *bvec;
     665           0 :         struct processed_extent processed = { 0 };
     666             :         /*
      667             :          * The offset from the beginning of the bio; since one bio can never be
      668             :          * larger than UINT_MAX, a u32 is enough here.
     669             :          */
     670           0 :         u32 bio_offset = 0;
     671           0 :         struct bvec_iter_all iter_all;
     672             : 
     673           0 :         ASSERT(!bio_flagged(bio, BIO_CLONED));
     674           0 :         bio_for_each_segment_all(bvec, bio, iter_all) {
     675           0 :                 bool uptodate = !bio->bi_status;
     676           0 :                 struct page *page = bvec->bv_page;
     677           0 :                 struct inode *inode = page->mapping->host;
     678           0 :                 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
     679           0 :                 const u32 sectorsize = fs_info->sectorsize;
     680           0 :                 u64 start;
     681           0 :                 u64 end;
     682           0 :                 u32 len;
     683             : 
     684           0 :                 btrfs_debug(fs_info,
     685             :                         "end_bio_extent_readpage: bi_sector=%llu, err=%d, mirror=%u",
     686             :                         bio->bi_iter.bi_sector, bio->bi_status,
     687             :                         bbio->mirror_num);
     688             : 
     689             :                 /*
     690             :                  * We always issue full-sector reads, but if some block in a
     691             :                  * page fails to read, blk_update_request() will advance
     692             :                  * bv_offset and adjust bv_len to compensate.  Print a warning
     693             :                  * for unaligned offsets, and an error if they don't add up to
     694             :                  * a full sector.
     695             :                  */
     696           0 :                 if (!IS_ALIGNED(bvec->bv_offset, sectorsize))
     697           0 :                         btrfs_err(fs_info,
     698             :                 "partial page read in btrfs with offset %u and length %u",
     699             :                                   bvec->bv_offset, bvec->bv_len);
     700           0 :                 else if (!IS_ALIGNED(bvec->bv_offset + bvec->bv_len,
     701             :                                      sectorsize))
     702           0 :                         btrfs_info(fs_info,
     703             :                 "incomplete page read with offset %u and length %u",
     704             :                                    bvec->bv_offset, bvec->bv_len);
     705             : 
     706           0 :                 start = page_offset(page) + bvec->bv_offset;
     707           0 :                 end = start + bvec->bv_len - 1;
     708           0 :                 len = bvec->bv_len;
     709             : 
     710           0 :                 if (likely(uptodate)) {
     711           0 :                         loff_t i_size = i_size_read(inode);
     712           0 :                         pgoff_t end_index = i_size >> PAGE_SHIFT;
     713             : 
     714             :                         /*
     715             :                          * Zero out the remaining part if this range straddles
     716             :                          * i_size.
     717             :                          *
     718             :                          * Here we should only zero the range inside the bvec,
     719             :                          * not touch anything else.
     720             :                          *
     721             :                          * NOTE: i_size is exclusive while end is inclusive.
     722             :                          */
     723           0 :                         if (page->index == end_index && i_size <= end) {
     724           0 :                                 u32 zero_start = max(offset_in_page(i_size),
     725             :                                                      offset_in_page(start));
     726             : 
     727           0 :                                 zero_user_segment(page, zero_start,
     728           0 :                                                   offset_in_page(end) + 1);
     729             :                         }
     730             :                 }
     731             : 
     732             :                 /* Update page status and unlock. */
     733           0 :                 end_page_read(page, uptodate, start, len);
     734           0 :                 endio_readpage_release_extent(&processed, BTRFS_I(inode),
     735             :                                               start, end, uptodate);
     736             : 
     737           0 :                 ASSERT(bio_offset + len > bio_offset);
     738           0 :                 bio_offset += len;
     739             : 
     740             :         }
     741             :         /* Release the last extent */
     742           0 :         endio_readpage_release_extent(&processed, NULL, 0, 0, false);
     743           0 :         bio_put(bio);
     744           0 : }
     745             : 
     746             : /*
     747             :  * Populate every free slot in a provided array with pages.
     748             :  *
     749             :  * @nr_pages:   number of pages to allocate
     750             :  * @page_array: the array to fill with pages; any existing non-null entries in
     751             :  *              the array will be skipped
     752             :  *
     753             :  * Return: 0        if all pages were able to be allocated;
     754             :  *         -ENOMEM  otherwise, and the caller is responsible for freeing all
     755             :  *                  non-null page pointers in the array.
     756             :  */
     757           0 : int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array)
     758             : {
     759           0 :         unsigned int allocated;
     760             : 
     761           0 :         for (allocated = 0; allocated < nr_pages;) {
     762           0 :                 unsigned int last = allocated;
     763             : 
     764           0 :                 allocated = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
     765             : 
     766           0 :                 if (allocated == nr_pages)
     767             :                         return 0;
     768             : 
     769             :                 /*
     770             :                  * During this iteration, no page could be allocated, even
     771             :                  * though alloc_pages_bulk_array() falls back to alloc_page()
      772             :                  * if it could not bulk-allocate. So we must be out of memory.
     773             :                  */
     774           0 :                 if (allocated == last)
     775             :                         return -ENOMEM;
     776             : 
     777           0 :                 memalloc_retry_wait(GFP_NOFS);
     778             :         }
     779             :         return 0;
     780             : }
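
A hedged usage sketch: per the contract above, on -ENOMEM the caller must free
every non-NULL entry itself. The array size is illustrative.

        struct page *pages[16] = { NULL };
        int i;

        if (btrfs_alloc_page_array(ARRAY_SIZE(pages), pages)) {
                /* Partial allocations are the caller's to clean up. */
                for (i = 0; i < ARRAY_SIZE(pages); i++)
                        if (pages[i])
                                __free_page(pages[i]);
                return -ENOMEM;
        }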
     781             : 
     782           0 : static bool btrfs_bio_is_contig(struct btrfs_bio_ctrl *bio_ctrl,
     783             :                                 struct page *page, u64 disk_bytenr,
     784             :                                 unsigned int pg_offset)
     785             : {
     786           0 :         struct bio *bio = &bio_ctrl->bbio->bio;
     787           0 :         struct bio_vec *bvec = bio_last_bvec_all(bio);
     788           0 :         const sector_t sector = disk_bytenr >> SECTOR_SHIFT;
     789             : 
     790           0 :         if (bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) {
     791             :                 /*
     792             :                  * For compression, all IO should have its logical bytenr set
     793             :                  * to the starting bytenr of the compressed extent.
     794             :                  */
     795           0 :                 return bio->bi_iter.bi_sector == sector;
     796             :         }
     797             : 
     798             :         /*
     799             :          * The contig check requires the following conditions to be met:
     800             :          *
      801             :          * 1) The pages belong to the same inode
     802             :          *    This is implied by the call chain.
     803             :          *
     804             :          * 2) The range has adjacent logical bytenr
     805             :          *
     806             :          * 3) The range has adjacent file offset
     807             :          *    This is required for the usage of btrfs_bio->file_offset.
     808             :          */
     809           0 :         return bio_end_sector(bio) == sector &&
     810           0 :                 page_offset(bvec->bv_page) + bvec->bv_offset + bvec->bv_len ==
     811           0 :                 page_offset(page) + pg_offset;
     812             : }
     813             : 
     814           0 : static void alloc_new_bio(struct btrfs_inode *inode,
     815             :                           struct btrfs_bio_ctrl *bio_ctrl,
     816             :                           u64 disk_bytenr, u64 file_offset)
     817             : {
     818           0 :         struct btrfs_fs_info *fs_info = inode->root->fs_info;
     819           0 :         struct btrfs_bio *bbio;
     820             : 
     821           0 :         bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, fs_info,
     822             :                                bio_ctrl->end_io_func, NULL);
     823           0 :         bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
     824           0 :         bbio->inode = inode;
     825           0 :         bbio->file_offset = file_offset;
     826           0 :         bio_ctrl->bbio = bbio;
     827           0 :         bio_ctrl->len_to_oe_boundary = U32_MAX;
     828             : 
     829             :         /* Limit data write bios to the ordered boundary. */
     830           0 :         if (bio_ctrl->wbc) {
     831           0 :                 struct btrfs_ordered_extent *ordered;
     832             : 
     833           0 :                 ordered = btrfs_lookup_ordered_extent(inode, file_offset);
     834           0 :                 if (ordered) {
     835           0 :                         bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX,
     836             :                                         ordered->file_offset +
     837             :                                         ordered->disk_num_bytes - file_offset);
     838           0 :                         bbio->ordered = ordered;
     839             :                 }
     840             : 
     841             :                 /*
     842             :                  * Pick the last added device to support cgroup writeback.  For
     843             :                  * multi-device file systems this means blk-cgroup policies have
     844             :                  * to always be set on the last added/replaced device.
     845             :                  * This is a bit odd but has been like that for a long time.
     846             :                  */
     847           0 :                 bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
     848           0 :                 wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
     849             :         }
     850           0 : }
     851             : 
     852             : /*
     853             :  * @disk_bytenr: logical bytenr where the write will be
     854             :  * @page:       page to add to the bio
     855             :  * @size:       portion of page that we want to write to
      856             :  * @pg_offset:  offset within @page where the data starts; also used to check
      857             :  *              whether we are adding a contiguous page to the previous one
      858             :  *
      859             :  * This will either add the page to the existing @bio_ctrl->bbio, or allocate a
      860             :  * new one in @bio_ctrl->bbio.
      861             :  * The mirror number for this IO should already be initialized in
      862             :  * @bio_ctrl->mirror_num.
     863             :  */
     864           0 : static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
     865             :                                u64 disk_bytenr, struct page *page,
     866             :                                size_t size, unsigned long pg_offset)
     867             : {
     868           0 :         struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
     869             : 
     870           0 :         ASSERT(pg_offset + size <= PAGE_SIZE);
     871           0 :         ASSERT(bio_ctrl->end_io_func);
     872             : 
     873           0 :         if (bio_ctrl->bbio &&
     874           0 :             !btrfs_bio_is_contig(bio_ctrl, page, disk_bytenr, pg_offset))
     875           0 :                 submit_one_bio(bio_ctrl);
     876             : 
     877           0 :         do {
     878           0 :                 u32 len = size;
     879             : 
     880             :                 /* Allocate new bio if needed */
     881           0 :                 if (!bio_ctrl->bbio) {
     882           0 :                         alloc_new_bio(inode, bio_ctrl, disk_bytenr,
     883           0 :                                       page_offset(page) + pg_offset);
     884             :                 }
     885             : 
     886             :                 /* Cap to the current ordered extent boundary if there is one. */
     887           0 :                 if (len > bio_ctrl->len_to_oe_boundary) {
     888             :                         ASSERT(bio_ctrl->compress_type == BTRFS_COMPRESS_NONE);
     889             :                         ASSERT(is_data_inode(&inode->vfs_inode));
     890             :                         len = bio_ctrl->len_to_oe_boundary;
     891             :                 }
     892             : 
     893           0 :                 if (bio_add_page(&bio_ctrl->bbio->bio, page, len, pg_offset) != len) {
     894             :                         /* bio full: move on to a new one */
     895           0 :                         submit_one_bio(bio_ctrl);
     896           0 :                         continue;
     897             :                 }
     898             : 
     899           0 :                 if (bio_ctrl->wbc)
     900           0 :                         wbc_account_cgroup_owner(bio_ctrl->wbc, page, len);
     901             : 
     902           0 :                 size -= len;
     903           0 :                 pg_offset += len;
     904           0 :                 disk_bytenr += len;
     905           0 :                 bio_ctrl->len_to_oe_boundary -= len;
     906             : 
     907             :                 /* Ordered extent boundary: move on to a new bio. */
     908           0 :                 if (bio_ctrl->len_to_oe_boundary == 0)
     909           0 :                         submit_one_bio(bio_ctrl);
     910           0 :         } while (size);
     911           0 : }
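
A worked example of the loop above (the numbers are illustrative): with
size = 12KiB and len_to_oe_boundary = 8KiB, the first iteration caps len to
8KiB, adds it, and reaches the boundary, so the bio is submitted; the second
iteration allocates a fresh bio (alloc_new_bio() resets len_to_oe_boundary)
and adds the remaining 4KiB.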
     912             : 
     913           0 : static int attach_extent_buffer_page(struct extent_buffer *eb,
     914             :                                      struct page *page,
     915             :                                      struct btrfs_subpage *prealloc)
     916             : {
     917           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
     918           0 :         int ret = 0;
     919             : 
     920             :         /*
      921             :          * If the page is mapped to the btree inode, we should hold the private
      922             :          * lock to prevent races.
     923             :          * For cloned or dummy extent buffers, their pages are not mapped and
     924             :          * will not race with any other ebs.
     925             :          */
     926           0 :         if (page->mapping)
     927           0 :                 lockdep_assert_held(&page->mapping->private_lock);
     928             : 
     929           0 :         if (fs_info->nodesize >= PAGE_SIZE) {
     930           0 :                 if (!PagePrivate(page))
     931           0 :                         attach_page_private(page, eb);
     932             :                 else
     933           0 :                         WARN_ON(page->private != (unsigned long)eb);
     934           0 :                 return 0;
     935             :         }
     936             : 
     937             :         /* Already mapped, just free prealloc */
     938           0 :         if (PagePrivate(page)) {
     939           0 :                 btrfs_free_subpage(prealloc);
     940           0 :                 return 0;
     941             :         }
     942             : 
     943           0 :         if (prealloc)
     944             :                 /* Has preallocated memory for subpage */
     945           0 :                 attach_page_private(page, prealloc);
     946             :         else
     947             :                 /* Do new allocation to attach subpage */
     948           0 :                 ret = btrfs_attach_subpage(fs_info, page,
     949             :                                            BTRFS_SUBPAGE_METADATA);
     950             :         return ret;
     951             : }
     952             : 
     953           0 : int set_page_extent_mapped(struct page *page)
     954             : {
     955           0 :         struct btrfs_fs_info *fs_info;
     956             : 
     957           0 :         ASSERT(page->mapping);
     958             : 
     959           0 :         if (PagePrivate(page))
     960             :                 return 0;
     961             : 
     962           0 :         fs_info = btrfs_sb(page->mapping->host->i_sb);
     963             : 
     964           0 :         if (btrfs_is_subpage(fs_info, page))
     965           0 :                 return btrfs_attach_subpage(fs_info, page, BTRFS_SUBPAGE_DATA);
     966             : 
     967           0 :         attach_page_private(page, (void *)EXTENT_PAGE_PRIVATE);
     968           0 :         return 0;
     969             : }
     970             : 
     971           0 : void clear_page_extent_mapped(struct page *page)
     972             : {
     973           0 :         struct btrfs_fs_info *fs_info;
     974             : 
     975           0 :         ASSERT(page->mapping);
     976             : 
     977           0 :         if (!PagePrivate(page))
     978             :                 return;
     979             : 
     980           0 :         fs_info = btrfs_sb(page->mapping->host->i_sb);
     981           0 :         if (btrfs_is_subpage(fs_info, page))
     982           0 :                 return btrfs_detach_subpage(fs_info, page);
     983             : 
     984           0 :         detach_page_private(page);
     985             : }
     986             : 
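set_page_extent_mapped() and clear_page_extent_mapped() above form a pair: the page's private field holds either a sentinel constant (sector size == page size) or a pointer to allocated subpage metadata, and detach must mirror whichever attach was done. A small sketch of that pairing, with hypothetical types (fake_page, subpage_meta) standing in for the kernel structures:

#include <stdlib.h>
#include <stdbool.h>

#define SENTINEL ((void *)1UL)          /* plays the role of EXTENT_PAGE_PRIVATE */

struct fake_page {
        void *private;                  /* NULL means nothing attached */
};

struct subpage_meta {
        unsigned long dirty_bitmap;     /* per-sector state would live here */
};

static int page_attach(struct fake_page *p, bool subpage)
{
        if (p->private)                 /* already attached: nothing to do */
                return 0;
        if (!subpage) {
                p->private = SENTINEL;  /* whole-page case: just a marker */
                return 0;
        }
        p->private = calloc(1, sizeof(struct subpage_meta));
        return p->private ? 0 : -1;
}

static void page_detach(struct fake_page *p, bool subpage)
{
        if (!p->private)
                return;
        if (subpage)
                free(p->private);       /* only the subpage case owns memory */
        p->private = NULL;
}

int main(void)
{
        struct fake_page p = { 0 };

        if (page_attach(&p, true))
                return 1;
        page_detach(&p, true);
        return 0;
}
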
     987             : static struct extent_map *
     988           0 : __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
     989             :                  u64 start, u64 len, struct extent_map **em_cached)
     990             : {
     991           0 :         struct extent_map *em;
     992             : 
     993           0 :         if (em_cached && *em_cached) {
     994           0 :                 em = *em_cached;
     995           0 :                 if (extent_map_in_tree(em) && start >= em->start &&
     996             :                     start < extent_map_end(em)) {
     997           0 :                         refcount_inc(&em->refs);
     998           0 :                         return em;
     999             :                 }
    1000             : 
    1001           0 :                 free_extent_map(em);
    1002           0 :                 *em_cached = NULL;
    1003             :         }
    1004             : 
    1005           0 :         em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len);
    1006           0 :         if (em_cached && !IS_ERR(em)) {
    1007           0 :                 BUG_ON(*em_cached);
    1008           0 :                 refcount_inc(&em->refs);
    1009           0 :                 *em_cached = em;
    1010             :         }
    1011             :         return em;
    1012             : }
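
__get_extent_map() keeps a one-slot cache of the last extent map so a batch of contiguous pages does not repeat the tree lookup for every page, with the cache holding its own reference. A userspace sketch of the same pattern under a simplified refcount; map, lookup_slow() and lookup_cached() are all made-up names, not btrfs API.

#include <stdint.h>
#include <stdlib.h>

struct map {
        uint64_t start, len;
        int refs;
};

static void map_put(struct map *m)
{
        if (m && --m->refs == 0)
                free(m);
}

/* Stand-in for the expensive tree lookup. */
static struct map *lookup_slow(uint64_t start)
{
        struct map *m = malloc(sizeof(*m));

        m->start = start & ~(uint64_t)4095;     /* pretend 4K granularity */
        m->len = 16 * 4096;
        m->refs = 1;                            /* caller's reference */
        return m;
}

static struct map *lookup_cached(uint64_t start, struct map **cached)
{
        struct map *m = *cached;

        if (m && start >= m->start && start < m->start + m->len) {
                m->refs++;                      /* hand out an extra ref */
                return m;
        }
        if (m) {                                /* stale: drop the cache's ref */
                map_put(m);
                *cached = NULL;
        }
        m = lookup_slow(start);
        m->refs++;                              /* one ref kept by the cache */
        *cached = m;
        return m;
}

int main(void)
{
        struct map *cached = NULL;

        for (uint64_t off = 0; off < 32 * 4096; off += 4096)
                map_put(lookup_cached(off, &cached));
        map_put(cached);                        /* drop the cache's own ref */
        return 0;
}
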
     1013             : /*
     1014             :  * Basic readpage implementation.  Locked extent state structs are inserted
     1015             :  * into the tree and removed when the IO is done (by the end_io handlers).
     1016             :  *
     1017             :  * XXX JDM: This needs looking at to ensure proper page locking.
     1018             :  * Return 0 on success, otherwise return an error.
     1019             :  */
    1020           0 : static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
    1021             :                       struct btrfs_bio_ctrl *bio_ctrl, u64 *prev_em_start)
    1022             : {
    1023           0 :         struct inode *inode = page->mapping->host;
    1024           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
    1025           0 :         u64 start = page_offset(page);
    1026           0 :         const u64 end = start + PAGE_SIZE - 1;
    1027           0 :         u64 cur = start;
    1028           0 :         u64 extent_offset;
    1029           0 :         u64 last_byte = i_size_read(inode);
    1030           0 :         u64 block_start;
    1031           0 :         struct extent_map *em;
    1032           0 :         int ret = 0;
    1033           0 :         size_t pg_offset = 0;
    1034           0 :         size_t iosize;
    1035           0 :         size_t blocksize = inode->i_sb->s_blocksize;
    1036           0 :         struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
    1037             : 
    1038           0 :         ret = set_page_extent_mapped(page);
    1039           0 :         if (ret < 0) {
    1040           0 :                 unlock_extent(tree, start, end, NULL);
    1041           0 :                 unlock_page(page);
    1042           0 :                 return ret;
    1043             :         }
    1044             : 
    1045           0 :         if (page->index == last_byte >> PAGE_SHIFT) {
    1046           0 :                 size_t zero_offset = offset_in_page(last_byte);
    1047             : 
    1048           0 :                 if (zero_offset) {
    1049           0 :                         iosize = PAGE_SIZE - zero_offset;
    1050           0 :                         memzero_page(page, zero_offset, iosize);
    1051             :                 }
    1052             :         }
    1053           0 :         bio_ctrl->end_io_func = end_bio_extent_readpage;
    1054           0 :         begin_page_read(fs_info, page);
    1055           0 :         while (cur <= end) {
    1056           0 :                 enum btrfs_compression_type compress_type = BTRFS_COMPRESS_NONE;
    1057           0 :                 bool force_bio_submit = false;
    1058           0 :                 u64 disk_bytenr;
    1059             : 
    1060           0 :                 ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
    1061           0 :                 if (cur >= last_byte) {
    1062           0 :                         iosize = PAGE_SIZE - pg_offset;
    1063           0 :                         memzero_page(page, pg_offset, iosize);
    1064           0 :                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
    1065           0 :                         end_page_read(page, true, cur, iosize);
    1066           0 :                         break;
    1067             :                 }
    1068           0 :                 em = __get_extent_map(inode, page, pg_offset, cur,
    1069           0 :                                       end - cur + 1, em_cached);
    1070           0 :                 if (IS_ERR(em)) {
    1071           0 :                         unlock_extent(tree, cur, end, NULL);
    1072           0 :                         end_page_read(page, false, cur, end + 1 - cur);
    1073           0 :                         return PTR_ERR(em);
    1074             :                 }
    1075           0 :                 extent_offset = cur - em->start;
    1076           0 :                 BUG_ON(extent_map_end(em) <= cur);
    1077           0 :                 BUG_ON(end < cur);
    1078             : 
    1079           0 :                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
    1080           0 :                         compress_type = em->compress_type;
    1081             : 
    1082           0 :                 iosize = min(extent_map_end(em) - cur, end - cur + 1);
    1083           0 :                 iosize = ALIGN(iosize, blocksize);
    1084           0 :                 if (compress_type != BTRFS_COMPRESS_NONE)
    1085           0 :                         disk_bytenr = em->block_start;
    1086             :                 else
    1087           0 :                         disk_bytenr = em->block_start + extent_offset;
    1088           0 :                 block_start = em->block_start;
    1089           0 :                 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
    1090           0 :                         block_start = EXTENT_MAP_HOLE;
    1091             : 
    1092             :                 /*
    1093             :                  * If we have a file range that points to a compressed extent
    1094             :                  * and it's followed by a consecutive file range that points
    1095             :                  * to the same compressed extent (possibly with a different
    1096             :                  * offset and/or length, so it either points to the whole extent
    1097             :                  * or only part of it), we must make sure we do not submit a
    1098             :                  * single bio to populate the pages for the 2 ranges because
    1099             :                  * this makes the compressed extent read zero out the pages
    1100             :                  * belonging to the 2nd range. Imagine the following scenario:
    1101             :                  *
    1102             :                  *  File layout
    1103             :                  *  [0 - 8K]                     [8K - 24K]
    1104             :                  *    |                               |
    1105             :                  *    |                               |
    1106             :                  * points to extent X,         points to extent X,
    1107             :                  * offset 4K, length of 8K     offset 0, length 16K
    1108             :                  *
    1109             :                  * [extent X, compressed length = 4K uncompressed length = 16K]
    1110             :                  *
    1111             :                  * If the bio to read the compressed extent covers both ranges,
    1112             :                  * it will decompress extent X into the pages belonging to the
    1113             :                  * first range and then it will stop, zeroing out the remaining
    1114             :                  * pages that belong to the other range that points to extent X.
     1115             :                  * So here we make sure we submit 2 bios, one for the first
     1116             :                  * range and another one for the second range. Both will target
    1117             :                  * the same physical extent from disk, but we can't currently
    1118             :                  * make the compressed bio endio callback populate the pages
    1119             :                  * for both ranges because each compressed bio is tightly
    1120             :                  * coupled with a single extent map, and each range can have
    1121             :                  * an extent map with a different offset value relative to the
    1122             :                  * uncompressed data of our extent and different lengths. This
    1123             :                  * is a corner case so we prioritize correctness over
    1124             :                  * non-optimal behavior (submitting 2 bios for the same extent).
    1125             :                  */
    1126           0 :                 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
    1127           0 :                     prev_em_start && *prev_em_start != (u64)-1 &&
    1128             :                     *prev_em_start != em->start)
    1129           0 :                         force_bio_submit = true;
    1130             : 
    1131           0 :                 if (prev_em_start)
    1132           0 :                         *prev_em_start = em->start;
    1133             : 
    1134           0 :                 free_extent_map(em);
    1135           0 :                 em = NULL;
    1136             : 
    1137             :                 /* we've found a hole, just zero and go on */
    1138           0 :                 if (block_start == EXTENT_MAP_HOLE) {
    1139           0 :                         memzero_page(page, pg_offset, iosize);
    1140             : 
    1141           0 :                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
    1142           0 :                         end_page_read(page, true, cur, iosize);
    1143           0 :                         cur = cur + iosize;
    1144           0 :                         pg_offset += iosize;
    1145           0 :                         continue;
    1146             :                 }
     1147             :                 /* the get_extent function already copied the data into the page */
    1148           0 :                 if (block_start == EXTENT_MAP_INLINE) {
    1149           0 :                         unlock_extent(tree, cur, cur + iosize - 1, NULL);
    1150           0 :                         end_page_read(page, true, cur, iosize);
    1151           0 :                         cur = cur + iosize;
    1152           0 :                         pg_offset += iosize;
    1153           0 :                         continue;
    1154             :                 }
    1155             : 
    1156           0 :                 if (bio_ctrl->compress_type != compress_type) {
    1157           0 :                         submit_one_bio(bio_ctrl);
    1158           0 :                         bio_ctrl->compress_type = compress_type;
    1159             :                 }
    1160             : 
    1161           0 :                 if (force_bio_submit)
    1162           0 :                         submit_one_bio(bio_ctrl);
    1163           0 :                 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
    1164             :                                    pg_offset);
    1165           0 :                 cur = cur + iosize;
    1166           0 :                 pg_offset += iosize;
    1167             :         }
    1168             : 
    1169             :         return 0;
    1170             : }
    1171             : 
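btrfs_do_readpage() above is, at its core, a per-block classification loop: ranges past EOF and holes get zeroed, inline extents were already populated by the lookup, and everything else is queued into a bio. A condensed userspace sketch of that control flow, with a faked classify() standing in for the extent-map lookup and all sizes invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096u
#define BLK     1024u

enum blk_type { BLK_HOLE, BLK_INLINE, BLK_DATA };

/* Stand-in for the extent map lookup; fakes a repeating layout. */
static enum blk_type classify(uint64_t off)
{
        return (enum blk_type)((off / BLK) % 3);
}

static void read_page(uint64_t page_start, uint64_t isize, char *page)
{
        for (uint64_t cur = page_start; cur < page_start + PAGE_SZ; cur += BLK) {
                size_t pg_off = cur - page_start;

                if (cur >= isize) {             /* past EOF: zero the rest */
                        memset(page + pg_off, 0, PAGE_SZ - pg_off);
                        break;
                }
                switch (classify(cur)) {
                case BLK_HOLE:                  /* hole: just zero it */
                        memset(page + pg_off, 0, BLK);
                        break;
                case BLK_INLINE:                /* lookup already filled it */
                        break;
                case BLK_DATA:                  /* would be queued into a bio */
                        printf("submit read at offset %llu\n",
                               (unsigned long long)cur);
                        break;
                }
        }
}

int main(void)
{
        char page[PAGE_SZ] = { 0 };

        read_page(0, 3000, page);               /* EOF lands inside the page */
        return 0;
}
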
    1172           0 : int btrfs_read_folio(struct file *file, struct folio *folio)
    1173             : {
    1174           0 :         struct page *page = &folio->page;
    1175           0 :         struct btrfs_inode *inode = BTRFS_I(page->mapping->host);
    1176           0 :         u64 start = page_offset(page);
    1177           0 :         u64 end = start + PAGE_SIZE - 1;
    1178           0 :         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ };
    1179           0 :         int ret;
    1180             : 
    1181           0 :         btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
    1182             : 
    1183           0 :         ret = btrfs_do_readpage(page, NULL, &bio_ctrl, NULL);
    1184             :         /*
    1185             :          * If btrfs_do_readpage() failed we will want to submit the assembled
    1186             :          * bio to do the cleanup.
    1187             :          */
    1188           0 :         submit_one_bio(&bio_ctrl);
    1189           0 :         return ret;
    1190             : }
    1191             : 
    1192           0 : static inline void contiguous_readpages(struct page *pages[], int nr_pages,
    1193             :                                         u64 start, u64 end,
    1194             :                                         struct extent_map **em_cached,
    1195             :                                         struct btrfs_bio_ctrl *bio_ctrl,
    1196             :                                         u64 *prev_em_start)
    1197             : {
    1198           0 :         struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host);
    1199           0 :         int index;
    1200             : 
    1201           0 :         btrfs_lock_and_flush_ordered_range(inode, start, end, NULL);
    1202             : 
    1203           0 :         for (index = 0; index < nr_pages; index++) {
    1204           0 :                 btrfs_do_readpage(pages[index], em_cached, bio_ctrl,
    1205             :                                   prev_em_start);
    1206           0 :                 put_page(pages[index]);
    1207             :         }
    1208           0 : }
    1209             : 
    1210             : /*
    1211             :  * helper for __extent_writepage, doing all of the delayed allocation setup.
    1212             :  *
     1213             :  * This returns 1 if btrfs_run_delalloc_range() did all the work required
    1214             :  * to write the page (copy into inline extent).  In this case the IO has
    1215             :  * been started and the page is already unlocked.
    1216             :  *
    1217             :  * This returns 0 if all went well (page still locked)
    1218             :  * This returns < 0 if there were errors (page still locked)
    1219             :  */
    1220           0 : static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
    1221             :                 struct page *page, struct writeback_control *wbc)
    1222             : {
    1223           0 :         const u64 page_end = page_offset(page) + PAGE_SIZE - 1;
    1224           0 :         u64 delalloc_start = page_offset(page);
    1225           0 :         u64 delalloc_to_write = 0;
    1226             :         /* How many pages are started by btrfs_run_delalloc_range() */
    1227           0 :         unsigned long nr_written = 0;
    1228           0 :         int ret;
    1229           0 :         int page_started = 0;
    1230             : 
    1231           0 :         while (delalloc_start < page_end) {
    1232           0 :                 u64 delalloc_end = page_end;
    1233           0 :                 bool found;
    1234             : 
    1235           0 :                 found = find_lock_delalloc_range(&inode->vfs_inode, page,
    1236             :                                                &delalloc_start,
    1237             :                                                &delalloc_end);
    1238           0 :                 if (!found) {
    1239           0 :                         delalloc_start = delalloc_end + 1;
    1240           0 :                         continue;
    1241             :                 }
    1242           0 :                 ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
    1243             :                                 delalloc_end, &page_started, &nr_written, wbc);
    1244           0 :                 if (ret)
    1245           0 :                         return ret;
    1246             : 
    1247             :                 /*
    1248             :                  * delalloc_end is already one less than the total length, so
    1249             :                  * we don't subtract one from PAGE_SIZE
    1250             :                  */
    1251           0 :                 delalloc_to_write += (delalloc_end - delalloc_start +
    1252           0 :                                       PAGE_SIZE) >> PAGE_SHIFT;
    1253           0 :                 delalloc_start = delalloc_end + 1;
    1254             :         }
    1255           0 :         if (wbc->nr_to_write < delalloc_to_write) {
    1256           0 :                 int thresh = 8192;
    1257             : 
    1258           0 :                 if (delalloc_to_write < thresh * 2)
    1259           0 :                         thresh = delalloc_to_write;
    1260           0 :                 wbc->nr_to_write = min_t(u64, delalloc_to_write,
    1261             :                                          thresh);
    1262             :         }
    1263             : 
     1264             :         /* Did btrfs_run_delalloc_range() already unlock and start the IO? */
    1265           0 :         if (page_started) {
    1266             :                 /*
    1267             :                  * We've unlocked the page, so we can't update the mapping's
    1268             :                  * writeback index, just update nr_to_write.
    1269             :                  */
    1270           0 :                 wbc->nr_to_write -= nr_written;
    1271           0 :                 return 1;
    1272             :         }
    1273             : 
    1274             :         return 0;
    1275             : }
    1276             : 
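The delalloc_to_write accounting above counts whole pages covered by an inclusive byte range; because delalloc_end is inclusive, adding PAGE_SIZE (not PAGE_SIZE - 1) before the shift performs the round-up, as the in-code comment notes. A tiny self-checking sketch of that arithmetic, assuming 4K pages and page-aligned range boundaries (macro names are invented to avoid clashing with kernel ones):

#include <assert.h>
#include <stdint.h>

#define PG_SHIFT 12
#define PG_SIZE  ((uint64_t)1 << PG_SHIFT)

/* Pages covered by the inclusive range [start, end_inclusive]. */
static uint64_t pages_covered(uint64_t start, uint64_t end_inclusive)
{
        return (end_inclusive - start + PG_SIZE) >> PG_SHIFT;
}

int main(void)
{
        assert(pages_covered(0, PG_SIZE - 1) == 1);           /* one full page */
        assert(pages_covered(0, 2 * PG_SIZE - 1) == 2);       /* two full pages */
        assert(pages_covered(PG_SIZE, 2 * PG_SIZE - 1) == 1); /* second page only */
        return 0;
}
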
    1277             : /*
    1278             :  * Find the first byte we need to write.
    1279             :  *
    1280             :  * For subpage, one page can contain several sectors, and
    1281             :  * __extent_writepage_io() will just grab all extent maps in the page
    1282             :  * range and try to submit all non-inline/non-compressed extents.
    1283             :  *
     1284             :  * This is a big problem for subpage: we must not re-submit already written
     1285             :  * data at all.
     1286             :  * This function looks up the subpage dirty bitmap to find which range we
     1287             :  * really need to submit.
    1288             :  *
    1289             :  * Return the next dirty range in [@start, @end).
    1290             :  * If no dirty range is found, @start will be page_offset(page) + PAGE_SIZE.
    1291             :  */
    1292           0 : static void find_next_dirty_byte(struct btrfs_fs_info *fs_info,
    1293             :                                  struct page *page, u64 *start, u64 *end)
    1294             : {
    1295           0 :         struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
    1296           0 :         struct btrfs_subpage_info *spi = fs_info->subpage_info;
    1297           0 :         u64 orig_start = *start;
    1298             :         /* Declare as unsigned long so we can use bitmap ops */
    1299           0 :         unsigned long flags;
    1300           0 :         int range_start_bit;
    1301           0 :         int range_end_bit;
    1302             : 
    1303             :         /*
    1304             :          * For regular sector size == page size case, since one page only
    1305             :          * contains one sector, we return the page offset directly.
    1306             :          */
    1307           0 :         if (!btrfs_is_subpage(fs_info, page)) {
    1308           0 :                 *start = page_offset(page);
    1309           0 :                 *end = page_offset(page) + PAGE_SIZE;
    1310           0 :                 return;
    1311             :         }
    1312             : 
    1313           0 :         range_start_bit = spi->dirty_offset +
    1314           0 :                           (offset_in_page(orig_start) >> fs_info->sectorsize_bits);
    1315             : 
    1316             :         /* We should have the page locked, but just in case */
    1317           0 :         spin_lock_irqsave(&subpage->lock, flags);
    1318           0 :         bitmap_next_set_region(subpage->bitmaps, &range_start_bit, &range_end_bit,
    1319           0 :                                spi->dirty_offset + spi->bitmap_nr_bits);
    1320           0 :         spin_unlock_irqrestore(&subpage->lock, flags);
    1321             : 
    1322           0 :         range_start_bit -= spi->dirty_offset;
    1323           0 :         range_end_bit -= spi->dirty_offset;
    1324             : 
    1325           0 :         *start = page_offset(page) + range_start_bit * fs_info->sectorsize;
    1326           0 :         *end = page_offset(page) + range_end_bit * fs_info->sectorsize;
    1327             : }
    1328             : 
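find_next_dirty_byte() leans on bitmap_next_set_region() to turn the per-sector dirty bitmap into a [start, end) bit range. Here is a userspace re-implementation of that primitive for a single 64-bit word, just to show the semantics; the kernel helper handles arbitrary bitmap lengths.

#include <stdint.h>
#include <stdio.h>

/* Find the first set region at or after *rs; on success [*rs, *re) is set. */
static int next_set_region(uint64_t bitmap, unsigned *rs, unsigned *re,
                           unsigned nbits)
{
        unsigned i = *rs;

        while (i < nbits && !((bitmap >> i) & 1))       /* skip clear bits */
                i++;
        if (i >= nbits)
                return -1;                              /* no set region left */
        *rs = i;
        while (i < nbits && ((bitmap >> i) & 1))        /* extend over set bits */
                i++;
        *re = i;
        return 0;
}

int main(void)
{
        uint64_t dirty = 0x1D8;         /* regions [3, 5) and [6, 9) */
        unsigned rs = 0, re = 0;

        while (next_set_region(dirty, &rs, &re, 64) == 0) {
                printf("dirty bits [%u, %u)\n", rs, re);
                rs = re;                /* resume the scan after this region */
        }
        return 0;
}
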
    1329             : /*
    1330             :  * helper for __extent_writepage.  This calls the writepage start hooks,
    1331             :  * and does the loop to map the page into extents and bios.
    1332             :  *
    1333             :  * We return 1 if the IO is started and the page is unlocked,
    1334             :  * 0 if all went well (page still locked)
    1335             :  * < 0 if there were errors (page still locked)
    1336             :  */
    1337           0 : static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
    1338             :                                  struct page *page,
    1339             :                                  struct btrfs_bio_ctrl *bio_ctrl,
    1340             :                                  loff_t i_size,
    1341             :                                  int *nr_ret)
    1342             : {
    1343           0 :         struct btrfs_fs_info *fs_info = inode->root->fs_info;
    1344           0 :         u64 cur = page_offset(page);
    1345           0 :         u64 end = cur + PAGE_SIZE - 1;
    1346           0 :         u64 extent_offset;
    1347           0 :         u64 block_start;
    1348           0 :         struct extent_map *em;
    1349           0 :         int ret = 0;
    1350           0 :         int nr = 0;
    1351             : 
    1352           0 :         ret = btrfs_writepage_cow_fixup(page);
    1353           0 :         if (ret) {
    1354             :                 /* Fixup worker will requeue */
    1355           0 :                 redirty_page_for_writepage(bio_ctrl->wbc, page);
    1356           0 :                 unlock_page(page);
    1357           0 :                 return 1;
    1358             :         }
    1359             : 
    1360           0 :         bio_ctrl->end_io_func = end_bio_extent_writepage;
    1361           0 :         while (cur <= end) {
    1362           0 :                 u64 disk_bytenr;
    1363           0 :                 u64 em_end;
    1364           0 :                 u64 dirty_range_start = cur;
    1365           0 :                 u64 dirty_range_end;
    1366           0 :                 u32 iosize;
    1367             : 
    1368           0 :                 if (cur >= i_size) {
    1369           0 :                         btrfs_writepage_endio_finish_ordered(inode, page, cur,
    1370             :                                                              end, true);
    1371             :                         /*
    1372             :                          * This range is beyond i_size, thus we don't need to
    1373             :                          * bother writing back.
    1374             :                          * But we still need to clear the dirty subpage bit, or
    1375             :                          * the next time the page gets dirtied, we will try to
    1376             :                          * writeback the sectors with subpage dirty bits,
    1377             :                          * causing writeback without ordered extent.
    1378             :                          */
    1379           0 :                         btrfs_page_clear_dirty(fs_info, page, cur, end + 1 - cur);
    1380           0 :                         break;
    1381             :                 }
    1382             : 
    1383           0 :                 find_next_dirty_byte(fs_info, page, &dirty_range_start,
    1384             :                                      &dirty_range_end);
    1385           0 :                 if (cur < dirty_range_start) {
    1386           0 :                         cur = dirty_range_start;
    1387           0 :                         continue;
    1388             :                 }
    1389             : 
    1390           0 :                 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1);
    1391           0 :                 if (IS_ERR(em)) {
    1392           0 :                         ret = PTR_ERR_OR_ZERO(em);
    1393           0 :                         goto out_error;
    1394             :                 }
    1395             : 
    1396           0 :                 extent_offset = cur - em->start;
    1397           0 :                 em_end = extent_map_end(em);
    1398           0 :                 ASSERT(cur <= em_end);
    1399           0 :                 ASSERT(cur < end);
    1400           0 :                 ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize));
    1401           0 :                 ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize));
    1402             : 
    1403           0 :                 block_start = em->block_start;
    1404           0 :                 disk_bytenr = em->block_start + extent_offset;
    1405             : 
    1406           0 :                 ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags));
    1407           0 :                 ASSERT(block_start != EXTENT_MAP_HOLE);
    1408           0 :                 ASSERT(block_start != EXTENT_MAP_INLINE);
    1409             : 
    1410             :                 /*
     1411             :                  * Note that em_end from extent_map_end() and dirty_range_end from
     1412             :                  * find_next_dirty_byte() are both exclusive.
    1413             :                  */
    1414           0 :                 iosize = min(min(em_end, end + 1), dirty_range_end) - cur;
    1415           0 :                 free_extent_map(em);
    1416           0 :                 em = NULL;
    1417             : 
    1418           0 :                 btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
    1419           0 :                 if (!PageWriteback(page)) {
    1420           0 :                         btrfs_err(inode->root->fs_info,
    1421             :                                    "page %lu not writeback, cur %llu end %llu",
    1422             :                                page->index, cur, end);
    1423             :                 }
    1424             : 
    1425             :                 /*
    1426             :                  * Although the PageDirty bit is cleared before entering this
     1427             :                  * function, the subpage dirty bit is not.
     1428             :                  * So clear the subpage dirty bit here so that next time we
     1429             :                  * won't submit the page for a range already written to disk.
    1430             :                  */
    1431           0 :                 btrfs_page_clear_dirty(fs_info, page, cur, iosize);
    1432             : 
    1433           0 :                 submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
    1434           0 :                                    cur - page_offset(page));
    1435           0 :                 cur += iosize;
    1436           0 :                 nr++;
    1437             :         }
    1438             : 
    1439           0 :         btrfs_page_assert_not_dirty(fs_info, page);
    1440           0 :         *nr_ret = nr;
    1441           0 :         return 0;
    1442             : 
    1443             : out_error:
    1444             :         /*
     1445             :          * If we finish without a problem, we should not only clear the page
     1446             :          * dirty bit, but also empty the subpage dirty bits.
    1447             :          */
    1448           0 :         *nr_ret = nr;
    1449           0 :         return ret;
    1450             : }
    1451             : 
    1452             : /*
     1453             :  * The writepage semantics are similar to regular writepage.  Extent
     1454             :  * records are inserted to lock ranges in the tree, and as dirty areas
     1455             :  * are found, they are marked writeback.  Then the lock bits are removed
     1456             :  * and the end_io handler clears the writeback ranges.
    1457             :  *
    1458             :  * Return 0 if everything goes well.
    1459             :  * Return <0 for error.
    1460             :  */
    1461           0 : static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl)
    1462             : {
    1463           0 :         struct folio *folio = page_folio(page);
    1464           0 :         struct inode *inode = page->mapping->host;
    1465           0 :         const u64 page_start = page_offset(page);
    1466           0 :         const u64 page_end = page_start + PAGE_SIZE - 1;
    1467           0 :         int ret;
    1468           0 :         int nr = 0;
    1469           0 :         size_t pg_offset;
    1470           0 :         loff_t i_size = i_size_read(inode);
    1471           0 :         unsigned long end_index = i_size >> PAGE_SHIFT;
    1472             : 
    1473           0 :         trace___extent_writepage(page, inode, bio_ctrl->wbc);
    1474             : 
    1475           0 :         WARN_ON(!PageLocked(page));
    1476             : 
    1477           0 :         pg_offset = offset_in_page(i_size);
    1478           0 :         if (page->index > end_index ||
    1479           0 :            (page->index == end_index && !pg_offset)) {
    1480           0 :                 folio_invalidate(folio, 0, folio_size(folio));
    1481           0 :                 folio_unlock(folio);
    1482           0 :                 return 0;
    1483             :         }
    1484             : 
    1485           0 :         if (page->index == end_index)
    1486           0 :                 memzero_page(page, pg_offset, PAGE_SIZE - pg_offset);
    1487             : 
    1488           0 :         ret = set_page_extent_mapped(page);
    1489           0 :         if (ret < 0)
    1490           0 :                 goto done;
    1491             : 
    1492           0 :         ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc);
    1493           0 :         if (ret == 1)
    1494             :                 return 0;
    1495           0 :         if (ret)
    1496           0 :                 goto done;
    1497             : 
    1498           0 :         ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr);
    1499           0 :         if (ret == 1)
    1500             :                 return 0;
    1501             : 
    1502           0 :         bio_ctrl->wbc->nr_to_write--;
    1503             : 
    1504           0 : done:
    1505           0 :         if (nr == 0) {
    1506             :                 /* make sure the mapping tag for page dirty gets cleared */
    1507           0 :                 set_page_writeback(page);
    1508           0 :                 end_page_writeback(page);
    1509             :         }
    1510           0 :         if (ret)
    1511           0 :                 end_extent_writepage(page, ret, page_start, page_end);
    1512           0 :         unlock_page(page);
    1513           0 :         ASSERT(ret <= 0);
    1514           0 :         return ret;
    1515             : }
    1516             : 
    1517           0 : void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
    1518             : {
    1519           0 :         wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
    1520             :                        TASK_UNINTERRUPTIBLE);
    1521           0 : }
    1522             : 
    1523             : /*
    1524             :  * Lock extent buffer status and pages for writeback.
    1525             :  *
    1526             :  * Return %false if the extent buffer doesn't need to be submitted (e.g. the
     1527             :  * extent buffer is not dirty).
     1528             :  * Return %true if the extent buffer is submitted to a bio.
    1529             :  */
    1530           0 : static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb,
    1531             :                           struct writeback_control *wbc)
    1532             : {
    1533           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    1534           0 :         bool ret = false;
    1535             : 
    1536           0 :         btrfs_tree_lock(eb);
    1537           0 :         while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) {
    1538           0 :                 btrfs_tree_unlock(eb);
    1539           0 :                 if (wbc->sync_mode != WB_SYNC_ALL)
    1540             :                         return false;
    1541           0 :                 wait_on_extent_buffer_writeback(eb);
    1542           0 :                 btrfs_tree_lock(eb);
    1543             :         }
    1544             : 
    1545             :         /*
     1546             :          * We need to do this to prevent races with anyone checking if the eb is
     1547             :          * under IO, since we can end up having no IO bits set for a short period
    1548             :          * of time.
    1549             :          */
    1550           0 :         spin_lock(&eb->refs_lock);
    1551           0 :         if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
    1552           0 :                 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
    1553           0 :                 spin_unlock(&eb->refs_lock);
    1554           0 :                 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
    1555           0 :                 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
    1556           0 :                                          -eb->len,
    1557             :                                          fs_info->dirty_metadata_batch);
    1558           0 :                 ret = true;
    1559             :         } else {
    1560           0 :                 spin_unlock(&eb->refs_lock);
    1561             :         }
    1562           0 :         btrfs_tree_unlock(eb);
    1563           0 :         return ret;
    1564             : }
    1565             : 
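The core of lock_extent_buffer_for_io() above is trading the DIRTY bit for the WRITEBACK bit inside a single critical section, so observers never see a window with neither bit set. A sketch of that transition using a plain pthread mutex and invented flag names; the kernel version additionally waits out an already-running writeback first.

#include <pthread.h>
#include <stdbool.h>

enum { F_DIRTY = 1 << 0, F_WRITEBACK = 1 << 1 };

struct buf {
        pthread_mutex_t lock;
        unsigned int flags;
};

/* Returns true if the caller is now responsible for submitting the IO. */
static bool start_writeback(struct buf *b)
{
        bool queued = false;

        pthread_mutex_lock(&b->lock);
        if (b->flags & F_DIRTY) {
                b->flags &= ~F_DIRTY;           /* clear dirty ...         */
                b->flags |= F_WRITEBACK;        /* ... and set writeback   */
                queued = true;                  /* in one critical section */
        }
        pthread_mutex_unlock(&b->lock);
        return queued;
}

int main(void)
{
        struct buf b = { PTHREAD_MUTEX_INITIALIZER, F_DIRTY };

        return start_writeback(&b) ? 0 : 1;     /* first caller wins */
}
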
    1566           0 : static void set_btree_ioerr(struct extent_buffer *eb)
    1567             : {
    1568           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    1569             : 
    1570           0 :         set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
    1571             : 
    1572             :         /*
     1573             :          * A read may stumble upon this buffer later; make sure that it sees
     1574             :          * the error rather than stale data.
    1575             :          */
    1576           0 :         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
    1577             : 
    1578             :         /*
     1579             :          * We need to set the io error on the mapping as well, because a write
     1580             :          * error will flip the file system readonly, and then syncfs() will
     1581             :          * return 0 (since we are readonly) unless we modify the err seq for
     1582             :          * the superblock.
    1583             :          */
    1584           0 :         mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO);
    1585             : 
    1586             :         /*
    1587             :          * If writeback for a btree extent that doesn't belong to a log tree
    1588             :          * failed, increment the counter transaction->eb_write_errors.
    1589             :          * We do this because while the transaction is running and before it's
    1590             :          * committing (when we call filemap_fdata[write|wait]_range against
    1591             :          * the btree inode), we might have
    1592             :          * btree_inode->i_mapping->a_ops->writepages() called by the VM - if it
    1593             :          * returns an error or an error happens during writeback, when we're
    1594             :          * committing the transaction we wouldn't know about it, since the pages
     1595             :          * may no longer be dirty nor marked for writeback (if a
    1596             :          * subsequent modification to the extent buffer didn't happen before the
    1597             :          * transaction commit), which makes filemap_fdata[write|wait]_range not
    1598             :          * able to find the pages tagged with SetPageError at transaction
    1599             :          * commit time. So if this happens we must abort the transaction,
    1600             :          * otherwise we commit a super block with btree roots that point to
     1601             :          * btree nodes/leaves whose content on disk is invalid - either garbage
    1602             :          * or the content of some node/leaf from a past generation that got
    1603             :          * cowed or deleted and is no longer valid.
    1604             :          *
    1605             :          * Note: setting AS_EIO/AS_ENOSPC in the btree inode's i_mapping would
    1606             :          * not be enough - we need to distinguish between log tree extents vs
    1607             :          * non-log tree extents, and the next filemap_fdatawait_range() call
    1608             :          * will catch and clear such errors in the mapping - and that call might
    1609             :          * be from a log sync and not from a transaction commit. Also, checking
    1610             :          * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is
    1611             :          * not done and would not be reliable - the eb might have been released
    1612             :          * from memory and reading it back again means that flag would not be
    1613             :          * set (since it's a runtime flag, not persisted on disk).
    1614             :          *
     1615             :          * Using the flags below in the btree inode also makes us achieve the
     1616             :          * goal of handling the case where writepages() returns success after
     1617             :          * starting writeback for all dirty pages, but before
     1618             :          * filemap_fdatawait_range() is called the writeback for all those pages
     1619             :          * has already finished with errors - because we were not using
     1620             :          * AS_EIO/AS_ENOSPC, filemap_fdatawait_range() would return success, as
     1621             :          * it could not know that writeback errors happened (the pages were no
     1622             :          * longer tagged for writeback).
    1623             :          */
    1624           0 :         switch (eb->log_index) {
    1625           0 :         case -1:
    1626           0 :                 set_bit(BTRFS_FS_BTREE_ERR, &fs_info->flags);
    1627             :                 break;
    1628           0 :         case 0:
    1629           0 :                 set_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
    1630             :                 break;
    1631           0 :         case 1:
    1632           0 :                 set_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
    1633             :                 break;
    1634           0 :         default:
    1635           0 :                 BUG(); /* unexpected, logic error */
    1636             :         }
    1637           0 : }
    1638             : 
    1639             : /*
     1640             :  * The endio specific version, which won't touch any spinlock that is unsafe
     1641             :  * in endio context.
    1642             :  */
    1643           0 : static struct extent_buffer *find_extent_buffer_nolock(
    1644             :                 struct btrfs_fs_info *fs_info, u64 start)
    1645             : {
    1646           0 :         struct extent_buffer *eb;
    1647             : 
    1648           0 :         rcu_read_lock();
    1649           0 :         eb = radix_tree_lookup(&fs_info->buffer_radix,
    1650           0 :                                start >> fs_info->sectorsize_bits);
    1651           0 :         if (eb && atomic_inc_not_zero(&eb->refs)) {
    1652           0 :                 rcu_read_unlock();
    1653           0 :                 return eb;
    1654             :         }
    1655           0 :         rcu_read_unlock();
    1656           0 :         return NULL;
    1657             : }
    1658             : 
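find_extent_buffer_nolock() pairs an RCU-protected lookup with atomic_inc_not_zero(): a reference is taken only if the refcount has not already dropped to zero, i.e. the object is not on its way to being freed. A userspace sketch of just the inc-not-zero step using C11 atomics; the compare-exchange loop is the standard way to express it.

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the count has not already reached zero. */
static bool inc_not_zero(atomic_int *refs)
{
        int old = atomic_load(refs);

        while (old != 0) {
                /* CAS reloads old and retries if another thread raced us. */
                if (atomic_compare_exchange_weak(refs, &old, old + 1))
                        return true;
        }
        return false;   /* already zero: the object is being freed */
}

int main(void)
{
        atomic_int live = 1;
        atomic_int dying = 0;

        return (inc_not_zero(&live) && !inc_not_zero(&dying)) ? 0 : 1;
}
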
    1659           0 : static void extent_buffer_write_end_io(struct btrfs_bio *bbio)
    1660             : {
    1661           0 :         struct extent_buffer *eb = bbio->private;
    1662           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    1663           0 :         bool uptodate = !bbio->bio.bi_status;
    1664           0 :         struct bvec_iter_all iter_all;
    1665           0 :         struct bio_vec *bvec;
    1666           0 :         u32 bio_offset = 0;
    1667             : 
    1668           0 :         if (!uptodate)
    1669           0 :                 set_btree_ioerr(eb);
    1670             : 
    1671           0 :         bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
    1672           0 :                 u64 start = eb->start + bio_offset;
    1673           0 :                 struct page *page = bvec->bv_page;
    1674           0 :                 u32 len = bvec->bv_len;
    1675             : 
    1676           0 :                 if (!uptodate)
    1677           0 :                         btrfs_page_clear_uptodate(fs_info, page, start, len);
    1678           0 :                 btrfs_page_clear_writeback(fs_info, page, start, len);
    1679           0 :                 bio_offset += len;
    1680             :         }
    1681             : 
    1682           0 :         clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
    1683           0 :         smp_mb__after_atomic();
    1684           0 :         wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
    1685             : 
    1686           0 :         bio_put(&bbio->bio);
    1687           0 : }
    1688             : 
    1689           0 : static void prepare_eb_write(struct extent_buffer *eb)
    1690             : {
    1691           0 :         u32 nritems;
    1692           0 :         unsigned long start;
    1693           0 :         unsigned long end;
    1694             : 
    1695           0 :         clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
    1696             : 
    1697             :         /* Set btree blocks beyond nritems with 0 to avoid stale content */
    1698           0 :         nritems = btrfs_header_nritems(eb);
    1699           0 :         if (btrfs_header_level(eb) > 0) {
    1700           0 :                 end = btrfs_node_key_ptr_offset(eb, nritems);
    1701           0 :                 memzero_extent_buffer(eb, end, eb->len - end);
    1702             :         } else {
    1703             :                 /*
    1704             :                  * Leaf:
    1705             :                  * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
    1706             :                  */
    1707           0 :                 start = btrfs_item_nr_offset(eb, nritems);
    1708           0 :                 end = btrfs_item_nr_offset(eb, 0);
    1709           0 :                 if (nritems == 0)
    1710           0 :                         end += BTRFS_LEAF_DATA_SIZE(eb->fs_info);
    1711             :                 else
    1712           0 :                         end += btrfs_item_offset(eb, nritems - 1);
    1713           0 :                 memzero_extent_buffer(eb, start, end - start);
    1714             :         }
    1715           0 : }
    1716             : 
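prepare_eb_write() zeroes the unused part of a tree block so stale bytes never reach disk: in a leaf, item headers grow downward from the block header while item data grows upward from the end, and the slack in between is what gets cleared. The sketch below mimics only that leaf-layout arithmetic; HEADER_SIZE and ITEM_SIZE are invented stand-ins, not the real btrfs offsets.

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE  4096u
#define HEADER_SIZE 101u        /* invented stand-in for the block header */
#define ITEM_SIZE   25u         /* invented stand-in for one item header  */

/*
 * Item headers occupy [HEADER_SIZE, HEADER_SIZE + nritems * ITEM_SIZE);
 * item data occupies [data_off, BLOCK_SIZE).  Zero the gap in between.
 */
static void zero_leaf_gap(unsigned char *block, unsigned int nritems,
                          unsigned int data_off)
{
        unsigned int gap_start = HEADER_SIZE + nritems * ITEM_SIZE;

        if (gap_start < data_off) {
                memset(block + gap_start, 0, data_off - gap_start);
                printf("zeroed [%u, %u)\n", gap_start, data_off);
        }
}

int main(void)
{
        unsigned char block[BLOCK_SIZE];

        memset(block, 0xAA, sizeof(block));     /* pretend stale content */
        zero_leaf_gap(block, 10, 3000);
        return 0;
}
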
    1717           0 : static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
    1718             :                                             struct writeback_control *wbc)
    1719             : {
    1720           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    1721           0 :         struct btrfs_bio *bbio;
    1722             : 
    1723           0 :         prepare_eb_write(eb);
    1724             : 
    1725           0 :         bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
    1726             :                                REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc),
    1727             :                                eb->fs_info, extent_buffer_write_end_io, eb);
    1728           0 :         bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
    1729           0 :         bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
    1730           0 :         wbc_init_bio(wbc, &bbio->bio);
    1731           0 :         bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
    1732           0 :         bbio->file_offset = eb->start;
    1733           0 :         if (fs_info->nodesize < PAGE_SIZE) {
    1734           0 :                 struct page *p = eb->pages[0];
    1735             : 
    1736           0 :                 lock_page(p);
    1737           0 :                 btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len);
    1738           0 :                 if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start,
    1739           0 :                                                        eb->len)) {
    1740           0 :                         clear_page_dirty_for_io(p);
    1741           0 :                         wbc->nr_to_write--;
    1742             :                 }
    1743           0 :                 __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p));
    1744           0 :                 wbc_account_cgroup_owner(wbc, p, eb->len);
    1745           0 :                 unlock_page(p);
    1746             :         } else {
    1747           0 :                 for (int i = 0; i < num_extent_pages(eb); i++) {
    1748           0 :                         struct page *p = eb->pages[i];
    1749             : 
    1750           0 :                         lock_page(p);
    1751           0 :                         clear_page_dirty_for_io(p);
    1752           0 :                         set_page_writeback(p);
    1753           0 :                         __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0);
    1754           0 :                         wbc_account_cgroup_owner(wbc, p, PAGE_SIZE);
    1755           0 :                         wbc->nr_to_write--;
    1756           0 :                         unlock_page(p);
    1757             :                 }
    1758             :         }
    1759           0 :         btrfs_submit_bio(bbio, 0);
    1760           0 : }
    1761             : 
    1762             : /*
    1763             :  * Submit one subpage btree page.
    1764             :  *
     1765             :  * The main difference from submit_eb_page() is:
    1766             :  * - Page locking
    1767             :  *   For subpage, we don't rely on page locking at all.
    1768             :  *
    1769             :  * - Flush write bio
     1770             :  *   We only flush the bio if we may be unable to fit the current extent
     1771             :  *   buffers into the current bio.
    1772             :  *
    1773             :  * Return >=0 for the number of submitted extent buffers.
    1774             :  * Return <0 for fatal error.
    1775             :  */
    1776           0 : static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
    1777             : {
    1778           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
    1779           0 :         int submitted = 0;
    1780           0 :         u64 page_start = page_offset(page);
    1781           0 :         int bit_start = 0;
    1782           0 :         int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits;
    1783             : 
     1784             :         /* Lock and write each dirty extent buffer in the range */
    1785           0 :         while (bit_start < fs_info->subpage_info->bitmap_nr_bits) {
    1786           0 :                 struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
    1787           0 :                 struct extent_buffer *eb;
    1788           0 :                 unsigned long flags;
    1789           0 :                 u64 start;
    1790             : 
    1791             :                 /*
    1792             :                  * Take private lock to ensure the subpage won't be detached
    1793             :                  * in the meantime.
    1794             :                  */
    1795           0 :                 spin_lock(&page->mapping->private_lock);
    1796           0 :                 if (!PagePrivate(page)) {
    1797           0 :                         spin_unlock(&page->mapping->private_lock);
    1798             :                         break;
    1799             :                 }
    1800           0 :                 spin_lock_irqsave(&subpage->lock, flags);
    1801           0 :                 if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
    1802             :                               subpage->bitmaps)) {
    1803           0 :                         spin_unlock_irqrestore(&subpage->lock, flags);
    1804           0 :                         spin_unlock(&page->mapping->private_lock);
    1805           0 :                         bit_start++;
    1806           0 :                         continue;
    1807             :                 }
    1808             : 
    1809           0 :                 start = page_start + bit_start * fs_info->sectorsize;
    1810           0 :                 bit_start += sectors_per_node;
    1811             : 
    1812             :                 /*
    1813             :                  * Here we just want to grab the eb without touching extra
    1814             :                  * spin locks, so call find_extent_buffer_nolock().
    1815             :                  */
    1816           0 :                 eb = find_extent_buffer_nolock(fs_info, start);
    1817           0 :                 spin_unlock_irqrestore(&subpage->lock, flags);
    1818           0 :                 spin_unlock(&page->mapping->private_lock);
    1819             : 
    1820             :                 /*
     1821             :                  * The eb has already reached 0 refs, thus find_extent_buffer()
     1822             :                  * doesn't return it. We don't need to write back such an eb
    1823             :                  * anyway.
    1824             :                  */
    1825           0 :                 if (!eb)
    1826           0 :                         continue;
    1827             : 
    1828           0 :                 if (lock_extent_buffer_for_io(eb, wbc)) {
    1829           0 :                         write_one_eb(eb, wbc);
    1830           0 :                         submitted++;
    1831             :                 }
    1832           0 :                 free_extent_buffer(eb);
    1833             :         }
    1834           0 :         return submitted;
    1835             : }
    1836             : 
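submit_eb_subpage() above scans the per-page dirty bitmap sector by sector, but once it finds a dirty bit it advances by a whole node, since one extent buffer covers nodesize / sectorsize consecutive sectors. A toy sketch of that stride logic with invented geometry:

#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_PAGE 16u    /* e.g. 64K pages with 4K sectors */
#define SECTORS_PER_NODE 4u     /* e.g. 16K nodes with 4K sectors */

static void scan_dirty(uint32_t dirty_bitmap)
{
        unsigned int bit = 0;

        while (bit < SECTORS_PER_PAGE) {
                if (!((dirty_bitmap >> bit) & 1)) {
                        bit++;                  /* clean sector: step by one */
                        continue;
                }
                printf("submit eb starting at sector %u\n", bit);
                bit += SECTORS_PER_NODE;        /* skip the rest of the node */
        }
}

int main(void)
{
        scan_dirty(0x110);      /* ebs dirty at sectors 4 and 8 */
        return 0;
}
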
    1837             : /*
    1838             :  * Submit all page(s) of one extent buffer.
    1839             :  *
    1840             :  * @page:       the page of one extent buffer
     1841             :  * @eb_context: used to determine if we need to submit this page; if the
     1842             :  *              current page belongs to this eb, we don't need to submit it
    1843             :  *
    1844             :  * The caller should pass each page in their bytenr order, and here we use
    1845             :  * @eb_context to determine if we have submitted pages of one extent buffer.
    1846             :  *
    1847             :  * If we have, we just skip until we hit a new page that doesn't belong to
     1848             :  * the current @eb_context.
    1849             :  *
    1850             :  * If not, we submit all the page(s) of the extent buffer.
    1851             :  *
    1852             :  * Return >0 if we have submitted the extent buffer successfully.
    1853             :  * Return 0 if we don't need to submit the page, as it's already submitted by
    1854             :  * previous call.
    1855             :  * Return <0 for fatal error.
    1856             :  */
    1857           0 : static int submit_eb_page(struct page *page, struct writeback_control *wbc,
    1858             :                           struct extent_buffer **eb_context)
    1859             : {
    1860           0 :         struct address_space *mapping = page->mapping;
    1861           0 :         struct btrfs_block_group *cache = NULL;
    1862           0 :         struct extent_buffer *eb;
    1863           0 :         int ret;
    1864             : 
    1865           0 :         if (!PagePrivate(page))
    1866             :                 return 0;
    1867             : 
    1868           0 :         if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
    1869           0 :                 return submit_eb_subpage(page, wbc);
    1870             : 
    1871           0 :         spin_lock(&mapping->private_lock);
    1872           0 :         if (!PagePrivate(page)) {
    1873           0 :                 spin_unlock(&mapping->private_lock);
    1874           0 :                 return 0;
    1875             :         }
    1876             : 
    1877           0 :         eb = (struct extent_buffer *)page->private;
    1878             : 
    1879             :         /*
     1880             :          * This shouldn't happen and would normally be a BUG_ON, but there is
     1881             :          * no point in crashing the machine for something we can survive.
    1882             :          */
    1883           0 :         if (WARN_ON(!eb)) {
    1884           0 :                 spin_unlock(&mapping->private_lock);
    1885           0 :                 return 0;
    1886             :         }
    1887             : 
    1888           0 :         if (eb == *eb_context) {
    1889           0 :                 spin_unlock(&mapping->private_lock);
    1890           0 :                 return 0;
    1891             :         }
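                      :         /*
                      :          * Try to pin the eb: if its refcount has already dropped to
                      :          * zero it is being freed, so skip it instead of submitting.
                      :          */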
    1892           0 :         ret = atomic_inc_not_zero(&eb->refs);
    1893           0 :         spin_unlock(&mapping->private_lock);
    1894           0 :         if (!ret)
    1895             :                 return 0;
    1896             : 
    1897           0 :         if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
    1898             :                 /*
     1899             :                  * If for_sync, this hole will be filled by a
     1900             :                  * transaction commit.
    1901             :                  */
    1902           0 :                 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
    1903             :                         ret = -EAGAIN;
    1904             :                 else
    1905           0 :                         ret = 0;
    1906           0 :                 free_extent_buffer(eb);
    1907           0 :                 return ret;
    1908             :         }
    1909             : 
    1910           0 :         *eb_context = eb;
    1911             : 
    1912           0 :         if (!lock_extent_buffer_for_io(eb, wbc)) {
    1913           0 :                 btrfs_revert_meta_write_pointer(cache, eb);
    1914           0 :                 if (cache)
    1915           0 :                         btrfs_put_block_group(cache);
    1916           0 :                 free_extent_buffer(eb);
    1917           0 :                 return 0;
    1918             :         }
    1919           0 :         if (cache) {
    1920             :                 /*
    1921             :                  * Implies write in zoned mode. Mark the last eb in a block group.
    1922             :                  */
    1923           0 :                 btrfs_schedule_zone_finish_bg(cache, eb);
    1924           0 :                 btrfs_put_block_group(cache);
    1925             :         }
    1926           0 :         write_one_eb(eb, wbc);
    1927           0 :         free_extent_buffer(eb);
    1928           0 :         return 1;
    1929             : }
    1930             : 
    1931           0 : int btree_write_cache_pages(struct address_space *mapping,
    1932             :                                    struct writeback_control *wbc)
    1933             : {
    1934           0 :         struct extent_buffer *eb_context = NULL;
    1935           0 :         struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
    1936           0 :         int ret = 0;
    1937           0 :         int done = 0;
    1938           0 :         int nr_to_write_done = 0;
    1939           0 :         struct folio_batch fbatch;
    1940           0 :         unsigned int nr_folios;
    1941           0 :         pgoff_t index;
    1942           0 :         pgoff_t end;            /* Inclusive */
    1943           0 :         int scanned = 0;
    1944           0 :         xa_mark_t tag;
    1945             : 
    1946           0 :         folio_batch_init(&fbatch);
    1947           0 :         if (wbc->range_cyclic) {
    1948           0 :                 index = mapping->writeback_index; /* Start from prev offset */
    1949           0 :                 end = -1;
    1950             :                 /*
     1951             :                  * A walk starting at the beginning never needs to wrap back
     1952             :                  * over the range, so mark it as scanned.
    1953             :                  */
    1954           0 :                 scanned = (index == 0);
    1955             :         } else {
    1956           0 :                 index = wbc->range_start >> PAGE_SHIFT;
    1957           0 :                 end = wbc->range_end >> PAGE_SHIFT;
    1958           0 :                 scanned = 1;
    1959             :         }
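                      :         /*
                      :          * For data-integrity writeback, work on the TOWRITE tag set by
                      :          * tag_pages_for_writeback() below, so that pages dirtied during
                      :          * the walk are not picked up again, avoiding livelock.
                      :          */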
    1960           0 :         if (wbc->sync_mode == WB_SYNC_ALL)
    1961             :                 tag = PAGECACHE_TAG_TOWRITE;
    1962             :         else
    1963           0 :                 tag = PAGECACHE_TAG_DIRTY;
    1964           0 :         btrfs_zoned_meta_io_lock(fs_info);
    1965           0 : retry:
    1966           0 :         if (wbc->sync_mode == WB_SYNC_ALL)
    1967           0 :                 tag_pages_for_writeback(mapping, index, end);
    1968           0 :         while (!done && !nr_to_write_done && (index <= end) &&
    1969           0 :                (nr_folios = filemap_get_folios_tag(mapping, &index, end,
    1970             :                                             tag, &fbatch))) {
    1971             :                 unsigned i;
    1972             : 
    1973           0 :                 for (i = 0; i < nr_folios; i++) {
    1974           0 :                         struct folio *folio = fbatch.folios[i];
    1975             : 
    1976           0 :                         ret = submit_eb_page(&folio->page, wbc, &eb_context);
    1977           0 :                         if (ret == 0)
    1978           0 :                                 continue;
    1979           0 :                         if (ret < 0) {
    1980             :                                 done = 1;
    1981             :                                 break;
    1982             :                         }
    1983             : 
    1984             :                         /*
     1985             :                          * The filesystem may choose to bump up nr_to_write.
     1986             :                          * We have to make sure to honor the new nr_to_write
     1987             :                          * at any time.
    1988             :                          */
    1989           0 :                         nr_to_write_done = wbc->nr_to_write <= 0;
    1990             :                 }
    1991           0 :                 folio_batch_release(&fbatch);
    1992           0 :                 cond_resched();
    1993             :         }
    1994           0 :         if (!scanned && !done) {
    1995             :                 /*
    1996             :                  * We hit the last page and there is more work to be done: wrap
    1997             :                  * back to the start of the file
    1998             :                  */
    1999           0 :                 scanned = 1;
    2000           0 :                 index = 0;
    2001           0 :                 goto retry;
    2002             :         }
    2003             :         /*
    2004             :          * If something went wrong, don't allow any metadata write bio to be
    2005             :          * submitted.
    2006             :          *
     2007             :          * This prevents use-after-free if we had dirty pages not
     2008             :          * cleaned up, which can still happen with fuzzed images:
     2009             :          *
     2010             :          * - Bad extent tree
     2011             :          *   Allows an existing tree block to be allocated for other trees.
     2012             :          *
     2013             :          * - Log tree operations
     2014             :          *   Existing tree blocks get allocated to the log tree, which bumps
     2015             :          *   their generation, then get cleaned in tree re-balance.
     2016             :          *   Such a tree block will not be written back, since it's clean,
     2017             :          *   thus no WRITTEN flag is set.
     2018             :          *   And after the log is written back, this tree block is not tracked
     2019             :          *   by any dirty extent_io_tree.
     2020             :          *
     2021             :          * - Offending tree block gets re-dirtied from its original owner
     2022             :          *   Since it has a bumped generation and no WRITTEN flag, it can be
     2023             :          *   reused without COWing. This tree block will not be tracked by
     2024             :          *   btrfs_transaction::dirty_pages.
     2025             :          *
     2026             :          *   Now such a dirty tree block will not be cleaned by any dirty
     2027             :          *   extent io tree. Thus we don't want to submit such a wild eb
     2028             :          *   if the fs already has errors.
     2029             :          *
     2030             :          * We can get ret > 0 from submit_eb_page() indicating how many ebs
     2031             :          * were submitted. Reset it to 0 to avoid false alerts for the caller.
    2032             :          */
    2033           0 :         if (ret > 0)
    2034             :                 ret = 0;
    2035           0 :         if (!ret && BTRFS_FS_ERROR(fs_info))
    2036           0 :                 ret = -EROFS;
    2037           0 :         btrfs_zoned_meta_io_unlock(fs_info);
    2038           0 :         return ret;
    2039             : }
    2040             : 
    2041             : /*
    2042             :  * Walk the list of dirty pages of the given address space and write all of them.
    2043             :  *
    2044             :  * @mapping:   address space structure to write
    2045             :  * @wbc:       subtract the number of written pages from *@wbc->nr_to_write
    2046             :  * @bio_ctrl:  holds context for the write, namely the bio
    2047             :  *
    2048             :  * If a page is already under I/O, write_cache_pages() skips it, even
    2049             :  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
    2050             :  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
    2051             :  * and msync() need to guarantee that all the data which was dirty at the time
     2052             :  * and msync() need to guarantee that all the data which was dirty at the time
     2053             :  * the call was made gets new I/O started against it.  If wbc->sync_mode is
    2054             :  * existing IO to complete.
    2055             :  */
    2056           0 : static int extent_write_cache_pages(struct address_space *mapping,
    2057             :                              struct btrfs_bio_ctrl *bio_ctrl)
    2058             : {
    2059           0 :         struct writeback_control *wbc = bio_ctrl->wbc;
    2060           0 :         struct inode *inode = mapping->host;
    2061           0 :         int ret = 0;
    2062           0 :         int done = 0;
    2063           0 :         int nr_to_write_done = 0;
    2064           0 :         struct folio_batch fbatch;
    2065           0 :         unsigned int nr_folios;
    2066           0 :         pgoff_t index;
    2067           0 :         pgoff_t end;            /* Inclusive */
    2068           0 :         pgoff_t done_index;
    2069           0 :         int range_whole = 0;
    2070           0 :         int scanned = 0;
    2071           0 :         xa_mark_t tag;
    2072             : 
    2073             :         /*
    2074             :          * We have to hold onto the inode so that ordered extents can do their
     2075             :          * work when the IO finishes.  The alternative is to fail adding an
     2076             :          * ordered extent when the igrab() fails there, which is a huge pain
     2077             :          * to deal with, so instead just hold onto the inode throughout the
     2078             :          * writepages operation.  If igrab() fails here we are freeing up the
     2079             :          * inode anyway and we'd rather not waste our time writing out stuff
     2080             :          * that is going to be truncated.
    2081             :          */
    2082           0 :         if (!igrab(inode))
    2083             :                 return 0;
    2084             : 
    2085           0 :         folio_batch_init(&fbatch);
    2086           0 :         if (wbc->range_cyclic) {
    2087           0 :                 index = mapping->writeback_index; /* Start from prev offset */
    2088           0 :                 end = -1;
    2089             :                 /*
     2090             :                  * A walk starting at the beginning never needs to wrap back
     2091             :                  * over the range, so mark it as scanned.
    2092             :                  */
    2093           0 :                 scanned = (index == 0);
    2094             :         } else {
    2095           0 :                 index = wbc->range_start >> PAGE_SHIFT;
    2096           0 :                 end = wbc->range_end >> PAGE_SHIFT;
    2097           0 :                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
    2098           0 :                         range_whole = 1;
    2099             :                 scanned = 1;
    2100             :         }
    2101             : 
    2102             :         /*
     2103             :          * We do the tagged writepage as long as the snapshot flush bit is set
     2104             :          * and we are the first one to do the filemap_flush() on this inode.
    2105             :          *
    2106             :          * The nr_to_write == LONG_MAX is needed to make sure other flushers do
    2107             :          * not race in and drop the bit.
    2108             :          */
    2109           0 :         if (range_whole && wbc->nr_to_write == LONG_MAX &&
    2110             :             test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
    2111           0 :                                &BTRFS_I(inode)->runtime_flags))
    2112           0 :                 wbc->tagged_writepages = 1;
    2113             : 
    2114           0 :         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
    2115             :                 tag = PAGECACHE_TAG_TOWRITE;
    2116             :         else
    2117           0 :                 tag = PAGECACHE_TAG_DIRTY;
    2118           0 : retry:
    2119           0 :         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
    2120           0 :                 tag_pages_for_writeback(mapping, index, end);
    2121           0 :         done_index = index;
    2122           0 :         while (!done && !nr_to_write_done && (index <= end) &&
    2123           0 :                         (nr_folios = filemap_get_folios_tag(mapping, &index,
    2124             :                                                         end, tag, &fbatch))) {
    2125             :                 unsigned i;
    2126             : 
    2127           0 :                 for (i = 0; i < nr_folios; i++) {
    2128           0 :                         struct folio *folio = fbatch.folios[i];
    2129             : 
    2130           0 :                         done_index = folio->index + folio_nr_pages(folio);
    2131             :                         /*
    2132             :                          * At this point we hold neither the i_pages lock nor
    2133             :                          * the page lock: the page may be truncated or
    2134             :                          * invalidated (changing page->mapping to NULL),
    2135             :                          * or even swizzled back from swapper_space to
    2136             :                          * tmpfs file mapping
    2137             :                          */
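                      :                         /*
                      :                          * Flush the pending write bio before blocking on
                      :                          * the folio lock: the lock holder may be waiting on
                      :                          * writeback of a page that sits in our bio.
                      :                          */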
    2138           0 :                         if (!folio_trylock(folio)) {
    2139           0 :                                 submit_write_bio(bio_ctrl, 0);
    2140           0 :                                 folio_lock(folio);
    2141             :                         }
    2142             : 
    2143           0 :                         if (unlikely(folio->mapping != mapping)) {
    2144           0 :                                 folio_unlock(folio);
    2145           0 :                                 continue;
    2146             :                         }
    2147             : 
    2148           0 :                         if (wbc->sync_mode != WB_SYNC_NONE) {
    2149           0 :                                 if (folio_test_writeback(folio))
    2150           0 :                                         submit_write_bio(bio_ctrl, 0);
    2151           0 :                                 folio_wait_writeback(folio);
    2152             :                         }
    2153             : 
    2154           0 :                         if (folio_test_writeback(folio) ||
    2155           0 :                             !folio_clear_dirty_for_io(folio)) {
    2156           0 :                                 folio_unlock(folio);
    2157           0 :                                 continue;
    2158             :                         }
    2159             : 
    2160           0 :                         ret = __extent_writepage(&folio->page, bio_ctrl);
    2161           0 :                         if (ret < 0) {
    2162             :                                 done = 1;
    2163             :                                 break;
    2164             :                         }
    2165             : 
    2166             :                         /*
     2167             :                          * The filesystem may choose to bump up nr_to_write.
     2168             :                          * We have to make sure to honor the new nr_to_write
     2169             :                          * at any time.
    2170             :                          */
    2171           0 :                         nr_to_write_done = wbc->nr_to_write <= 0;
    2172             :                 }
    2173           0 :                 folio_batch_release(&fbatch);
    2174           0 :                 cond_resched();
    2175             :         }
    2176           0 :         if (!scanned && !done) {
    2177             :                 /*
    2178             :                  * We hit the last page and there is more work to be done: wrap
    2179             :                  * back to the start of the file
    2180             :                  */
    2181           0 :                 scanned = 1;
    2182           0 :                 index = 0;
    2183             : 
    2184             :                 /*
    2185             :                  * If we're looping we could run into a page that is locked by a
    2186             :                  * writer and that writer could be waiting on writeback for a
    2187             :                  * page in our current bio, and thus deadlock, so flush the
    2188             :                  * write bio here.
    2189             :                  */
    2190           0 :                 submit_write_bio(bio_ctrl, 0);
    2191           0 :                 goto retry;
    2192             :         }
    2193             : 
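                      :         /* Remember where the next cyclic walk of this mapping should resume. */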
    2194           0 :         if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole))
    2195           0 :                 mapping->writeback_index = done_index;
    2196             : 
    2197           0 :         btrfs_add_delayed_iput(BTRFS_I(inode));
    2198           0 :         return ret;
    2199             : }
    2200             : 
    2201             : /*
     2202             :  * Submit the pages in the range to the bio for call sites where the delalloc
     2203             :  * range has already been run (i.e. the ordered extent was inserted) and all
     2204             :  * pages are still locked.
    2205             :  */
    2206           0 : int extent_write_locked_range(struct inode *inode, u64 start, u64 end,
    2207             :                               struct writeback_control *wbc)
    2208             : {
    2209           0 :         bool found_error = false;
    2210           0 :         int first_error = 0;
    2211           0 :         int ret = 0;
    2212           0 :         struct address_space *mapping = inode->i_mapping;
    2213           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
    2214           0 :         const u32 sectorsize = fs_info->sectorsize;
    2215           0 :         loff_t i_size = i_size_read(inode);
    2216           0 :         u64 cur = start;
    2217           0 :         struct btrfs_bio_ctrl bio_ctrl = {
    2218             :                 .wbc = wbc,
    2219           0 :                 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
    2220             :         };
    2221             : 
    2222           0 :         if (wbc->no_cgroup_owner)
    2223           0 :                 bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT;
    2224             : 
    2225           0 :         ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize));
    2226             : 
    2227           0 :         while (cur <= end) {
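                      :                 /*
                      :                  * Process one page per iteration: cur_end is the last byte
                      :                  * of the page containing cur, clamped to the range end.
                      :                  */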
    2228           0 :                 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end);
    2229           0 :                 struct page *page;
    2230           0 :                 int nr = 0;
    2231             : 
    2232           0 :                 page = find_get_page(mapping, cur >> PAGE_SHIFT);
    2233             :                 /*
     2234             :                  * All pages in the range have been locked since
     2235             :                  * btrfs_run_delalloc_range(), thus nobody else could have
     2236             :                  * cleared the page dirty flag in the meantime.
    2237             :                  */
    2238           0 :                 ASSERT(PageLocked(page));
    2239           0 :                 ASSERT(PageDirty(page));
    2240           0 :                 clear_page_dirty_for_io(page);
    2241             : 
    2242           0 :                 ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl,
    2243             :                                             i_size, &nr);
    2244           0 :                 if (ret == 1)
    2245           0 :                         goto next_page;
    2246             : 
    2247             :                 /* Make sure the mapping tag for page dirty gets cleared. */
    2248           0 :                 if (nr == 0) {
    2249           0 :                         set_page_writeback(page);
    2250           0 :                         end_page_writeback(page);
    2251             :                 }
    2252           0 :                 if (ret)
    2253           0 :                         end_extent_writepage(page, ret, cur, cur_end);
    2254           0 :                 btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur);
    2255           0 :                 if (ret < 0) {
    2256           0 :                         found_error = true;
    2257           0 :                         first_error = ret;
    2258             :                 }
    2259           0 : next_page:
    2260           0 :                 put_page(page);
    2261           0 :                 cur = cur_end + 1;
    2262             :         }
    2263             : 
    2264           0 :         submit_write_bio(&bio_ctrl, found_error ? ret : 0);
    2265             : 
    2266           0 :         if (found_error)
    2267           0 :                 return first_error;
    2268             :         return ret;
    2269             : }
    2270             : 
    2271           0 : int extent_writepages(struct address_space *mapping,
    2272             :                       struct writeback_control *wbc)
    2273             : {
    2274           0 :         struct inode *inode = mapping->host;
    2275           0 :         int ret = 0;
    2276           0 :         struct btrfs_bio_ctrl bio_ctrl = {
    2277             :                 .wbc = wbc,
    2278           0 :                 .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc),
    2279             :         };
    2280             : 
    2281             :         /*
    2282             :          * Allow only a single thread to do the reloc work in zoned mode to
    2283             :          * protect the write pointer updates.
    2284             :          */
    2285           0 :         btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
    2286           0 :         ret = extent_write_cache_pages(mapping, &bio_ctrl);
    2287           0 :         submit_write_bio(&bio_ctrl, ret);
    2288           0 :         btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
    2289           0 :         return ret;
    2290             : }
    2291             : 
    2292           0 : void extent_readahead(struct readahead_control *rac)
    2293             : {
    2294           0 :         struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ | REQ_RAHEAD };
    2295           0 :         struct page *pagepool[16];
    2296           0 :         struct extent_map *em_cached = NULL;
    2297           0 :         u64 prev_em_start = (u64)-1;
    2298           0 :         int nr;
    2299             : 
    2300           0 :         while ((nr = readahead_page_batch(rac, pagepool))) {
    2301           0 :                 u64 contig_start = readahead_pos(rac);
    2302           0 :                 u64 contig_end = contig_start + readahead_batch_length(rac) - 1;
    2303             : 
    2304           0 :                 contiguous_readpages(pagepool, nr, contig_start, contig_end,
    2305             :                                 &em_cached, &bio_ctrl, &prev_em_start);
    2306             :         }
    2307             : 
    2308           0 :         if (em_cached)
    2309           0 :                 free_extent_map(em_cached);
    2310           0 :         submit_one_bio(&bio_ctrl);
    2311           0 : }
    2312             : 
    2313             : /*
     2314             :  * Basic invalidate_folio code.  This waits on any locked or writeback
     2315             :  * ranges corresponding to the folio, and then deletes any extent state
     2316             :  * records from the tree.
    2317             :  */
    2318           0 : int extent_invalidate_folio(struct extent_io_tree *tree,
    2319             :                           struct folio *folio, size_t offset)
    2320             : {
    2321           0 :         struct extent_state *cached_state = NULL;
    2322           0 :         u64 start = folio_pos(folio);
    2323           0 :         u64 end = start + folio_size(folio) - 1;
    2324           0 :         size_t blocksize = folio->mapping->host->i_sb->s_blocksize;
    2325             : 
    2326             :         /* This function is only called for the btree inode */
    2327           0 :         ASSERT(tree->owner == IO_TREE_BTREE_INODE_IO);
    2328             : 
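                      :         /*
                      :          * Only the part of the folio from @offset (rounded up to a
                      :          * block boundary) to the folio end is invalidated.
                      :          */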
    2329           0 :         start += ALIGN(offset, blocksize);
    2330           0 :         if (start > end)
    2331             :                 return 0;
    2332             : 
    2333           0 :         lock_extent(tree, start, end, &cached_state);
    2334           0 :         folio_wait_writeback(folio);
    2335             : 
    2336             :         /*
    2337             :          * Currently for btree io tree, only EXTENT_LOCKED is utilized,
    2338             :          * so here we only need to unlock the extent range to free any
    2339             :          * existing extent state.
    2340             :          */
    2341           0 :         unlock_extent(tree, start, end, &cached_state);
    2342           0 :         return 0;
    2343             : }
    2344             : 
    2345             : /*
     2346             :  * A helper for release_folio.  This tests for areas of the page that
     2347             :  * are locked or under IO and drops the related state bits if it is safe
     2348             :  * to drop the page.
    2349             :  */
    2350           0 : static int try_release_extent_state(struct extent_io_tree *tree,
    2351             :                                     struct page *page, gfp_t mask)
    2352             : {
    2353           0 :         u64 start = page_offset(page);
    2354           0 :         u64 end = start + PAGE_SIZE - 1;
    2355           0 :         int ret = 1;
    2356             : 
    2357           0 :         if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) {
    2358             :                 ret = 0;
    2359             :         } else {
    2360           0 :                 u32 clear_bits = ~(EXTENT_LOCKED | EXTENT_NODATASUM |
    2361             :                                    EXTENT_DELALLOC_NEW | EXTENT_CTLBITS);
    2362             : 
    2363             :                 /*
    2364             :                  * At this point we can safely clear everything except the
    2365             :                  * locked bit, the nodatasum bit and the delalloc new bit.
    2366             :                  * The delalloc new bit will be cleared by ordered extent
    2367             :                  * completion.
    2368             :                  */
    2369           0 :                 ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL);
    2370             : 
     2371             :                 /* If __clear_extent_bit() failed due to ENOMEM,
     2372             :                  * we can't allow the release to continue.
    2373             :                  */
    2374           0 :                 if (ret < 0)
    2375             :                         ret = 0;
    2376             :                 else
    2377           0 :                         ret = 1;
    2378             :         }
    2379           0 :         return ret;
    2380             : }
    2381             : 
    2382             : /*
     2383             :  * A helper for release_folio.  As long as there are no locked extents
     2384             :  * in the range corresponding to the page, both state records and extent
     2385             :  * map records are removed.
    2386             :  */
    2387           0 : int try_release_extent_mapping(struct page *page, gfp_t mask)
    2388             : {
    2389           0 :         struct extent_map *em;
    2390           0 :         u64 start = page_offset(page);
    2391           0 :         u64 end = start + PAGE_SIZE - 1;
    2392           0 :         struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
    2393           0 :         struct extent_io_tree *tree = &btrfs_inode->io_tree;
    2394           0 :         struct extent_map_tree *map = &btrfs_inode->extent_tree;
    2395             : 
    2396           0 :         if (gfpflags_allow_blocking(mask) &&
    2397           0 :             page->mapping->host->i_size > SZ_16M) {
    2398             :                 u64 len;
    2399           0 :                 while (start <= end) {
    2400           0 :                         struct btrfs_fs_info *fs_info;
    2401           0 :                         u64 cur_gen;
    2402             : 
    2403           0 :                         len = end - start + 1;
    2404           0 :                         write_lock(&map->lock);
    2405           0 :                         em = lookup_extent_mapping(map, start, len);
    2406           0 :                         if (!em) {
    2407           0 :                                 write_unlock(&map->lock);
    2408           0 :                                 break;
    2409             :                         }
    2410           0 :                         if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
    2411           0 :                             em->start != start) {
    2412           0 :                                 write_unlock(&map->lock);
    2413           0 :                                 free_extent_map(em);
    2414           0 :                                 break;
    2415             :                         }
    2416           0 :                         if (test_range_bit(tree, em->start,
    2417             :                                            extent_map_end(em) - 1,
    2418             :                                            EXTENT_LOCKED, 0, NULL))
    2419           0 :                                 goto next;
    2420             :                         /*
    2421             :                          * If it's not in the list of modified extents, used
    2422             :                          * by a fast fsync, we can remove it. If it's being
    2423             :                          * logged we can safely remove it since fsync took an
    2424             :                          * extra reference on the em.
    2425             :                          */
    2426           0 :                         if (list_empty(&em->list) ||
    2427           0 :                             test_bit(EXTENT_FLAG_LOGGING, &em->flags))
    2428           0 :                                 goto remove_em;
    2429             :                         /*
    2430             :                          * If it's in the list of modified extents, remove it
     2431             :                          * only if its generation is older than the current one,
    2432             :                          * in which case we don't need it for a fast fsync.
    2433             :                          * Otherwise don't remove it, we could be racing with an
    2434             :                          * ongoing fast fsync that could miss the new extent.
    2435             :                          */
    2436           0 :                         fs_info = btrfs_inode->root->fs_info;
    2437           0 :                         spin_lock(&fs_info->trans_lock);
    2438           0 :                         cur_gen = fs_info->generation;
    2439           0 :                         spin_unlock(&fs_info->trans_lock);
    2440           0 :                         if (em->generation >= cur_gen)
    2441           0 :                                 goto next;
    2442           0 : remove_em:
    2443             :                         /*
    2444             :                          * We only remove extent maps that are not in the list of
    2445             :                          * modified extents or that are in the list but with a
     2446             :                          * generation lower than the current generation, so there
    2447             :                          * is no need to set the full fsync flag on the inode (it
    2448             :                          * hurts the fsync performance for workloads with a data
    2449             :                          * size that exceeds or is close to the system's memory).
    2450             :                          */
    2451           0 :                         remove_extent_mapping(map, em);
    2452             :                         /* once for the rb tree */
    2453           0 :                         free_extent_map(em);
    2454           0 : next:
    2455           0 :                         start = extent_map_end(em);
    2456           0 :                         write_unlock(&map->lock);
    2457             : 
    2458             :                         /* once for us */
    2459           0 :                         free_extent_map(em);
    2460             : 
    2461           0 :                         cond_resched(); /* Allow large-extent preemption. */
    2462             :                 }
    2463             :         }
    2464           0 :         return try_release_extent_state(tree, page, mask);
    2465             : }
    2466             : 
    2467             : /*
     2468             :  * Cache for the previous fiemap extent.
     2469             :  *
     2470             :  * Used for merging adjacent fiemap extents.
    2471             :  */
    2472             : struct fiemap_cache {
    2473             :         u64 offset;
    2474             :         u64 phys;
    2475             :         u64 len;
    2476             :         u32 flags;
    2477             :         bool cached;
    2478             : };
    2479             : 
    2480             : /*
     2481             :  * Helper to submit a fiemap extent.
     2482             :  *
     2483             :  * Tries to merge the current fiemap extent, specified by @offset, @phys,
     2484             :  * @len and @flags, with the cached one.
     2485             :  * Only when the merge fails is the cached one submitted as a
     2486             :  * fiemap extent.
    2487             :  *
    2488             :  * Return value is the same as fiemap_fill_next_extent().
    2489             :  */
    2490           0 : static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,
    2491             :                                 struct fiemap_cache *cache,
    2492             :                                 u64 offset, u64 phys, u64 len, u32 flags)
    2493             : {
    2494           0 :         int ret = 0;
    2495             : 
    2496             :         /* Set at the end of extent_fiemap(). */
    2497           0 :         ASSERT((flags & FIEMAP_EXTENT_LAST) == 0);
    2498             : 
    2499           0 :         if (!cache->cached)
    2500           0 :                 goto assign;
    2501             : 
    2502             :         /*
     2503             :          * Sanity check: extent_fiemap() should have ensured that the new
     2504             :          * fiemap extent won't overlap with the cached one.
     2505             :          * Not recoverable.
     2506             :          *
     2507             :          * NOTE: Physical addresses can overlap, due to compression.
    2508             :          */
    2509           0 :         if (cache->offset + cache->len > offset) {
    2510           0 :                 WARN_ON(1);
    2511           0 :                 return -EINVAL;
    2512             :         }
    2513             : 
    2514             :         /*
     2515             :          * Only merge fiemap extents if:
     2516             :          * 1) Their logical addresses are contiguous
     2517             :          *
     2518             :          * 2) Their physical addresses are contiguous, so truly compressed
     2519             :          *    extents (physical size smaller than logical size) won't get
     2520             :          *    merged with each other
     2521             :          *
     2522             :          * 3) They share the same flags
    2523             :          */
    2524           0 :         if (cache->offset + cache->len  == offset &&
    2525           0 :             cache->phys + cache->len == phys  &&
    2526           0 :             cache->flags == flags) {
    2527           0 :                 cache->len += len;
    2528           0 :                 return 0;
    2529             :         }
    2530             : 
    2531             :         /* Not mergeable, need to submit cached one */
    2532           0 :         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
    2533             :                                       cache->len, cache->flags);
    2534           0 :         cache->cached = false;
    2535           0 :         if (ret)
    2536             :                 return ret;
    2537           0 : assign:
    2538           0 :         cache->cached = true;
    2539           0 :         cache->offset = offset;
    2540           0 :         cache->phys = phys;
    2541           0 :         cache->len = len;
    2542           0 :         cache->flags = flags;
    2543             : 
    2544           0 :         return 0;
    2545             : }
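                      : 
                      : /*
                      :  * Illustrative example (not from the original source): with a cached
                      :  * extent (offset=0, phys=1M, len=4K) and a new extent (offset=4K,
                      :  * phys=1M+4K, len=8K) sharing the same flags, the two ranges are
                      :  * logically and physically contiguous, so the cache simply grows to
                      :  * (offset=0, phys=1M, len=12K).  A gap or a flag mismatch would instead
                      :  * flush the cached entry via fiemap_fill_next_extent() and start a new
                      :  * cache entry.
                      :  */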
    2546             : 
    2547             : /*
    2548             :  * Emit last fiemap cache
    2549             :  *
     2550             :  * The last fiemap extent may still be cached in the following case:
    2551             :  * 0                  4k                    8k
    2552             :  * |<- Fiemap range ->|
    2553             :  * |<------------  First extent ----------->|
    2554             :  *
    2555             :  * In this case, the first extent range will be cached but not emitted.
    2556             :  * So we must emit it before ending extent_fiemap().
    2557             :  */
    2558           0 : static int emit_last_fiemap_cache(struct fiemap_extent_info *fieinfo,
    2559             :                                   struct fiemap_cache *cache)
    2560             : {
    2561           0 :         int ret;
    2562             : 
    2563           0 :         if (!cache->cached)
    2564             :                 return 0;
    2565             : 
    2566           0 :         ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
    2567             :                                       cache->len, cache->flags);
    2568           0 :         cache->cached = false;
    2569           0 :         if (ret > 0)
    2570             :                 ret = 0;
    2571             :         return ret;
    2572             : }
    2573             : 
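                      : /*
                      :  * Move to the next file extent item, cloning the next leaf when we cross
                      :  * a leaf boundary (see fiemap_search_slot() for why the leaf is cloned).
                      :  */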
    2574           0 : static int fiemap_next_leaf_item(struct btrfs_inode *inode, struct btrfs_path *path)
    2575             : {
    2576           0 :         struct extent_buffer *clone;
    2577           0 :         struct btrfs_key key;
    2578           0 :         int slot;
    2579           0 :         int ret;
    2580             : 
    2581           0 :         path->slots[0]++;
    2582           0 :         if (path->slots[0] < btrfs_header_nritems(path->nodes[0]))
    2583             :                 return 0;
    2584             : 
    2585           0 :         ret = btrfs_next_leaf(inode->root, path);
    2586           0 :         if (ret != 0)
    2587             :                 return ret;
    2588             : 
    2589             :         /*
    2590             :          * Don't bother with cloning if there are no more file extent items for
    2591             :          * our inode.
    2592             :          */
    2593           0 :         btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
    2594           0 :         if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY)
    2595             :                 return 1;
    2596             : 
    2597             :         /* See the comment at fiemap_search_slot() about why we clone. */
    2598           0 :         clone = btrfs_clone_extent_buffer(path->nodes[0]);
    2599           0 :         if (!clone)
    2600             :                 return -ENOMEM;
    2601             : 
    2602           0 :         slot = path->slots[0];
    2603           0 :         btrfs_release_path(path);
    2604           0 :         path->nodes[0] = clone;
    2605           0 :         path->slots[0] = slot;
    2606             : 
    2607           0 :         return 0;
    2608             : }
    2609             : 
    2610             : /*
    2611             :  * Search for the first file extent item that starts at a given file offset or
    2612             :  * the one that starts immediately before that offset.
    2613             :  * Returns: 0 on success, < 0 on error, 1 if not found.
    2614             :  */
    2615           0 : static int fiemap_search_slot(struct btrfs_inode *inode, struct btrfs_path *path,
    2616             :                               u64 file_offset)
    2617             : {
    2618           0 :         const u64 ino = btrfs_ino(inode);
    2619           0 :         struct btrfs_root *root = inode->root;
    2620           0 :         struct extent_buffer *clone;
    2621           0 :         struct btrfs_key key;
    2622           0 :         int slot;
    2623           0 :         int ret;
    2624             : 
    2625           0 :         key.objectid = ino;
    2626           0 :         key.type = BTRFS_EXTENT_DATA_KEY;
    2627           0 :         key.offset = file_offset;
    2628             : 
    2629           0 :         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    2630           0 :         if (ret < 0)
    2631             :                 return ret;
    2632             : 
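                      :         /*
                      :          * An exact match was not found: btrfs_search_slot() positioned us
                      :          * after the key, so step back one slot if the previous item is a
                      :          * file extent item of this inode, as it may cover @file_offset.
                      :          */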
    2633           0 :         if (ret > 0 && path->slots[0] > 0) {
    2634           0 :                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
    2635           0 :                 if (key.objectid == ino && key.type == BTRFS_EXTENT_DATA_KEY)
    2636           0 :                         path->slots[0]--;
    2637             :         }
    2638             : 
    2639           0 :         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
    2640           0 :                 ret = btrfs_next_leaf(root, path);
    2641           0 :                 if (ret != 0)
    2642             :                         return ret;
    2643             : 
    2644           0 :                 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
    2645           0 :                 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
    2646             :                         return 1;
    2647             :         }
    2648             : 
    2649             :         /*
    2650             :          * We clone the leaf and use it during fiemap. This is because while
    2651             :          * using the leaf we do expensive things like checking if an extent is
    2652             :          * shared, which can take a long time. In order to prevent blocking
    2653             :          * other tasks for too long, we use a clone of the leaf. We have locked
    2654             :          * the file range in the inode's io tree, so we know none of our file
    2655             :          * extent items can change. This way we avoid blocking other tasks that
    2656             :          * want to insert items for other inodes in the same leaf or b+tree
    2657             :          * rebalance operations (triggered for example when someone is trying
    2658             :          * to push items into this leaf when trying to insert an item in a
    2659             :          * neighbour leaf).
    2660             :          * We also need the private clone because holding a read lock on an
    2661             :          * extent buffer of the subvolume's b+tree will make lockdep unhappy
    2662             :          * when we call fiemap_fill_next_extent(), because that may cause a page
    2663             :          * fault when filling the user space buffer with fiemap data.
    2664             :          */
    2665           0 :         clone = btrfs_clone_extent_buffer(path->nodes[0]);
    2666           0 :         if (!clone)
    2667             :                 return -ENOMEM;
    2668             : 
    2669           0 :         slot = path->slots[0];
    2670           0 :         btrfs_release_path(path);
    2671           0 :         path->nodes[0] = clone;
    2672           0 :         path->slots[0] = slot;
    2673             : 
    2674           0 :         return 0;
    2675             : }
    2676             : 
    2677             : /*
    2678             :  * Process a range which is a hole or a prealloc extent in the inode's subvolume
    2679             :  * btree. If @disk_bytenr is 0, we are dealing with a hole, otherwise a prealloc
    2680             :  * extent. The end offset (@end) is inclusive.
    2681             :  */
    2682           0 : static int fiemap_process_hole(struct btrfs_inode *inode,
    2683             :                                struct fiemap_extent_info *fieinfo,
    2684             :                                struct fiemap_cache *cache,
    2685             :                                struct extent_state **delalloc_cached_state,
    2686             :                                struct btrfs_backref_share_check_ctx *backref_ctx,
    2687             :                                u64 disk_bytenr, u64 extent_offset,
    2688             :                                u64 extent_gen,
    2689             :                                u64 start, u64 end)
    2690             : {
    2691           0 :         const u64 i_size = i_size_read(&inode->vfs_inode);
    2692           0 :         u64 cur_offset = start;
    2693           0 :         u64 last_delalloc_end = 0;
    2694           0 :         u32 prealloc_flags = FIEMAP_EXTENT_UNWRITTEN;
    2695           0 :         bool checked_extent_shared = false;
    2696           0 :         int ret;
    2697             : 
    2698             :         /*
    2699             :          * There can be no delalloc past i_size, so don't waste time looking for
    2700             :          * it beyond i_size.
    2701             :          */
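                      :         /*
                      :          * Illustrative example (not from the original source): for a 1M
                      :          * prealloc extent with delalloc only in [256K, 512K), the loop below
                      :          * emits the prealloc range [0, 256K) and the delalloc range
                      :          * [256K, 512K), while the trailing prealloc range [512K, 1M) is
                      :          * emitted by the code after the loop.
                      :          */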
    2702           0 :         while (cur_offset < end && cur_offset < i_size) {
    2703           0 :                 u64 delalloc_start;
    2704           0 :                 u64 delalloc_end;
    2705           0 :                 u64 prealloc_start;
    2706           0 :                 u64 prealloc_len = 0;
    2707           0 :                 bool delalloc;
    2708             : 
    2709           0 :                 delalloc = btrfs_find_delalloc_in_range(inode, cur_offset, end,
    2710             :                                                         delalloc_cached_state,
    2711             :                                                         &delalloc_start,
    2712             :                                                         &delalloc_end);
    2713           0 :                 if (!delalloc)
    2714             :                         break;
    2715             : 
    2716             :                 /*
    2717             :                  * If this is a prealloc extent we have to report every section
    2718             :                  * of it that has no delalloc.
    2719             :                  */
    2720           0 :                 if (disk_bytenr != 0) {
    2721           0 :                         if (last_delalloc_end == 0) {
    2722           0 :                                 prealloc_start = start;
    2723           0 :                                 prealloc_len = delalloc_start - start;
    2724             :                         } else {
    2725           0 :                                 prealloc_start = last_delalloc_end + 1;
    2726           0 :                                 prealloc_len = delalloc_start - prealloc_start;
    2727             :                         }
    2728             :                 }
    2729             : 
    2730           0 :                 if (prealloc_len > 0) {
    2731           0 :                         if (!checked_extent_shared && fieinfo->fi_extents_max) {
    2732           0 :                                 ret = btrfs_is_data_extent_shared(inode,
    2733             :                                                                   disk_bytenr,
    2734             :                                                                   extent_gen,
    2735             :                                                                   backref_ctx);
    2736           0 :                                 if (ret < 0)
    2737           0 :                                         return ret;
    2738           0 :                                 else if (ret > 0)
    2739           0 :                                         prealloc_flags |= FIEMAP_EXTENT_SHARED;
    2740             : 
    2741             :                                 checked_extent_shared = true;
    2742             :                         }
    2743           0 :                         ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
    2744             :                                                  disk_bytenr + extent_offset,
    2745             :                                                  prealloc_len, prealloc_flags);
    2746           0 :                         if (ret)
    2747           0 :                                 return ret;
    2748           0 :                         extent_offset += prealloc_len;
    2749             :                 }
    2750             : 
    2751           0 :                 ret = emit_fiemap_extent(fieinfo, cache, delalloc_start, 0,
    2752           0 :                                          delalloc_end + 1 - delalloc_start,
    2753             :                                          FIEMAP_EXTENT_DELALLOC |
    2754             :                                          FIEMAP_EXTENT_UNKNOWN);
    2755           0 :                 if (ret)
    2756           0 :                         return ret;
    2757             : 
    2758           0 :                 last_delalloc_end = delalloc_end;
    2759           0 :                 cur_offset = delalloc_end + 1;
    2760           0 :                 extent_offset += cur_offset - delalloc_start;
    2761           0 :                 cond_resched();
    2762             :         }
    2763             : 
    2764             :         /*
    2765             :          * Either we found no delalloc for the whole prealloc extent or we have
    2766             :          * a prealloc extent that spans i_size or starts at or after i_size.
    2767             :          */
    2768           0 :         if (disk_bytenr != 0 && last_delalloc_end < end) {
    2769           0 :                 u64 prealloc_start;
    2770           0 :                 u64 prealloc_len;
    2771             : 
    2772           0 :                 if (last_delalloc_end == 0) {
    2773           0 :                         prealloc_start = start;
    2774           0 :                         prealloc_len = end + 1 - start;
    2775             :                 } else {
    2776           0 :                         prealloc_start = last_delalloc_end + 1;
    2777           0 :                         prealloc_len = end + 1 - prealloc_start;
    2778             :                 }
    2779             : 
    2780           0 :                 if (!checked_extent_shared && fieinfo->fi_extents_max) {
    2781           0 :                         ret = btrfs_is_data_extent_shared(inode,
    2782             :                                                           disk_bytenr,
    2783             :                                                           extent_gen,
    2784             :                                                           backref_ctx);
    2785           0 :                         if (ret < 0)
    2786             :                                 return ret;
    2787           0 :                         else if (ret > 0)
    2788           0 :                                 prealloc_flags |= FIEMAP_EXTENT_SHARED;
    2789             :                 }
    2790           0 :                 ret = emit_fiemap_extent(fieinfo, cache, prealloc_start,
    2791             :                                          disk_bytenr + extent_offset,
    2792             :                                          prealloc_len, prealloc_flags);
    2793           0 :                 if (ret)
    2794           0 :                         return ret;
    2795             :         }
    2796             : 
    2797             :         return 0;
    2798             : }
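A worked example for the function above, assuming a hypothetical file layout:

    /*
     * A 1 MiB preallocated extent at file offset 0, with delalloc (buffered,
     * not yet flushed writes) over [128K, 192K), is reported as three fiemap
     * entries:
     *
     *   [0,    128K)  the prealloc flags (plus FIEMAP_EXTENT_SHARED if the
     *                 backref check found the extent shared)
     *   [128K, 192K)  FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN
     *   [192K,   1M)  the prealloc flags again
     *
     * The delalloc loop emits the first two entries; the trailing prealloc
     * chunk comes from the "last_delalloc_end < end" branch above.
     */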
    2799             : 
    2800           0 : static int fiemap_find_last_extent_offset(struct btrfs_inode *inode,
    2801             :                                           struct btrfs_path *path,
    2802             :                                           u64 *last_extent_end_ret)
    2803             : {
    2804           0 :         const u64 ino = btrfs_ino(inode);
    2805           0 :         struct btrfs_root *root = inode->root;
    2806           0 :         struct extent_buffer *leaf;
    2807           0 :         struct btrfs_file_extent_item *ei;
    2808           0 :         struct btrfs_key key;
    2809           0 :         u64 disk_bytenr;
    2810           0 :         int ret;
    2811             : 
    2812             :         /*
    2813             :          * Lookup the last file extent. We're not using i_size here because
    2814             :          * there might be preallocation past i_size.
    2815             :          */
    2816           0 :         ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0);
    2817             :         /* There can't be a file extent item at offset (u64)-1 */
    2818           0 :         ASSERT(ret != 0);
    2819           0 :         if (ret < 0)
    2820             :                 return ret;
    2821             : 
    2822             :         /*
    2823             :          * For a non-existing key, btrfs_search_slot() always leaves us at a
    2824             :          * slot > 0, except if the btree is empty, which is impossible because
    2825             :          * it has at least the inode item for this inode and all the items for
    2826             :          * the root inode 256.
    2827             :          */
    2828           0 :         ASSERT(path->slots[0] > 0);
    2829           0 :         path->slots[0]--;
    2830           0 :         leaf = path->nodes[0];
    2831           0 :         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    2832           0 :         if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
    2833             :                 /* No file extent items in the subvolume tree. */
    2834           0 :                 *last_extent_end_ret = 0;
    2835           0 :                 return 0;
    2836             :         }
    2837             : 
    2838             :         /*
    2839             :          * For an inline extent, disk_bytenr is where the inline data starts,
    2840             :          * so first check if we have an inline extent item before checking if we
    2841             :          * have an implicit hole (disk_bytenr == 0).
    2842             :          */
    2843           0 :         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
    2844           0 :         if (btrfs_file_extent_type(leaf, ei) == BTRFS_FILE_EXTENT_INLINE) {
    2845           0 :                 *last_extent_end_ret = btrfs_file_extent_end(path);
    2846           0 :                 return 0;
    2847             :         }
    2848             : 
    2849             :         /*
    2850             :          * Find the last file extent item that is not a hole (when NO_HOLES is
    2851             :          * not enabled). This should take at most 2 iterations in the worst
    2852             :          * case: we have one hole file extent item at slot 0 of a leaf and
    2853             :          * another hole file extent item as the last item in the previous leaf.
    2854             :          * This is because we merge file extent items that represent holes.
    2855             :          */
    2856           0 :         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
    2857           0 :         while (disk_bytenr == 0) {
    2858           0 :                 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
    2859           0 :                 if (ret < 0) {
    2860           0 :                         return ret;
    2861           0 :                 } else if (ret > 0) {
    2862             :                         /* No file extent items that are not holes. */
    2863           0 :                         *last_extent_end_ret = 0;
    2864           0 :                         return 0;
    2865             :                 }
    2866           0 :                 leaf = path->nodes[0];
    2867           0 :                 ei = btrfs_item_ptr(leaf, path->slots[0],
    2868             :                                     struct btrfs_file_extent_item);
    2869           0 :                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
    2870             :         }
    2871             : 
    2872           0 :         *last_extent_end_ret = btrfs_file_extent_end(path);
    2873           0 :         return 0;
    2874             : }
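The (u64)-1 lookup above exploits key ordering: searching for the largest possible offset always lands one slot past the last file extent item, so stepping back one slot reaches it. A minimal userspace analogue of that "search, then step back" idiom (search_slot here is a stand-in for illustration, not the btrfs function):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Return the slot where 'key' would be inserted (first slot > key). */
    static int search_slot(const uint64_t *keys, int nritems, uint64_t key)
    {
            int slot = 0;

            while (slot < nritems && keys[slot] <= key)
                    slot++;
            return slot;
    }

    int main(void)
    {
            const uint64_t file_offsets[] = { 0, 4096, 65536 };
            int slot = search_slot(file_offsets, 3, UINT64_MAX);

            assert(slot > 0);       /* mirrors ASSERT(path->slots[0] > 0) */
            printf("last extent item starts at offset %llu\n",
                   (unsigned long long)file_offsets[slot - 1]);
            return 0;
    }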
    2875             : 
    2876           0 : int extent_fiemap(struct btrfs_inode *inode, struct fiemap_extent_info *fieinfo,
    2877             :                   u64 start, u64 len)
    2878             : {
    2879           0 :         const u64 ino = btrfs_ino(inode);
    2880           0 :         struct extent_state *cached_state = NULL;
    2881           0 :         struct extent_state *delalloc_cached_state = NULL;
    2882           0 :         struct btrfs_path *path;
    2883           0 :         struct fiemap_cache cache = { 0 };
    2884           0 :         struct btrfs_backref_share_check_ctx *backref_ctx;
    2885           0 :         u64 last_extent_end;
    2886           0 :         u64 prev_extent_end;
    2887           0 :         u64 lockstart;
    2888           0 :         u64 lockend;
    2889           0 :         bool stopped = false;
    2890           0 :         int ret;
    2891             : 
    2892           0 :         backref_ctx = btrfs_alloc_backref_share_check_ctx();
    2893           0 :         path = btrfs_alloc_path();
    2894           0 :         if (!backref_ctx || !path) {
    2895           0 :                 ret = -ENOMEM;
    2896           0 :                 goto out;
    2897             :         }
    2898             : 
    2899           0 :         lockstart = round_down(start, inode->root->fs_info->sectorsize);
    2900           0 :         lockend = round_up(start + len, inode->root->fs_info->sectorsize);
    2901           0 :         prev_extent_end = lockstart;
    2902             : 
    2903           0 :         btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
    2904           0 :         lock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
    2905             : 
    2906           0 :         ret = fiemap_find_last_extent_offset(inode, path, &last_extent_end);
    2907           0 :         if (ret < 0)
    2908           0 :                 goto out_unlock;
    2909           0 :         btrfs_release_path(path);
    2910             : 
    2911           0 :         path->reada = READA_FORWARD;
    2912           0 :         ret = fiemap_search_slot(inode, path, lockstart);
    2913           0 :         if (ret < 0) {
    2914           0 :                 goto out_unlock;
    2915           0 :         } else if (ret > 0) {
    2916             :                 /*
    2917             :                  * No file extent item found, but we may have delalloc between
    2918             :                  * the current offset and i_size. So check for that.
    2919             :                  */
    2920           0 :                 ret = 0;
    2921           0 :                 goto check_eof_delalloc;
    2922             :         }
    2923             : 
    2924           0 :         while (prev_extent_end < lockend) {
    2925           0 :                 struct extent_buffer *leaf = path->nodes[0];
    2926           0 :                 struct btrfs_file_extent_item *ei;
    2927           0 :                 struct btrfs_key key;
    2928           0 :                 u64 extent_end;
    2929           0 :                 u64 extent_len;
    2930           0 :                 u64 extent_offset = 0;
    2931           0 :                 u64 extent_gen;
    2932           0 :                 u64 disk_bytenr = 0;
    2933           0 :                 u64 flags = 0;
    2934           0 :                 int extent_type;
    2935           0 :                 u8 compression;
    2936             : 
    2937           0 :                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    2938           0 :                 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
    2939             :                         break;
    2940             : 
    2941           0 :                 extent_end = btrfs_file_extent_end(path);
    2942             : 
    2943             :                 /*
    2944             :                  * The first iteration can leave us at an extent item that ends
    2945             :                  * before our range's start. Move to the next item.
    2946             :                  */
    2947           0 :                 if (extent_end <= lockstart)
    2948           0 :                         goto next_item;
    2949             : 
    2950           0 :                 backref_ctx->curr_leaf_bytenr = leaf->start;
    2951             : 
    2952             :                 /* We have an implicit hole (NO_HOLES feature enabled). */
    2953           0 :                 if (prev_extent_end < key.offset) {
    2954           0 :                         const u64 range_end = min(key.offset, lockend) - 1;
    2955             : 
    2956           0 :                         ret = fiemap_process_hole(inode, fieinfo, &cache,
    2957             :                                                   &delalloc_cached_state,
    2958             :                                                   backref_ctx, 0, 0, 0,
    2959             :                                                   prev_extent_end, range_end);
    2960           0 :                         if (ret < 0) {
    2961           0 :                                 goto out_unlock;
    2962           0 :                         } else if (ret > 0) {
    2963             :                                 /* fiemap_fill_next_extent() told us to stop. */
    2964             :                                 stopped = true;
    2965             :                                 break;
    2966             :                         }
    2967             : 
    2968             :                         /* We've reached the end of the fiemap range, stop. */
    2969           0 :                         if (key.offset >= lockend) {
    2970             :                                 stopped = true;
    2971             :                                 break;
    2972             :                         }
    2973             :                 }
    2974             : 
    2975           0 :                 extent_len = extent_end - key.offset;
    2976           0 :                 ei = btrfs_item_ptr(leaf, path->slots[0],
    2977             :                                     struct btrfs_file_extent_item);
    2978           0 :                 compression = btrfs_file_extent_compression(leaf, ei);
    2979           0 :                 extent_type = btrfs_file_extent_type(leaf, ei);
    2980           0 :                 extent_gen = btrfs_file_extent_generation(leaf, ei);
    2981             : 
    2982           0 :                 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
    2983           0 :                         disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, ei);
    2984           0 :                         if (compression == BTRFS_COMPRESS_NONE)
    2985           0 :                                 extent_offset = btrfs_file_extent_offset(leaf, ei);
    2986             :                 }
    2987             : 
    2988           0 :                 if (compression != BTRFS_COMPRESS_NONE)
    2989           0 :                         flags |= FIEMAP_EXTENT_ENCODED;
    2990             : 
    2991           0 :                 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
    2992           0 :                         flags |= FIEMAP_EXTENT_DATA_INLINE;
    2993           0 :                         flags |= FIEMAP_EXTENT_NOT_ALIGNED;
    2994           0 :                         ret = emit_fiemap_extent(fieinfo, &cache, key.offset, 0,
    2995             :                                                  extent_len, flags);
    2996           0 :                 } else if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
    2997           0 :                         ret = fiemap_process_hole(inode, fieinfo, &cache,
    2998             :                                                   &delalloc_cached_state,
    2999             :                                                   backref_ctx,
    3000             :                                                   disk_bytenr, extent_offset,
    3001             :                                                   extent_gen, key.offset,
    3002             :                                                   extent_end - 1);
    3003           0 :                 } else if (disk_bytenr == 0) {
    3004             :                         /* We have an explicit hole. */
    3005           0 :                         ret = fiemap_process_hole(inode, fieinfo, &cache,
    3006             :                                                   &delalloc_cached_state,
    3007             :                                                   backref_ctx, 0, 0, 0,
    3008             :                                                   key.offset, extent_end - 1);
    3009             :                 } else {
    3010             :                         /* We have a regular extent. */
    3011           0 :                         if (fieinfo->fi_extents_max) {
    3012           0 :                                 ret = btrfs_is_data_extent_shared(inode,
    3013             :                                                                   disk_bytenr,
    3014             :                                                                   extent_gen,
    3015             :                                                                   backref_ctx);
    3016           0 :                                 if (ret < 0)
    3017           0 :                                         goto out_unlock;
    3018           0 :                                 else if (ret > 0)
    3019           0 :                                         flags |= FIEMAP_EXTENT_SHARED;
    3020             :                         }
    3021             : 
    3022           0 :                         ret = emit_fiemap_extent(fieinfo, &cache, key.offset,
    3023             :                                                  disk_bytenr + extent_offset,
    3024             :                                                  extent_len, flags);
    3025             :                 }
    3026             : 
    3027           0 :                 if (ret < 0) {
    3028           0 :                         goto out_unlock;
    3029           0 :                 } else if (ret > 0) {
    3030             :                         /* fiemap_fill_next_extent() told us to stop. */
    3031             :                         stopped = true;
    3032             :                         break;
    3033             :                 }
    3034             : 
    3035             :                 prev_extent_end = extent_end;
    3036           0 : next_item:
    3037           0 :                 if (fatal_signal_pending(current)) {
    3038           0 :                         ret = -EINTR;
    3039           0 :                         goto out_unlock;
    3040             :                 }
    3041             : 
    3042           0 :                 ret = fiemap_next_leaf_item(inode, path);
    3043           0 :                 if (ret < 0) {
    3044           0 :                         goto out_unlock;
    3045           0 :                 } else if (ret > 0) {
    3046             :                         /* No more file extent items for this inode. */
    3047             :                         break;
    3048             :                 }
    3049           0 :                 cond_resched();
    3050             :         }
    3051             : 
    3052           0 : check_eof_delalloc:
    3053             :         /*
    3054             :          * Release (and free) the path before emitting any final entries to
    3055             :          * fiemap_fill_next_extent() to keep lockdep happy. This is because
    3056             :          * once we find no more file extent items exist, we may have a
    3057             :          * non-cloned leaf, and fiemap_fill_next_extent() can trigger page
    3058             :          * faults when copying data to the user space buffer.
    3059             :          */
    3060           0 :         btrfs_free_path(path);
    3061           0 :         path = NULL;
    3062             : 
    3063           0 :         if (!stopped && prev_extent_end < lockend) {
    3064           0 :                 ret = fiemap_process_hole(inode, fieinfo, &cache,
    3065             :                                           &delalloc_cached_state, backref_ctx,
    3066             :                                           0, 0, 0, prev_extent_end, lockend - 1);
    3067           0 :                 if (ret < 0)
    3068           0 :                         goto out_unlock;
    3069             :                 prev_extent_end = lockend;
    3070             :         }
    3071             : 
    3072           0 :         if (cache.cached && cache.offset + cache.len >= last_extent_end) {
    3073           0 :                 const u64 i_size = i_size_read(&inode->vfs_inode);
    3074             : 
    3075           0 :                 if (prev_extent_end < i_size) {
    3076           0 :                         u64 delalloc_start;
    3077           0 :                         u64 delalloc_end;
    3078           0 :                         bool delalloc;
    3079             : 
    3080           0 :                         delalloc = btrfs_find_delalloc_in_range(inode,
    3081             :                                                                 prev_extent_end,
    3082             :                                                                 i_size - 1,
    3083             :                                                                 &delalloc_cached_state,
    3084             :                                                                 &delalloc_start,
    3085             :                                                                 &delalloc_end);
    3086           0 :                         if (!delalloc)
    3087           0 :                                 cache.flags |= FIEMAP_EXTENT_LAST;
    3088             :                 } else {
    3089           0 :                         cache.flags |= FIEMAP_EXTENT_LAST;
    3090             :                 }
    3091             :         }
    3092             : 
    3093           0 :         ret = emit_last_fiemap_cache(fieinfo, &cache);
    3094             : 
    3095           0 : out_unlock:
    3096           0 :         unlock_extent(&inode->io_tree, lockstart, lockend, &cached_state);
    3097           0 :         btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
    3098           0 : out:
    3099           0 :         free_extent_state(delalloc_cached_state);
    3100           0 :         btrfs_free_backref_share_ctx(backref_ctx);
    3101           0 :         btrfs_free_path(path);
    3102           0 :         return ret;
    3103             : }
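For reference, a minimal userspace sketch (not part of this file) that drives extent_fiemap() end to end via the FIEMAP ioctl, using only standard UAPI headers:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    int main(int argc, char **argv)
    {
            struct fiemap *fm;
            unsigned int i;
            int fd;

            if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                    return 1;

            /* Header plus room for 32 extent records. */
            fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
            if (!fm)
                    return 1;
            fm->fm_start = 0;
            fm->fm_length = FIEMAP_MAX_OFFSET;      /* whole file */
            fm->fm_extent_count = 32;

            if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                    perror("FS_IOC_FIEMAP");
                    return 1;
            }
            for (i = 0; i < fm->fm_mapped_extents; i++) {
                    const struct fiemap_extent *fe = &fm->fm_extents[i];

                    printf("logical %llu physical %llu len %llu flags 0x%x\n",
                           (unsigned long long)fe->fe_logical,
                           (unsigned long long)fe->fe_physical,
                           (unsigned long long)fe->fe_length,
                           fe->fe_flags);
            }
            return 0;
    }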
    3104             : 
    3105             : static void __free_extent_buffer(struct extent_buffer *eb)
    3106             : {
    3107           0 :         kmem_cache_free(extent_buffer_cache, eb);
    3108             : }
    3109             : 
    3110           0 : static int extent_buffer_under_io(const struct extent_buffer *eb)
    3111             : {
    3112           0 :         return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
    3113           0 :                 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
    3114             : }
    3115             : 
    3116             : static bool page_range_has_eb(struct btrfs_fs_info *fs_info, struct page *page)
    3117             : {
    3118           0 :         struct btrfs_subpage *subpage;
    3119             : 
    3120           0 :         lockdep_assert_held(&page->mapping->private_lock);
    3121             : 
    3122           0 :         if (PagePrivate(page)) {
    3123           0 :                 subpage = (struct btrfs_subpage *)page->private;
    3124           0 :                 if (atomic_read(&subpage->eb_refs))
    3125             :                         return true;
    3126             :                 /*
    3127             :                  * Even if there are no eb refs here, we may still have an
    3128             :                  * end_page_read() call relying on page::private.
    3129             :                  */
    3130           0 :                 if (atomic_read(&subpage->readers))
    3131             :                         return true;
    3132             :         }
    3133             :         return false;
    3134             : }
    3135             : 
    3136           0 : static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page)
    3137             : {
    3138           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3139           0 :         const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
    3140             : 
    3141             :         /*
    3142             :          * For a mapped eb, we're going to change the page private, which must
    3143             :          * be done under the private_lock.
    3144             :          */
    3145           0 :         if (mapped)
    3146           0 :                 spin_lock(&page->mapping->private_lock);
    3147             : 
    3148           0 :         if (!PagePrivate(page)) {
    3149           0 :                 if (mapped)
    3150           0 :                         spin_unlock(&page->mapping->private_lock);
    3151           0 :                 return;
    3152             :         }
    3153             : 
    3154           0 :         if (fs_info->nodesize >= PAGE_SIZE) {
    3155             :                 /*
    3156             :                  * We do this since we'll remove the pages after we've
    3157             :                  * removed the eb from the radix tree, so we could race
    3158             :                  * and have this page now attached to a new eb.  So
    3159             :                  * only clear page_private if it's still connected to
    3160             :                  * this eb.
    3161             :                  */
    3162           0 :                 if (PagePrivate(page) &&
    3163           0 :                     page->private == (unsigned long)eb) {
    3164           0 :                         BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
    3165           0 :                         BUG_ON(PageDirty(page));
    3166           0 :                         BUG_ON(PageWriteback(page));
    3167             :                         /*
    3168             :                          * We need to make sure we haven't been attached
    3169             :                          * to a new eb.
    3170             :                          */
    3171           0 :                         detach_page_private(page);
    3172             :                 }
    3173           0 :                 if (mapped)
    3174           0 :                         spin_unlock(&page->mapping->private_lock);
    3175           0 :                 return;
    3176             :         }
    3177             : 
    3178             :         /*
    3179             :          * For subpage, we can have a dummy eb with page private.  In this
    3180             :          * case, we can directly detach the private, as such a page is only
    3181             :          * attached to one dummy eb, with no sharing.
    3182             :          */
    3183           0 :         if (!mapped) {
    3184           0 :                 btrfs_detach_subpage(fs_info, page);
    3185           0 :                 return;
    3186             :         }
    3187             : 
    3188           0 :         btrfs_page_dec_eb_refs(fs_info, page);
    3189             : 
    3190             :         /*
    3191             :          * We can only detach the page private if there are no other ebs in the
    3192             :          * page range and no unfinished IO.
    3193             :          */
    3194           0 :         if (!page_range_has_eb(fs_info, page))
    3195           0 :                 btrfs_detach_subpage(fs_info, page);
    3196             : 
    3197           0 :         spin_unlock(&page->mapping->private_lock);
    3198             : }
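To keep the three exit paths above straight, a summary of the cases detach_extent_buffer_page() handles:

    /*
     * 1. nodesize >= PAGE_SIZE: page->private points directly at the eb;
     *    clear it only if it still points at *this* eb, since a new eb may
     *    already have claimed the page.
     * 2. Subpage, UNMAPPED (dummy) eb: the page backs exactly one eb, so
     *    the subpage private can be detached unconditionally.
     * 3. Subpage, mapped eb: drop this eb's ref, then detach the subpage
     *    private only once no other eb or in-flight read uses the range.
     */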
    3199             : 
    3200             : /* Release all pages attached to the extent buffer */
    3201           0 : static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
    3202             : {
    3203           0 :         int i;
    3204           0 :         int num_pages;
    3205             : 
    3206           0 :         ASSERT(!extent_buffer_under_io(eb));
    3207             : 
    3208           0 :         num_pages = num_extent_pages(eb);
    3209           0 :         for (i = 0; i < num_pages; i++) {
    3210           0 :                 struct page *page = eb->pages[i];
    3211             : 
    3212           0 :                 if (!page)
    3213           0 :                         continue;
    3214             : 
    3215           0 :                 detach_extent_buffer_page(eb, page);
    3216             : 
    3217             :                 /* One for when we allocated the page */
    3218           0 :                 put_page(page);
    3219             :         }
    3220           0 : }
    3221             : 
    3222             : /*
    3223             :  * Helper for releasing the extent buffer.
    3224             :  */
    3225           0 : static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
    3226             : {
    3227           0 :         btrfs_release_extent_buffer_pages(eb);
    3228           0 :         btrfs_leak_debug_del_eb(eb);
    3229           0 :         __free_extent_buffer(eb);
    3230           0 : }
    3231             : 
    3232             : static struct extent_buffer *
    3233           0 : __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
    3234             :                       unsigned long len)
    3235             : {
    3236           0 :         struct extent_buffer *eb = NULL;
    3237             : 
    3238           0 :         eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL);
    3239           0 :         eb->start = start;
    3240           0 :         eb->len = len;
    3241           0 :         eb->fs_info = fs_info;
    3242           0 :         init_rwsem(&eb->lock);
    3243             : 
    3244           0 :         btrfs_leak_debug_add_eb(eb);
    3245             : 
    3246           0 :         spin_lock_init(&eb->refs_lock);
    3247           0 :         atomic_set(&eb->refs, 1);
    3248             : 
    3249           0 :         ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE);
    3250             : 
    3251           0 :         return eb;
    3252             : }
    3253             : 
    3254           0 : struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
    3255             : {
    3256           0 :         int i;
    3257           0 :         struct extent_buffer *new;
    3258           0 :         int num_pages = num_extent_pages(src);
    3259           0 :         int ret;
    3260             : 
    3261           0 :         new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
    3262           0 :         if (new == NULL)
    3263             :                 return NULL;
    3264             : 
    3265             :         /*
    3266             :          * Set UNMAPPED before calling btrfs_release_extent_buffer(), as
    3267             :          * btrfs_release_extent_buffer() has different behavior for
    3268             :          * UNMAPPED subpage extent buffers.
    3269             :          */
    3270           0 :         set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
    3271             : 
    3272           0 :         ret = btrfs_alloc_page_array(num_pages, new->pages);
    3273           0 :         if (ret) {
    3274           0 :                 btrfs_release_extent_buffer(new);
    3275           0 :                 return NULL;
    3276             :         }
    3277             : 
    3278           0 :         for (i = 0; i < num_pages; i++) {
    3279           0 :                 int ret;
    3280           0 :                 struct page *p = new->pages[i];
    3281             : 
    3282           0 :                 ret = attach_extent_buffer_page(new, p, NULL);
    3283           0 :                 if (ret < 0) {
    3284           0 :                         btrfs_release_extent_buffer(new);
    3285           0 :                         return NULL;
    3286             :                 }
    3287           0 :                 WARN_ON(PageDirty(p));
    3288           0 :                 copy_page(page_address(p), page_address(src->pages[i]));
    3289             :         }
    3290           0 :         set_extent_buffer_uptodate(new);
    3291             : 
    3292           0 :         return new;
    3293             : }
    3294             : 
    3295           0 : struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
    3296             :                                                   u64 start, unsigned long len)
    3297             : {
    3298           0 :         struct extent_buffer *eb;
    3299           0 :         int num_pages;
    3300           0 :         int i;
    3301           0 :         int ret;
    3302             : 
    3303           0 :         eb = __alloc_extent_buffer(fs_info, start, len);
    3304           0 :         if (!eb)
    3305             :                 return NULL;
    3306             : 
    3307           0 :         num_pages = num_extent_pages(eb);
    3308           0 :         ret = btrfs_alloc_page_array(num_pages, eb->pages);
    3309           0 :         if (ret)
    3310           0 :                 goto err;
    3311             : 
    3312           0 :         for (i = 0; i < num_pages; i++) {
    3313           0 :                 struct page *p = eb->pages[i];
    3314             : 
    3315           0 :                 ret = attach_extent_buffer_page(eb, p, NULL);
    3316           0 :                 if (ret < 0)
    3317           0 :                         goto err;
    3318             :         }
    3319             : 
    3320           0 :         set_extent_buffer_uptodate(eb);
    3321           0 :         btrfs_set_header_nritems(eb, 0);
    3322           0 :         set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
    3323             : 
    3324             :         return eb;
    3325             : err:
    3326           0 :         for (i = 0; i < num_pages; i++) {
    3327           0 :                 if (eb->pages[i]) {
    3328           0 :                         detach_extent_buffer_page(eb, eb->pages[i]);
    3329           0 :                         __free_page(eb->pages[i]);
    3330             :                 }
    3331             :         }
    3332           0 :         __free_extent_buffer(eb);
    3333           0 :         return NULL;
    3334             : }
    3335             : 
    3336           0 : struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
    3337             :                                                 u64 start)
    3338             : {
    3339           0 :         return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
    3340             : }
    3341             : 
    3342           0 : static void check_buffer_tree_ref(struct extent_buffer *eb)
    3343             : {
    3344           0 :         int refs;
    3345             :         /*
    3346             :          * The TREE_REF bit is first set when the extent_buffer is added
    3347             :          * to the radix tree. It is also set again, if it had been cleared,
    3348             :          * when a new reference is created by find_extent_buffer.
    3349             :          *
    3350             :          * It is only cleared in two cases: freeing the last non-tree
    3351             :          * reference to the extent_buffer when its STALE bit is set or
    3352             :          * calling release_folio when the tree reference is the only reference.
    3353             :          *
    3354             :          * In both cases, care is taken to ensure that the extent_buffer's
    3355             :          * pages are not under io. However, release_folio can be concurrently
    3356             :          * called with creating new references, which is prone to race
    3357             :          * conditions between the calls to check_buffer_tree_ref in those
    3358             :          * codepaths and clearing TREE_REF in try_release_extent_buffer.
    3359             :          *
    3360             :          * The actual lifetime of the extent_buffer in the radix tree is
    3361             :          * adequately protected by the refcount, but the TREE_REF bit and
    3362             :          * its corresponding reference are not. To protect against this
    3363             :          * class of races, we call check_buffer_tree_ref from the codepaths
    3364             :          * which trigger io. Note that once io is initiated, TREE_REF can no
    3365             :          * longer be cleared, so that is the moment at which any such race is
    3366             :          * best fixed.
    3367             :          */
    3368           0 :         refs = atomic_read(&eb->refs);
    3369           0 :         if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
    3370             :                 return;
    3371             : 
    3372           0 :         spin_lock(&eb->refs_lock);
    3373           0 :         if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
    3374           0 :                 atomic_inc(&eb->refs);
    3375           0 :         spin_unlock(&eb->refs_lock);
    3376             : }
    3377             : 
    3378           0 : static void mark_extent_buffer_accessed(struct extent_buffer *eb,
    3379             :                 struct page *accessed)
    3380             : {
    3381           0 :         int num_pages, i;
    3382             : 
    3383           0 :         check_buffer_tree_ref(eb);
    3384             : 
    3385           0 :         num_pages = num_extent_pages(eb);
    3386           0 :         for (i = 0; i < num_pages; i++) {
    3387           0 :                 struct page *p = eb->pages[i];
    3388             : 
    3389           0 :                 if (p != accessed)
    3390           0 :                         mark_page_accessed(p);
    3391             :         }
    3392           0 : }
    3393             : 
    3394           0 : struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
    3395             :                                          u64 start)
    3396             : {
    3397           0 :         struct extent_buffer *eb;
    3398             : 
    3399           0 :         eb = find_extent_buffer_nolock(fs_info, start);
    3400           0 :         if (!eb)
    3401             :                 return NULL;
    3402             :         /*
    3403             :          * Lock our eb's refs_lock to avoid races with free_extent_buffer().
    3404             :          * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and
    3405             :          * another task running free_extent_buffer() might have seen that flag
    3406             :          * set, eb->refs == 2, that the buffer isn't under IO (dirty and
    3407             :          * writeback flags not set) and it's still in the tree (flag
    3408             :          * EXTENT_BUFFER_TREE_REF set), therefore being in the process of
    3409             :          * decrementing the extent buffer's reference count twice.  So here we
    3410             :          * could race and increment the eb's reference count, clear its stale
    3411             :          * flag, mark it as dirty and drop our reference before the other task
    3412             :          * finishes executing free_extent_buffer, which would later result in
    3413             :          * an attempt to free an extent buffer that is dirty.
    3414             :          */
    3415           0 :         if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
    3416           0 :                 spin_lock(&eb->refs_lock);
    3417           0 :                 spin_unlock(&eb->refs_lock);
    3418             :         }
    3419           0 :         mark_extent_buffer_accessed(eb, NULL);
    3420           0 :         return eb;
    3421             : }
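The empty lock/unlock pair above is a "lock as barrier": taking refs_lock and immediately dropping it serializes this task against any free_extent_buffer() currently inside its refs_lock section, so the double-decrement race described in the comment cannot slip past us. A minimal pthread sketch of the same idiom (illustrative names only):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Returns only once any thread currently inside 'lock' has left it;
     * we neither read nor write shared state ourselves. */
    static void wait_for_critical_section(void)
    {
            pthread_mutex_lock(&lock);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            wait_for_critical_section();
            puts("past any concurrent critical section");
            return 0;
    }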
    3422             : 
    3423             : #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
    3424             : struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
    3425             :                                         u64 start)
    3426             : {
    3427             :         struct extent_buffer *eb, *exists = NULL;
    3428             :         int ret;
    3429             : 
    3430             :         eb = find_extent_buffer(fs_info, start);
    3431             :         if (eb)
    3432             :                 return eb;
    3433             :         eb = alloc_dummy_extent_buffer(fs_info, start);
    3434             :         if (!eb)
    3435             :                 return ERR_PTR(-ENOMEM);
    3436             :         eb->fs_info = fs_info;
    3437             : again:
    3438             :         ret = radix_tree_preload(GFP_NOFS);
    3439             :         if (ret) {
    3440             :                 exists = ERR_PTR(ret);
    3441             :                 goto free_eb;
    3442             :         }
    3443             :         spin_lock(&fs_info->buffer_lock);
    3444             :         ret = radix_tree_insert(&fs_info->buffer_radix,
    3445             :                                 start >> fs_info->sectorsize_bits, eb);
    3446             :         spin_unlock(&fs_info->buffer_lock);
    3447             :         radix_tree_preload_end();
    3448             :         if (ret == -EEXIST) {
    3449             :                 exists = find_extent_buffer(fs_info, start);
    3450             :                 if (exists)
    3451             :                         goto free_eb;
    3452             :                 else
    3453             :                         goto again;
    3454             :         }
    3455             :         check_buffer_tree_ref(eb);
    3456             :         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
    3457             : 
    3458             :         return eb;
    3459             : free_eb:
    3460             :         btrfs_release_extent_buffer(eb);
    3461             :         return exists;
    3462             : }
    3463             : #endif
    3464             : 
    3465           0 : static struct extent_buffer *grab_extent_buffer(
    3466             :                 struct btrfs_fs_info *fs_info, struct page *page)
    3467             : {
    3468           0 :         struct extent_buffer *exists;
    3469             : 
    3470             :         /*
    3471             :          * For the subpage case, we completely rely on the radix tree to ensure we
    3472             :          * don't try to insert two ebs for the same bytenr.  So here we always
    3473             :          * return NULL and just continue.
    3474             :          */
    3475           0 :         if (fs_info->nodesize < PAGE_SIZE)
    3476             :                 return NULL;
    3477             : 
    3478             :         /* Page not yet attached to an extent buffer */
    3479           0 :         if (!PagePrivate(page))
    3480             :                 return NULL;
    3481             : 
    3482             :         /*
    3483             :          * We could have already allocated an eb for this page and attached
    3484             :          * one, so let's see if we can get a ref on the existing eb. If we
    3485             :          * can, we know it's good and we can just return that one; otherwise
    3486             :          * we know we can safely overwrite page->private.
    3487             :          */
    3488           0 :         exists = (struct extent_buffer *)page->private;
    3489           0 :         if (atomic_inc_not_zero(&exists->refs))
    3490             :                 return exists;
    3491             : 
    3492           0 :         WARN_ON(PageDirty(page));
    3493           0 :         detach_page_private(page);
    3494           0 :         return NULL;
    3495             : }
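grab_extent_buffer() leans on atomic_inc_not_zero(): take a reference only if the count is still non-zero, never reviving an eb whose last ref is already gone. A C11 userspace equivalent (a sketch, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Increment *refs unless it is zero; returns true if we got a ref. */
    static bool inc_not_zero(atomic_int *refs)
    {
            int cur = atomic_load(refs);

            while (cur != 0) {
                    /* On failure 'cur' is reloaded with the current value. */
                    if (atomic_compare_exchange_weak(refs, &cur, cur + 1))
                            return true;
            }
            return false;   /* object already dying; caller must not use it */
    }

    int main(void)
    {
            atomic_int refs = 1;

            printf("%d\n", inc_not_zero(&refs));    /* 1: refs now 2 */
            atomic_store(&refs, 0);
            printf("%d\n", inc_not_zero(&refs));    /* 0: stayed dead */
            return 0;
    }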
    3496             : 
    3497           0 : static int check_eb_alignment(struct btrfs_fs_info *fs_info, u64 start)
    3498             : {
    3499           0 :         if (!IS_ALIGNED(start, fs_info->sectorsize)) {
    3500           0 :                 btrfs_err(fs_info, "bad tree block start %llu", start);
    3501           0 :                 return -EINVAL;
    3502             :         }
    3503             : 
    3504           0 :         if (fs_info->nodesize < PAGE_SIZE &&
    3505           0 :             offset_in_page(start) + fs_info->nodesize > PAGE_SIZE) {
    3506           0 :                 btrfs_err(fs_info,
    3507             :                 "tree block crosses page boundary, start %llu nodesize %u",
    3508             :                           start, fs_info->nodesize);
    3509           0 :                 return -EINVAL;
    3510             :         }
    3511           0 :         if (fs_info->nodesize >= PAGE_SIZE &&
    3512           0 :             !PAGE_ALIGNED(start)) {
    3513           0 :                 btrfs_err(fs_info,
    3514             :                 "tree block is not page aligned, start %llu nodesize %u",
    3515             :                           start, fs_info->nodesize);
    3516           0 :                 return -EINVAL;
    3517             :         }
    3518             :         return 0;
    3519             : }
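To make the three rejections above concrete, a small userspace sketch of the same predicates, assuming 4 KiB pages (IS_ALIGNED and offset_in_page are re-implemented locally for the demo):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE          4096u
    #define DEMO_IS_ALIGNED(x, a)   (((x) & ((uint64_t)(a) - 1)) == 0)
    #define demo_offset_in_page(x)  ((uint32_t)((x) & (DEMO_PAGE_SIZE - 1)))

    static bool eb_start_valid(uint64_t start, uint32_t sectorsize,
                               uint32_t nodesize)
    {
            if (!DEMO_IS_ALIGNED(start, sectorsize))
                    return false;           /* bad tree block start */
            if (nodesize < DEMO_PAGE_SIZE &&
                demo_offset_in_page(start) + nodesize > DEMO_PAGE_SIZE)
                    return false;           /* crosses a page boundary */
            if (nodesize >= DEMO_PAGE_SIZE &&
                !DEMO_IS_ALIGNED(start, DEMO_PAGE_SIZE))
                    return false;           /* not page aligned */
            return true;
    }

    int main(void)
    {
            printf("%d\n", eb_start_valid(16384, 4096, 16384)); /* 1 */
            printf("%d\n", eb_start_valid(2048, 4096, 16384));  /* 0: sector */
            printf("%d\n", eb_start_valid(3072, 1024, 2048));   /* 0: crosses */
            return 0;
    }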
    3520             : 
    3521           0 : struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
    3522             :                                           u64 start, u64 owner_root, int level)
    3523             : {
    3524           0 :         unsigned long len = fs_info->nodesize;
    3525           0 :         int num_pages;
    3526           0 :         int i;
    3527           0 :         unsigned long index = start >> PAGE_SHIFT;
    3528           0 :         struct extent_buffer *eb;
    3529           0 :         struct extent_buffer *exists = NULL;
    3530           0 :         struct page *p;
    3531           0 :         struct address_space *mapping = fs_info->btree_inode->i_mapping;
    3532           0 :         u64 lockdep_owner = owner_root;
    3533           0 :         int uptodate = 1;
    3534           0 :         int ret;
    3535             : 
    3536           0 :         if (check_eb_alignment(fs_info, start))
    3537             :                 return ERR_PTR(-EINVAL);
    3538             : 
    3539             : #if BITS_PER_LONG == 32
    3540             :         if (start >= MAX_LFS_FILESIZE) {
    3541             :                 btrfs_err_rl(fs_info,
    3542             :                 "extent buffer %llu is beyond 32bit page cache limit", start);
    3543             :                 btrfs_err_32bit_limit(fs_info);
    3544             :                 return ERR_PTR(-EOVERFLOW);
    3545             :         }
    3546             :         if (start >= BTRFS_32BIT_EARLY_WARN_THRESHOLD)
    3547             :                 btrfs_warn_32bit_limit(fs_info);
    3548             : #endif
    3549             : 
    3550           0 :         eb = find_extent_buffer(fs_info, start);
    3551           0 :         if (eb)
    3552             :                 return eb;
    3553             : 
    3554           0 :         eb = __alloc_extent_buffer(fs_info, start, len);
    3555           0 :         if (!eb)
    3556             :                 return ERR_PTR(-ENOMEM);
    3557             : 
    3558             :         /*
    3559             :          * The reloc trees are just snapshots, so we need them to appear to be
    3560             :          * just like any other fs tree WRT lockdep.
    3561             :          */
    3562           0 :         if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID)
    3563             :                 lockdep_owner = BTRFS_FS_TREE_OBJECTID;
    3564             : 
    3565           0 :         btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level);
    3566             : 
    3567           0 :         num_pages = num_extent_pages(eb);
    3568           0 :         for (i = 0; i < num_pages; i++, index++) {
    3569           0 :                 struct btrfs_subpage *prealloc = NULL;
    3570             : 
    3571           0 :                 p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
    3572           0 :                 if (!p) {
    3573           0 :                         exists = ERR_PTR(-ENOMEM);
    3574           0 :                         goto free_eb;
    3575             :                 }
    3576             : 
    3577             :                 /*
    3578             :                  * Preallocate page->private for subpage case, so that we won't
    3579             :                  * allocate memory with private_lock held.  The memory will be
    3580             :                  * freed by attach_extent_buffer_page() or freed manually if
    3581             :                  * we exit earlier.
    3582             :                  *
    3583             :                  * Although we have ensured one subpage eb can only have one
    3584             :                  * page, this may change in the future for 16K page size
    3585             :                  * support, so we still preallocate the memory in the loop.
    3586             :                  */
    3587           0 :                 if (fs_info->nodesize < PAGE_SIZE) {
    3588           0 :                         prealloc = btrfs_alloc_subpage(fs_info, BTRFS_SUBPAGE_METADATA);
    3589           0 :                         if (IS_ERR(prealloc)) {
    3590           0 :                                 ret = PTR_ERR(prealloc);
    3591           0 :                                 unlock_page(p);
    3592           0 :                                 put_page(p);
    3593           0 :                                 exists = ERR_PTR(ret);
    3594           0 :                                 goto free_eb;
    3595             :                         }
    3596             :                 }
    3597             : 
    3598           0 :                 spin_lock(&mapping->private_lock);
    3599           0 :                 exists = grab_extent_buffer(fs_info, p);
    3600           0 :                 if (exists) {
    3601           0 :                         spin_unlock(&mapping->private_lock);
    3602           0 :                         unlock_page(p);
    3603           0 :                         put_page(p);
    3604           0 :                         mark_extent_buffer_accessed(exists, p);
    3605           0 :                         btrfs_free_subpage(prealloc);
    3606           0 :                         goto free_eb;
    3607             :                 }
    3608             :                 /* Should not fail, as we have preallocated the memory */
    3609           0 :                 ret = attach_extent_buffer_page(eb, p, prealloc);
    3610           0 :                 ASSERT(!ret);
    3611             :                 /*
    3612             :                  * To signal that we have an extra eb under allocation, so that
    3613             :                  * detach_extent_buffer_page() won't release the page private
    3614             :                  * while the eb hasn't yet been inserted into the radix tree.
    3615             :                  *
    3616             :                  * The ref will be decreased when the eb releases the page, in
    3617             :                  * detach_extent_buffer_page(), so it needs no special
    3618             :                  * handling in the error path.
    3619             :                  */
    3620           0 :                 btrfs_page_inc_eb_refs(fs_info, p);
    3621           0 :                 spin_unlock(&mapping->private_lock);
    3622             : 
    3623           0 :                 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len));
    3624           0 :                 eb->pages[i] = p;
    3625           0 :                 if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len))
    3626           0 :                         uptodate = 0;
    3627             : 
    3628             :                 /*
    3629             :                  * We can't unlock the pages just yet since the extent buffer
    3630             :                  * hasn't been properly inserted in the radix tree; this
    3631             :                  * opens a race with btree_release_folio(), which can free a
    3632             :                  * page while we are still filling in all pages for the
    3633             :                  * buffer, and we could crash.
    3634             :                  */
    3635             :         }
    3636           0 :         if (uptodate)
    3637           0 :                 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
    3638           0 : again:
    3639           0 :         ret = radix_tree_preload(GFP_NOFS);
    3640           0 :         if (ret) {
    3641           0 :                 exists = ERR_PTR(ret);
    3642           0 :                 goto free_eb;
    3643             :         }
    3644             : 
    3645           0 :         spin_lock(&fs_info->buffer_lock);
    3646           0 :         ret = radix_tree_insert(&fs_info->buffer_radix,
    3647           0 :                                 start >> fs_info->sectorsize_bits, eb);
    3648           0 :         spin_unlock(&fs_info->buffer_lock);
    3649           0 :         radix_tree_preload_end();
    3650           0 :         if (ret == -EEXIST) {
    3651           0 :                 exists = find_extent_buffer(fs_info, start);
    3652           0 :                 if (exists)
    3653           0 :                         goto free_eb;
    3654             :                 else
    3655           0 :                         goto again;
    3656             :         }
    3657             :         /* add one reference for the tree */
    3658           0 :         check_buffer_tree_ref(eb);
    3659           0 :         set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
    3660             : 
    3661             :         /*
    3662             :          * Now it's safe to unlock the pages because any calls to
    3663             :          * btree_release_folio will correctly detect that a page belongs to a
    3664             :          * live buffer and won't free them prematurely.
    3665             :          */
    3666           0 :         for (i = 0; i < num_pages; i++)
    3667           0 :                 unlock_page(eb->pages[i]);
    3668             :         return eb;
    3669             : 
    3670           0 : free_eb:
    3671           0 :         WARN_ON(!atomic_dec_and_test(&eb->refs));
    3672           0 :         for (i = 0; i < num_pages; i++) {
    3673           0 :                 if (eb->pages[i])
    3674           0 :                         unlock_page(eb->pages[i]);
    3675             :         }
    3676             : 
    3677           0 :         btrfs_release_extent_buffer(eb);
    3678           0 :         return exists;
    3679             : }
    3680             : 
    3681           0 : static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
    3682             : {
    3683           0 :         struct extent_buffer *eb =
    3684           0 :                         container_of(head, struct extent_buffer, rcu_head);
    3685             : 
    3686           0 :         __free_extent_buffer(eb);
    3687           0 : }
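The RCU callback above recovers the extent_buffer from its embedded rcu_head with container_of(). A self-contained userspace sketch of that pointer recovery (demo types; the callback is invoked directly instead of after an RCU grace period):

    #include <stddef.h>
    #include <stdio.h>

    struct demo_rcu_head { void (*func)(struct demo_rcu_head *); };

    struct demo_eb {
            unsigned long start;
            struct demo_rcu_head rcu_head;
    };

    #define demo_container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* The callback only sees the embedded head, never the eb itself. */
    static void demo_free_cb(struct demo_rcu_head *head)
    {
            struct demo_eb *eb = demo_container_of(head, struct demo_eb,
                                                   rcu_head);

            printf("freeing eb at start %lu\n", eb->start);
    }

    int main(void)
    {
            struct demo_eb eb = { .start = 16384 };

            /* call_rcu() would run this after a grace period; call directly. */
            demo_free_cb(&eb.rcu_head);
            return 0;
    }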
    3688             : 
    3689           0 : static int release_extent_buffer(struct extent_buffer *eb)
    3690             :         __releases(&eb->refs_lock)
    3691             : {
    3692           0 :         lockdep_assert_held(&eb->refs_lock);
    3693             : 
    3694           0 :         WARN_ON(atomic_read(&eb->refs) == 0);
    3695           0 :         if (atomic_dec_and_test(&eb->refs)) {
    3696           0 :                 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
    3697           0 :                         struct btrfs_fs_info *fs_info = eb->fs_info;
    3698             : 
    3699           0 :                         spin_unlock(&eb->refs_lock);
    3700             : 
    3701           0 :                         spin_lock(&fs_info->buffer_lock);
    3702           0 :                         radix_tree_delete(&fs_info->buffer_radix,
    3703           0 :                                           eb->start >> fs_info->sectorsize_bits);
    3704           0 :                         spin_unlock(&fs_info->buffer_lock);
    3705             :                 } else {
    3706           0 :                         spin_unlock(&eb->refs_lock);
    3707             :                 }
    3708             : 
    3709           0 :                 btrfs_leak_debug_del_eb(eb);
    3710             :                 /* Should be safe to release our pages at this point */
    3711           0 :                 btrfs_release_extent_buffer_pages(eb);
    3712             : #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
    3713             :                 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
    3714             :                         __free_extent_buffer(eb);
    3715             :                         return 1;
    3716             :                 }
    3717             : #endif
    3718           0 :                 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu);
    3719           0 :                 return 1;
    3720             :         }
    3721           0 :         spin_unlock(&eb->refs_lock);
    3722             : 
    3723           0 :         return 0;
    3724             : }
    3725             : 
    3726           0 : void free_extent_buffer(struct extent_buffer *eb)
    3727             : {
    3728           0 :         int refs;
    3729           0 :         if (!eb)
    3730             :                 return;
    3731             : 
    3732           0 :         refs = atomic_read(&eb->refs);
    3733           0 :         while (1) {
    3734           0 :                 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3)
    3735           0 :                     || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) &&
    3736             :                         refs == 1))
    3737             :                         break;
    3738           0 :                 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1))
    3739             :                         return;
    3740             :         }
    3741             : 
    3742           0 :         spin_lock(&eb->refs_lock);
    3743           0 :         if (atomic_read(&eb->refs) == 2 &&
    3744           0 :             test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
    3745           0 :             !extent_buffer_under_io(eb) &&
    3746             :             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
    3747           0 :                 atomic_dec(&eb->refs);
    3748             : 
    3749             :         /*
    3750             :          * I know this is terrible, but it's temporary until we stop tracking
    3751             :          * the uptodate bits and such for the extent buffers.
    3752             :          */
    3753           0 :         release_extent_buffer(eb);
    3754             : }
    3755             : 
    3756           0 : void free_extent_buffer_stale(struct extent_buffer *eb)
    3757             : {
    3758           0 :         if (!eb)
    3759             :                 return;
    3760             : 
    3761           0 :         spin_lock(&eb->refs_lock);
    3762           0 :         set_bit(EXTENT_BUFFER_STALE, &eb->bflags);
    3763             : 
    3764           0 :         if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) &&
    3765             :             test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
    3766           0 :                 atomic_dec(&eb->refs);
    3767           0 :         release_extent_buffer(eb);
    3768             : }
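                      : 
                      : /*
                      :  * For illustration, the usual get/put pairing around these helpers (a
                      :  * sketch of a hypothetical caller; the helpers themselves are the real
                      :  * ones used elsewhere in this file):
                      :  *
                      :  *        eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
                      :  *        if (IS_ERR(eb))
                      :  *                return PTR_ERR(eb);
                      :  *        ... read or modify the buffer ...
                      :  *        free_extent_buffer(eb);
                      :  *
                      :  * free_extent_buffer_stale() is used instead when the buffer is known
                      :  * to be stale (e.g. its block got freed), so the tree ref can be
                      :  * dropped early rather than waiting for the page to be released.
                      :  */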
    3769             : 
    3770           0 : static void btree_clear_page_dirty(struct page *page)
    3771             : {
    3772           0 :         ASSERT(PageDirty(page));
    3773           0 :         ASSERT(PageLocked(page));
    3774           0 :         clear_page_dirty_for_io(page);
    3775           0 :         xa_lock_irq(&page->mapping->i_pages);
    3776           0 :         if (!PageDirty(page))
    3777           0 :                 __xa_clear_mark(&page->mapping->i_pages,
    3778             :                                 page_index(page), PAGECACHE_TAG_DIRTY);
    3779           0 :         xa_unlock_irq(&page->mapping->i_pages);
    3780           0 : }
    3781             : 
    3782           0 : static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
    3783             : {
    3784           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3785           0 :         struct page *page = eb->pages[0];
    3786           0 :         bool last;
    3787             : 
    3788             :         /* btree_clear_page_dirty() needs page locked */
    3789           0 :         lock_page(page);
    3790           0 :         last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start,
    3791           0 :                                                   eb->len);
    3792           0 :         if (last)
    3793           0 :                 btree_clear_page_dirty(page);
    3794           0 :         unlock_page(page);
    3795           0 :         WARN_ON(atomic_read(&eb->refs) == 0);
    3796           0 : }
    3797             : 
    3798           0 : void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
    3799             :                               struct extent_buffer *eb)
    3800             : {
    3801           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3802           0 :         int i;
    3803           0 :         int num_pages;
    3804           0 :         struct page *page;
    3805             : 
    3806           0 :         btrfs_assert_tree_write_locked(eb);
    3807             : 
    3808           0 :         if (trans && btrfs_header_generation(eb) != trans->transid)
    3809             :                 return;
    3810             : 
    3811           0 :         if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags))
    3812             :                 return;
    3813             : 
    3814           0 :         percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
    3815             :                                  fs_info->dirty_metadata_batch);
    3816             : 
    3817           0 :         if (eb->fs_info->nodesize < PAGE_SIZE)
    3818           0 :                 return clear_subpage_extent_buffer_dirty(eb);
    3819             : 
    3820           0 :         num_pages = num_extent_pages(eb);
    3821             : 
    3822           0 :         for (i = 0; i < num_pages; i++) {
    3823           0 :                 page = eb->pages[i];
    3824           0 :                 if (!PageDirty(page))
    3825           0 :                         continue;
    3826           0 :                 lock_page(page);
    3827           0 :                 btree_clear_page_dirty(page);
    3828           0 :                 unlock_page(page);
    3829             :         }
    3830           0 :         WARN_ON(atomic_read(&eb->refs) == 0);
    3831             : }
    3832             : 
    3833           0 : void set_extent_buffer_dirty(struct extent_buffer *eb)
    3834             : {
    3835           0 :         int i;
    3836           0 :         int num_pages;
    3837           0 :         bool was_dirty;
    3838             : 
    3839           0 :         check_buffer_tree_ref(eb);
    3840             : 
    3841           0 :         was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
    3842             : 
    3843           0 :         num_pages = num_extent_pages(eb);
    3844           0 :         WARN_ON(atomic_read(&eb->refs) == 0);
    3845           0 :         WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
    3846             : 
    3847           0 :         if (!was_dirty) {
    3848           0 :                 bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
    3849             : 
    3850             :                 /*
     3851             :                  * For the subpage case we can have other extent buffers
     3852             :                  * in the same page, and in clear_subpage_extent_buffer_dirty()
     3853             :                  * we have to clear the page dirty flag without the subpage
     3854             :                  * lock held.  This can cause a race where our page's dirty
     3855             :                  * flag is cleared shortly after we set it.
     3856             :                  *
     3857             :                  * Thankfully, clear_subpage_extent_buffer_dirty() locks its
     3858             :                  * page for other reasons, so we can use the page lock to
     3859             :                  * prevent the above race.
    3860             :                  */
    3861           0 :                 if (subpage)
    3862           0 :                         lock_page(eb->pages[0]);
    3863           0 :                 for (i = 0; i < num_pages; i++)
    3864           0 :                         btrfs_page_set_dirty(eb->fs_info, eb->pages[i],
    3865           0 :                                              eb->start, eb->len);
    3866           0 :                 if (subpage)
    3867           0 :                         unlock_page(eb->pages[0]);
    3868           0 :                 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes,
    3869           0 :                                          eb->len,
    3870             :                                          eb->fs_info->dirty_metadata_batch);
    3871             :         }
    3872             : #ifdef CONFIG_BTRFS_DEBUG
    3873             :         for (i = 0; i < num_pages; i++)
    3874             :                 ASSERT(PageDirty(eb->pages[i]));
    3875             : #endif
    3876           0 : }
    3877             : 
    3878           0 : void clear_extent_buffer_uptodate(struct extent_buffer *eb)
    3879             : {
    3880           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3881           0 :         struct page *page;
    3882           0 :         int num_pages;
    3883           0 :         int i;
    3884             : 
    3885           0 :         clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
    3886           0 :         num_pages = num_extent_pages(eb);
    3887           0 :         for (i = 0; i < num_pages; i++) {
    3888           0 :                 page = eb->pages[i];
    3889           0 :                 if (!page)
    3890           0 :                         continue;
    3891             : 
    3892             :                 /*
     3893             :          * This is special handling for the metadata subpage case, as
     3894             :          * regular btrfs_is_subpage() cannot handle cloned/dummy metadata.
    3895             :                  */
    3896           0 :                 if (fs_info->nodesize >= PAGE_SIZE)
    3897           0 :                         ClearPageUptodate(page);
    3898             :                 else
    3899           0 :                         btrfs_subpage_clear_uptodate(fs_info, page, eb->start,
    3900           0 :                                                      eb->len);
    3901             :         }
    3902           0 : }
    3903             : 
    3904           0 : void set_extent_buffer_uptodate(struct extent_buffer *eb)
    3905             : {
    3906           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3907           0 :         struct page *page;
    3908           0 :         int num_pages;
    3909           0 :         int i;
    3910             : 
    3911           0 :         set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
    3912           0 :         num_pages = num_extent_pages(eb);
    3913           0 :         for (i = 0; i < num_pages; i++) {
    3914           0 :                 page = eb->pages[i];
    3915             : 
    3916             :                 /*
     3917             :          * This is special handling for the metadata subpage case, as
     3918             :          * regular btrfs_is_subpage() cannot handle cloned/dummy metadata.
    3919             :                  */
    3920           0 :                 if (fs_info->nodesize >= PAGE_SIZE)
    3921           0 :                         SetPageUptodate(page);
    3922             :                 else
    3923           0 :                         btrfs_subpage_set_uptodate(fs_info, page, eb->start,
    3924           0 :                                                    eb->len);
    3925             :         }
    3926           0 : }
    3927             : 
    3928           0 : static void extent_buffer_read_end_io(struct btrfs_bio *bbio)
    3929             : {
    3930           0 :         struct extent_buffer *eb = bbio->private;
    3931           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    3932           0 :         bool uptodate = !bbio->bio.bi_status;
    3933           0 :         struct bvec_iter_all iter_all;
    3934           0 :         struct bio_vec *bvec;
    3935           0 :         u32 bio_offset = 0;
    3936             : 
    3937           0 :         eb->read_mirror = bbio->mirror_num;
    3938             : 
    3939           0 :         if (uptodate &&
    3940           0 :             btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0)
    3941             :                 uptodate = false;
    3942             : 
    3943           0 :         if (uptodate) {
    3944           0 :                 set_extent_buffer_uptodate(eb);
    3945             :         } else {
    3946           0 :                 clear_extent_buffer_uptodate(eb);
    3947           0 :                 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
    3948             :         }
    3949             : 
    3950           0 :         bio_for_each_segment_all(bvec, &bbio->bio, iter_all) {
    3951           0 :                 u64 start = eb->start + bio_offset;
    3952           0 :                 struct page *page = bvec->bv_page;
    3953           0 :                 u32 len = bvec->bv_len;
    3954             : 
    3955           0 :                 if (uptodate)
    3956           0 :                         btrfs_page_set_uptodate(fs_info, page, start, len);
    3957             :                 else
    3958           0 :                         btrfs_page_clear_uptodate(fs_info, page, start, len);
    3959             : 
    3960           0 :                 bio_offset += len;
    3961             :         }
    3962             : 
    3963           0 :         clear_bit(EXTENT_BUFFER_READING, &eb->bflags);
    3964           0 :         smp_mb__after_atomic();
    3965           0 :         wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING);
    3966           0 :         free_extent_buffer(eb);
    3967             : 
    3968           0 :         bio_put(&bbio->bio);
    3969           0 : }
    3970             : 
    3971           0 : int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
    3972             :                              struct btrfs_tree_parent_check *check)
    3973             : {
    3974           0 :         int num_pages = num_extent_pages(eb), i;
    3975           0 :         struct btrfs_bio *bbio;
    3976             : 
    3977           0 :         if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
    3978             :                 return 0;
    3979             : 
    3980             :         /*
    3981             :          * We could have had EXTENT_BUFFER_UPTODATE cleared by the write
    3982             :          * operation, which could potentially still be in flight.  In this case
    3983             :          * we simply want to return an error.
    3984             :          */
    3985           0 :         if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)))
    3986             :                 return -EIO;
    3987             : 
     3988             :         /* Someone else is already reading the buffer; just wait for it. */
    3989           0 :         if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags))
    3990           0 :                 goto done;
    3991             : 
    3992           0 :         clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
    3993           0 :         eb->read_mirror = 0;
    3994           0 :         check_buffer_tree_ref(eb);
    3995           0 :         atomic_inc(&eb->refs);
    3996             : 
    3997           0 :         bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
    3998             :                                REQ_OP_READ | REQ_META, eb->fs_info,
    3999             :                                extent_buffer_read_end_io, eb);
    4000           0 :         bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
    4001           0 :         bbio->inode = BTRFS_I(eb->fs_info->btree_inode);
    4002           0 :         bbio->file_offset = eb->start;
    4003           0 :         memcpy(&bbio->parent_check, check, sizeof(*check));
    4004           0 :         if (eb->fs_info->nodesize < PAGE_SIZE) {
    4005           0 :                 __bio_add_page(&bbio->bio, eb->pages[0], eb->len,
    4006           0 :                                eb->start - page_offset(eb->pages[0]));
    4007             :         } else {
    4008           0 :                 for (i = 0; i < num_pages; i++)
    4009           0 :                         __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
    4010             :         }
    4011           0 :         btrfs_submit_bio(bbio, mirror_num);
    4012             : 
    4013           0 : done:
    4014           0 :         if (wait == WAIT_COMPLETE) {
    4015           0 :                 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE);
    4016           0 :                 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
    4017           0 :                         return -EIO;
    4018             :         }
    4019             : 
    4020             :         return 0;
    4021             : }
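                      : 
                      : /*
                      :  * For illustration, a hypothetical synchronous caller (mirror_num 0
                      :  * lets the lower layers pick a mirror):
                      :  *
                      :  *        struct btrfs_tree_parent_check check = {
                      :  *                .owner_root = btrfs_header_owner(parent),
                      :  *                .transid = btrfs_node_ptr_generation(parent, slot),
                      :  *                .level = level,
                      :  *        };
                      :  *
                      :  *        ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, 0, &check);
                      :  *        if (ret < 0)
                      :  *                free_extent_buffer_stale(eb);
                      :  */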
    4022             : 
    4023           0 : static bool report_eb_range(const struct extent_buffer *eb, unsigned long start,
    4024             :                             unsigned long len)
    4025             : {
    4026           0 :         btrfs_warn(eb->fs_info,
    4027             :                 "access to eb bytenr %llu len %lu out of range start %lu len %lu",
    4028             :                 eb->start, eb->len, start, len);
    4029           0 :         WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
    4030             : 
    4031           0 :         return true;
    4032             : }
    4033             : 
    4034             : /*
    4035             :  * Check if the [start, start + len) range is valid before reading/writing
    4036             :  * the eb.
     4037             :  * NOTE: @start and @len are offsets inside the eb, not logical addresses.
     4038             :  *
     4039             :  * The caller should not touch the dst/src memory if this function fails.
    4040             :  */
    4041           0 : static inline int check_eb_range(const struct extent_buffer *eb,
    4042             :                                  unsigned long start, unsigned long len)
    4043             : {
    4044           0 :         unsigned long offset;
    4045             : 
    4046             :         /* start, start + len should not go beyond eb->len nor overflow */
    4047           0 :         if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len))
    4048           0 :                 return report_eb_range(eb, start, len);
    4049             : 
    4050             :         return false;
    4051             : }
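                      : 
                      : /*
                      :  * For example, with a 16K nodesize (eb->len == 16384), start == 16000
                      :  * and len == 512 compute offset == 16512 > 16384, so the range is
                      :  * rejected, while len == 384 hits the end exactly and is accepted.
                      :  * check_add_overflow() additionally catches start + len wrapping the
                      :  * unsigned long range.
                      :  */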
    4052             : 
    4053           0 : void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
    4054             :                         unsigned long start, unsigned long len)
    4055             : {
    4056           0 :         size_t cur;
    4057           0 :         size_t offset;
    4058           0 :         struct page *page;
    4059           0 :         char *kaddr;
    4060           0 :         char *dst = (char *)dstv;
    4061           0 :         unsigned long i = get_eb_page_index(start);
    4062             : 
    4063           0 :         if (check_eb_range(eb, start, len))
    4064             :                 return;
    4065             : 
    4066           0 :         offset = get_eb_offset_in_page(eb, start);
    4067             : 
    4068           0 :         while (len > 0) {
    4069           0 :                 page = eb->pages[i];
    4070             : 
    4071           0 :                 cur = min(len, (PAGE_SIZE - offset));
    4072           0 :                 kaddr = page_address(page);
    4073           0 :                 memcpy(dst, kaddr + offset, cur);
    4074             : 
    4075           0 :                 dst += cur;
    4076           0 :                 len -= cur;
    4077           0 :                 offset = 0;
    4078           0 :                 i++;
    4079             :         }
    4080             : }
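                      : 
                      : /*
                      :  * For illustration, copying one fixed-size header field out of an eb
                      :  * (a hypothetical open-coded caller; accessors.h wraps this pattern):
                      :  *
                      :  *        u8 level;
                      :  *
                      :  *        read_extent_buffer(eb, &level,
                      :  *                           offsetof(struct btrfs_header, level),
                      :  *                           sizeof(level));
                      :  */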
    4081             : 
    4082           0 : int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb,
    4083             :                                        void __user *dstv,
    4084             :                                        unsigned long start, unsigned long len)
    4085             : {
    4086           0 :         size_t cur;
    4087           0 :         size_t offset;
    4088           0 :         struct page *page;
    4089           0 :         char *kaddr;
    4090           0 :         char __user *dst = (char __user *)dstv;
    4091           0 :         unsigned long i = get_eb_page_index(start);
    4092           0 :         int ret = 0;
    4093             : 
    4094           0 :         WARN_ON(start > eb->len);
     4095           0 :         WARN_ON(start + len > eb->len);
    4096             : 
    4097           0 :         offset = get_eb_offset_in_page(eb, start);
    4098             : 
    4099           0 :         while (len > 0) {
    4100           0 :                 page = eb->pages[i];
    4101             : 
    4102           0 :                 cur = min(len, (PAGE_SIZE - offset));
    4103           0 :                 kaddr = page_address(page);
    4104           0 :                 if (copy_to_user_nofault(dst, kaddr + offset, cur)) {
    4105             :                         ret = -EFAULT;
    4106             :                         break;
    4107             :                 }
    4108             : 
    4109           0 :                 dst += cur;
    4110           0 :                 len -= cur;
    4111           0 :                 offset = 0;
    4112           0 :                 i++;
    4113             :         }
    4114             : 
    4115           0 :         return ret;
    4116             : }
    4117             : 
    4118           0 : int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
    4119             :                          unsigned long start, unsigned long len)
    4120             : {
    4121           0 :         size_t cur;
    4122           0 :         size_t offset;
    4123           0 :         struct page *page;
    4124           0 :         char *kaddr;
    4125           0 :         char *ptr = (char *)ptrv;
    4126           0 :         unsigned long i = get_eb_page_index(start);
    4127           0 :         int ret = 0;
    4128             : 
    4129           0 :         if (check_eb_range(eb, start, len))
    4130             :                 return -EINVAL;
    4131             : 
    4132           0 :         offset = get_eb_offset_in_page(eb, start);
    4133             : 
    4134           0 :         while (len > 0) {
    4135           0 :                 page = eb->pages[i];
    4136             : 
    4137           0 :                 cur = min(len, (PAGE_SIZE - offset));
    4138             : 
    4139           0 :                 kaddr = page_address(page);
    4140           0 :                 ret = memcmp(ptr, kaddr + offset, cur);
    4141           0 :                 if (ret)
    4142             :                         break;
    4143             : 
    4144           0 :                 ptr += cur;
    4145           0 :                 len -= cur;
    4146           0 :                 offset = 0;
    4147           0 :                 i++;
    4148             :         }
    4149             :         return ret;
    4150             : }
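                      : 
                      : /*
                      :  * For illustration, a hypothetical fsid check against the on-disk
                      :  * header (compare check_tree_block_fsid() in disk-io.c):
                      :  *
                      :  *        if (memcmp_extent_buffer(eb, fs_info->fs_devices->fsid,
                      :  *                                 offsetof(struct btrfs_header, fsid),
                      :  *                                 BTRFS_FSID_SIZE))
                      :  *                return -EUCLEAN;
                      :  */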
    4151             : 
    4152             : /*
    4153             :  * Check that the extent buffer is uptodate.
    4154             :  *
     4155             :  * For the regular sector size == PAGE_SIZE case, check if @page is uptodate.
    4156             :  * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
    4157             :  */
    4158           0 : static void assert_eb_page_uptodate(const struct extent_buffer *eb,
    4159             :                                     struct page *page)
    4160             : {
    4161           0 :         struct btrfs_fs_info *fs_info = eb->fs_info;
    4162             : 
    4163             :         /*
     4164             :          * If we are using the commit root we could potentially clear a
     4165             :          * page's Uptodate flag while we're using the extent buffer that
     4166             :          * we've previously looked up.  We don't want to complain in this
     4167             :          * case, as the page was valid before; we just didn't write it out.
     4168             :          * Instead we want to catch the case where we didn't actually read
     4169             :          * the block properly, i.e. !PageUptodate and !EXTENT_BUFFER_WRITE_ERR.
    4170             :          */
    4171           0 :         if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))
    4172             :                 return;
    4173             : 
    4174           0 :         if (fs_info->nodesize < PAGE_SIZE) {
    4175           0 :                 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page,
    4176             :                                                          eb->start, eb->len)))
    4177           0 :                         btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len);
    4178             :         } else {
    4179           0 :                 WARN_ON(!PageUptodate(page));
    4180             :         }
    4181             : }
    4182             : 
    4183           0 : void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
    4184             :                 const void *srcv)
    4185             : {
    4186           0 :         char *kaddr;
    4187             : 
    4188           0 :         assert_eb_page_uptodate(eb, eb->pages[0]);
    4189           0 :         kaddr = page_address(eb->pages[0]) +
    4190             :                 get_eb_offset_in_page(eb, offsetof(struct btrfs_header,
    4191             :                                                    chunk_tree_uuid));
    4192           0 :         memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
    4193           0 : }
    4194             : 
    4195           0 : void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
    4196             : {
    4197           0 :         char *kaddr;
    4198             : 
    4199           0 :         assert_eb_page_uptodate(eb, eb->pages[0]);
    4200           0 :         kaddr = page_address(eb->pages[0]) +
    4201             :                 get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid));
    4202           0 :         memcpy(kaddr, srcv, BTRFS_FSID_SIZE);
    4203           0 : }
    4204             : 
    4205           0 : void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
    4206             :                          unsigned long start, unsigned long len)
    4207             : {
    4208           0 :         size_t cur;
    4209           0 :         size_t offset;
    4210           0 :         struct page *page;
    4211           0 :         char *kaddr;
    4212           0 :         char *src = (char *)srcv;
    4213           0 :         unsigned long i = get_eb_page_index(start);
    4214             : 
    4215           0 :         WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags));
    4216             : 
    4217           0 :         if (check_eb_range(eb, start, len))
    4218             :                 return;
    4219             : 
    4220           0 :         offset = get_eb_offset_in_page(eb, start);
    4221             : 
    4222           0 :         while (len > 0) {
    4223           0 :                 page = eb->pages[i];
    4224           0 :                 assert_eb_page_uptodate(eb, page);
    4225             : 
    4226           0 :                 cur = min(len, PAGE_SIZE - offset);
    4227           0 :                 kaddr = page_address(page);
    4228           0 :                 memcpy(kaddr + offset, src, cur);
    4229             : 
    4230           0 :                 src += cur;
    4231           0 :                 len -= cur;
    4232           0 :                 offset = 0;
    4233           0 :                 i++;
    4234             :         }
    4235             : }
    4236             : 
    4237           0 : void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
    4238             :                 unsigned long len)
    4239             : {
    4240           0 :         size_t cur;
    4241           0 :         size_t offset;
    4242           0 :         struct page *page;
    4243           0 :         char *kaddr;
    4244           0 :         unsigned long i = get_eb_page_index(start);
    4245             : 
    4246           0 :         if (check_eb_range(eb, start, len))
    4247             :                 return;
    4248             : 
    4249           0 :         offset = get_eb_offset_in_page(eb, start);
    4250             : 
    4251           0 :         while (len > 0) {
    4252           0 :                 page = eb->pages[i];
    4253           0 :                 assert_eb_page_uptodate(eb, page);
    4254             : 
    4255           0 :                 cur = min(len, PAGE_SIZE - offset);
    4256           0 :                 kaddr = page_address(page);
    4257           0 :                 memset(kaddr + offset, 0, cur);
    4258             : 
    4259           0 :                 len -= cur;
    4260           0 :                 offset = 0;
    4261           0 :                 i++;
    4262             :         }
    4263             : }
    4264             : 
    4265           0 : void copy_extent_buffer_full(const struct extent_buffer *dst,
    4266             :                              const struct extent_buffer *src)
    4267             : {
    4268           0 :         int i;
    4269           0 :         int num_pages;
    4270             : 
    4271           0 :         ASSERT(dst->len == src->len);
    4272             : 
    4273           0 :         if (dst->fs_info->nodesize >= PAGE_SIZE) {
    4274           0 :                 num_pages = num_extent_pages(dst);
    4275           0 :                 for (i = 0; i < num_pages; i++)
    4276           0 :                         copy_page(page_address(dst->pages[i]),
    4277           0 :                                   page_address(src->pages[i]));
    4278             :         } else {
    4279           0 :                 size_t src_offset = get_eb_offset_in_page(src, 0);
    4280           0 :                 size_t dst_offset = get_eb_offset_in_page(dst, 0);
    4281             : 
    4282           0 :                 ASSERT(src->fs_info->nodesize < PAGE_SIZE);
    4283           0 :                 memcpy(page_address(dst->pages[0]) + dst_offset,
    4284             :                        page_address(src->pages[0]) + src_offset,
    4285             :                        src->len);
    4286             :         }
    4287           0 : }
    4288             : 
    4289           0 : void copy_extent_buffer(const struct extent_buffer *dst,
    4290             :                         const struct extent_buffer *src,
    4291             :                         unsigned long dst_offset, unsigned long src_offset,
    4292             :                         unsigned long len)
    4293             : {
    4294           0 :         u64 dst_len = dst->len;
    4295           0 :         size_t cur;
    4296           0 :         size_t offset;
    4297           0 :         struct page *page;
    4298           0 :         char *kaddr;
    4299           0 :         unsigned long i = get_eb_page_index(dst_offset);
    4300             : 
    4301           0 :         if (check_eb_range(dst, dst_offset, len) ||
    4302           0 :             check_eb_range(src, src_offset, len))
    4303           0 :                 return;
    4304             : 
    4305           0 :         WARN_ON(src->len != dst_len);
    4306             : 
    4307           0 :         offset = get_eb_offset_in_page(dst, dst_offset);
    4308             : 
    4309           0 :         while (len > 0) {
    4310           0 :                 page = dst->pages[i];
    4311           0 :                 assert_eb_page_uptodate(dst, page);
    4312             : 
    4313           0 :                 cur = min(len, (unsigned long)(PAGE_SIZE - offset));
    4314             : 
    4315           0 :                 kaddr = page_address(page);
    4316           0 :                 read_extent_buffer(src, kaddr + offset, src_offset, cur);
    4317             : 
    4318           0 :                 src_offset += cur;
    4319           0 :                 len -= cur;
    4320           0 :                 offset = 0;
    4321           0 :                 i++;
    4322             :         }
    4323             : }
    4324             : 
    4325             : /*
    4326             :  * eb_bitmap_offset() - calculate the page and offset of the byte containing the
    4327             :  * given bit number
    4328             :  * @eb: the extent buffer
    4329             :  * @start: offset of the bitmap item in the extent buffer
    4330             :  * @nr: bit number
    4331             :  * @page_index: return index of the page in the extent buffer that contains the
    4332             :  * given bit number
    4333             :  * @page_offset: return offset into the page given by page_index
    4334             :  *
    4335             :  * This helper hides the ugliness of finding the byte in an extent buffer which
    4336             :  * contains a given bit.
    4337             :  */
    4338             : static inline void eb_bitmap_offset(const struct extent_buffer *eb,
    4339             :                                     unsigned long start, unsigned long nr,
    4340             :                                     unsigned long *page_index,
    4341             :                                     size_t *page_offset)
    4342             : {
    4343           0 :         size_t byte_offset = BIT_BYTE(nr);
    4344           0 :         size_t offset;
    4345             : 
    4346             :         /*
    4347             :          * The byte we want is the offset of the extent buffer + the offset of
    4348             :          * the bitmap item in the extent buffer + the offset of the byte in the
    4349             :          * bitmap item.
    4350             :          */
    4351           0 :         offset = start + offset_in_page(eb->start) + byte_offset;
    4352             : 
    4353           0 :         *page_index = offset >> PAGE_SHIFT;
    4354           0 :         *page_offset = offset_in_page(offset);
    4355             : }
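                      : 
                      : /*
                      :  * Worked example: with 4K pages and an eb starting at a page boundary
                      :  * (offset_in_page(eb->start) == 0), a bitmap item at start == 100 and
                      :  * nr == 20 give byte_offset == BIT_BYTE(20) == 2, so offset == 102,
                      :  * *page_index == 0 and *page_offset == 102; the bit itself is bit
                      :  * 20 % 8 == 4 within that byte.
                      :  */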
    4356             : 
    4357             : /*
    4358             :  * Determine whether a bit in a bitmap item is set.
    4359             :  *
    4360             :  * @eb:     the extent buffer
    4361             :  * @start:  offset of the bitmap item in the extent buffer
    4362             :  * @nr:     bit number to test
    4363             :  */
    4364           0 : int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
    4365             :                            unsigned long nr)
    4366             : {
    4367           0 :         u8 *kaddr;
    4368           0 :         struct page *page;
    4369           0 :         unsigned long i;
    4370           0 :         size_t offset;
    4371             : 
    4372           0 :         eb_bitmap_offset(eb, start, nr, &i, &offset);
    4373           0 :         page = eb->pages[i];
    4374           0 :         assert_eb_page_uptodate(eb, page);
    4375           0 :         kaddr = page_address(page);
    4376           0 :         return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
    4377             : }
    4378             : 
    4379             : /*
    4380             :  * Set an area of a bitmap to 1.
    4381             :  *
    4382             :  * @eb:     the extent buffer
    4383             :  * @start:  offset of the bitmap item in the extent buffer
    4384             :  * @pos:    bit number of the first bit
    4385             :  * @len:    number of bits to set
    4386             :  */
    4387           0 : void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
    4388             :                               unsigned long pos, unsigned long len)
    4389             : {
    4390           0 :         u8 *kaddr;
    4391           0 :         struct page *page;
    4392           0 :         unsigned long i;
    4393           0 :         size_t offset;
    4394           0 :         const unsigned int size = pos + len;
    4395           0 :         int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
    4396           0 :         u8 mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
    4397             : 
    4398           0 :         eb_bitmap_offset(eb, start, pos, &i, &offset);
    4399           0 :         page = eb->pages[i];
    4400           0 :         assert_eb_page_uptodate(eb, page);
    4401           0 :         kaddr = page_address(page);
    4402             : 
    4403           0 :         while (len >= bits_to_set) {
    4404           0 :                 kaddr[offset] |= mask_to_set;
    4405           0 :                 len -= bits_to_set;
    4406           0 :                 bits_to_set = BITS_PER_BYTE;
    4407           0 :                 mask_to_set = ~0;
    4408           0 :                 if (++offset >= PAGE_SIZE && len > 0) {
    4409           0 :                         offset = 0;
    4410           0 :                         page = eb->pages[++i];
    4411           0 :                         assert_eb_page_uptodate(eb, page);
    4412           0 :                         kaddr = page_address(page);
    4413             :                 }
    4414             :         }
    4415           0 :         if (len) {
    4416           0 :                 mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
    4417           0 :                 kaddr[offset] |= mask_to_set;
    4418             :         }
    4419           0 : }
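                      : 
                      : /*
                      :  * Worked example: pos == 3 and len == 12 give size == 15.  The first
                      :  * byte mask covers bits 3..7 (0xf8), setting 5 bits and leaving
                      :  * len == 7, which is less than a full byte, so the loop exits and
                      :  * BITMAP_LAST_BYTE_MASK(15) sets bits 0..6 (0x7f) of the next byte:
                      :  * 5 + 7 == 12 bits set in total.
                      :  */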
    4420             : 
    4421             : 
    4422             : /*
    4423             :  * Clear an area of a bitmap.
    4424             :  *
    4425             :  * @eb:     the extent buffer
    4426             :  * @start:  offset of the bitmap item in the extent buffer
    4427             :  * @pos:    bit number of the first bit
    4428             :  * @len:    number of bits to clear
    4429             :  */
    4430           0 : void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
    4431             :                                 unsigned long start, unsigned long pos,
    4432             :                                 unsigned long len)
    4433             : {
    4434           0 :         u8 *kaddr;
    4435           0 :         struct page *page;
    4436           0 :         unsigned long i;
    4437           0 :         size_t offset;
    4438           0 :         const unsigned int size = pos + len;
    4439           0 :         int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
    4440           0 :         u8 mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
    4441             : 
    4442           0 :         eb_bitmap_offset(eb, start, pos, &i, &offset);
    4443           0 :         page = eb->pages[i];
    4444           0 :         assert_eb_page_uptodate(eb, page);
    4445           0 :         kaddr = page_address(page);
    4446             : 
    4447           0 :         while (len >= bits_to_clear) {
    4448           0 :                 kaddr[offset] &= ~mask_to_clear;
    4449           0 :                 len -= bits_to_clear;
    4450           0 :                 bits_to_clear = BITS_PER_BYTE;
    4451           0 :                 mask_to_clear = ~0;
    4452           0 :                 if (++offset >= PAGE_SIZE && len > 0) {
    4453           0 :                         offset = 0;
    4454           0 :                         page = eb->pages[++i];
    4455           0 :                         assert_eb_page_uptodate(eb, page);
    4456           0 :                         kaddr = page_address(page);
    4457             :                 }
    4458             :         }
    4459           0 :         if (len) {
    4460           0 :                 mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
    4461           0 :                 kaddr[offset] &= ~mask_to_clear;
    4462             :         }
    4463           0 : }
    4464             : 
    4465             : static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
    4466             : {
    4467           0 :         unsigned long distance = (src > dst) ? src - dst : dst - src;
    4468           0 :         return distance < len;
    4469             : }
    4470             : 
    4471           0 : static void copy_pages(struct page *dst_page, struct page *src_page,
    4472             :                        unsigned long dst_off, unsigned long src_off,
    4473             :                        unsigned long len)
    4474             : {
    4475           0 :         char *dst_kaddr = page_address(dst_page);
    4476           0 :         char *src_kaddr;
    4477           0 :         int must_memmove = 0;
    4478             : 
    4479           0 :         if (dst_page != src_page) {
    4480           0 :                 src_kaddr = page_address(src_page);
    4481             :         } else {
    4482           0 :                 src_kaddr = dst_kaddr;
    4483           0 :                 if (areas_overlap(src_off, dst_off, len))
    4484           0 :                         must_memmove = 1;
    4485             :         }
    4486             : 
    4487           0 :         if (must_memmove)
    4488           0 :                 memmove(dst_kaddr + dst_off, src_kaddr + src_off, len);
    4489             :         else
    4490           0 :                 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
    4491           0 : }
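                      : 
                      : /*
                      :  * For example, within one page src_off == 0, dst_off == 4 and len == 8
                      :  * overlap (distance 4 < 8), so memmove() is required; with dst_off == 8
                      :  * the distance equals len, the ranges are disjoint and memcpy() is safe.
                      :  */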
    4492             : 
    4493           0 : void memcpy_extent_buffer(const struct extent_buffer *dst,
    4494             :                           unsigned long dst_offset, unsigned long src_offset,
    4495             :                           unsigned long len)
    4496             : {
    4497           0 :         size_t cur;
    4498           0 :         size_t dst_off_in_page;
    4499           0 :         size_t src_off_in_page;
    4500           0 :         unsigned long dst_i;
    4501           0 :         unsigned long src_i;
    4502             : 
    4503           0 :         if (check_eb_range(dst, dst_offset, len) ||
    4504           0 :             check_eb_range(dst, src_offset, len))
    4505           0 :                 return;
    4506             : 
    4507           0 :         while (len > 0) {
    4508           0 :                 dst_off_in_page = get_eb_offset_in_page(dst, dst_offset);
    4509           0 :                 src_off_in_page = get_eb_offset_in_page(dst, src_offset);
    4510             : 
    4511           0 :                 dst_i = get_eb_page_index(dst_offset);
    4512           0 :                 src_i = get_eb_page_index(src_offset);
    4513             : 
    4514           0 :                 cur = min(len, (unsigned long)(PAGE_SIZE -
    4515             :                                                src_off_in_page));
    4516           0 :                 cur = min_t(unsigned long, cur,
    4517             :                         (unsigned long)(PAGE_SIZE - dst_off_in_page));
    4518             : 
    4519           0 :                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
    4520             :                            dst_off_in_page, src_off_in_page, cur);
    4521             : 
    4522           0 :                 src_offset += cur;
    4523           0 :                 dst_offset += cur;
    4524           0 :                 len -= cur;
    4525             :         }
    4526             : }
    4527             : 
    4528           0 : void memmove_extent_buffer(const struct extent_buffer *dst,
    4529             :                            unsigned long dst_offset, unsigned long src_offset,
    4530             :                            unsigned long len)
    4531             : {
    4532           0 :         size_t cur;
    4533           0 :         size_t dst_off_in_page;
    4534           0 :         size_t src_off_in_page;
    4535           0 :         unsigned long dst_end = dst_offset + len - 1;
    4536           0 :         unsigned long src_end = src_offset + len - 1;
    4537           0 :         unsigned long dst_i;
    4538           0 :         unsigned long src_i;
    4539             : 
    4540           0 :         if (check_eb_range(dst, dst_offset, len) ||
    4541           0 :             check_eb_range(dst, src_offset, len))
    4542           0 :                 return;
    4543           0 :         if (dst_offset < src_offset) {
    4544           0 :                 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
    4545           0 :                 return;
    4546             :         }
    4547           0 :         while (len > 0) {
    4548           0 :                 dst_i = get_eb_page_index(dst_end);
    4549           0 :                 src_i = get_eb_page_index(src_end);
    4550             : 
    4551           0 :                 dst_off_in_page = get_eb_offset_in_page(dst, dst_end);
    4552           0 :                 src_off_in_page = get_eb_offset_in_page(dst, src_end);
    4553             : 
    4554           0 :                 cur = min_t(unsigned long, len, src_off_in_page + 1);
    4555           0 :                 cur = min(cur, dst_off_in_page + 1);
    4556           0 :                 copy_pages(dst->pages[dst_i], dst->pages[src_i],
    4557           0 :                            dst_off_in_page - cur + 1,
    4558           0 :                            src_off_in_page - cur + 1, cur);
    4559             : 
    4560           0 :                 dst_end -= cur;
    4561           0 :                 src_end -= cur;
    4562           0 :                 len -= cur;
    4563             :         }
    4564             : }
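                      : 
                      : /*
                      :  * For example, moving bytes [0, 8) to [4, 12) within one eb overlaps,
                      :  * and a head-first copy would re-read bytes it had already overwritten;
                      :  * walking backwards from dst_end/src_end avoids that.  The
                      :  * dst_offset < src_offset case is safe head-first and is delegated to
                      :  * memcpy_extent_buffer() above.
                      :  */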
    4565             : 
    4566             : #define GANG_LOOKUP_SIZE        16
    4567           0 : static struct extent_buffer *get_next_extent_buffer(
    4568             :                 struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr)
    4569             : {
    4570           0 :         struct extent_buffer *gang[GANG_LOOKUP_SIZE];
    4571           0 :         struct extent_buffer *found = NULL;
    4572           0 :         u64 page_start = page_offset(page);
    4573           0 :         u64 cur = page_start;
    4574             : 
    4575           0 :         ASSERT(in_range(bytenr, page_start, PAGE_SIZE));
    4576             :         lockdep_assert_held(&fs_info->buffer_lock);
    4577             : 
    4578           0 :         while (cur < page_start + PAGE_SIZE) {
    4579           0 :                 int ret;
    4580           0 :                 int i;
    4581             : 
    4582           0 :                 ret = radix_tree_gang_lookup(&fs_info->buffer_radix,
    4583           0 :                                 (void **)gang, cur >> fs_info->sectorsize_bits,
    4584           0 :                                 min_t(unsigned int, GANG_LOOKUP_SIZE,
    4585             :                                       PAGE_SIZE / fs_info->nodesize));
    4586           0 :                 if (ret == 0)
    4587           0 :                         goto out;
    4588           0 :                 for (i = 0; i < ret; i++) {
    4589             :                         /* Already beyond page end */
    4590           0 :                         if (gang[i]->start >= page_start + PAGE_SIZE)
    4591           0 :                                 goto out;
    4592             :                         /* Found one */
    4593           0 :                         if (gang[i]->start >= bytenr) {
    4594           0 :                                 found = gang[i];
    4595           0 :                                 goto out;
    4596             :                         }
    4597             :                 }
    4598           0 :                 cur = gang[ret - 1]->start + gang[ret - 1]->len;
    4599             :         }
    4600           0 : out:
    4601           0 :         return found;
    4602             : }
    4603             : 
    4604           0 : static int try_release_subpage_extent_buffer(struct page *page)
    4605             : {
    4606           0 :         struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);
    4607           0 :         u64 cur = page_offset(page);
    4608           0 :         const u64 end = page_offset(page) + PAGE_SIZE;
    4609           0 :         int ret;
    4610             : 
    4611           0 :         while (cur < end) {
    4612           0 :                 struct extent_buffer *eb = NULL;
    4613             : 
    4614             :                 /*
     4615             :                  * Unlike try_release_extent_buffer(), which uses page->private
     4616             :                  * to grab the buffer, in the subpage case we rely on the radix
     4617             :                  * tree, so we need to ensure radix tree consistency.
     4618             :                  *
     4619             :                  * We also want an atomic snapshot of the radix tree, so we go
     4620             :                  * with the spinlock rather than RCU.
    4621             :                  */
    4622           0 :                 spin_lock(&fs_info->buffer_lock);
    4623           0 :                 eb = get_next_extent_buffer(fs_info, page, cur);
    4624           0 :                 if (!eb) {
     4625             :                         /* No more ebs in the page range at or after cur */
    4626           0 :                         spin_unlock(&fs_info->buffer_lock);
    4627             :                         break;
    4628             :                 }
    4629           0 :                 cur = eb->start + eb->len;
    4630             : 
    4631             :                 /*
    4632             :                  * The same as try_release_extent_buffer(), to ensure the eb
    4633             :                  * won't disappear out from under us.
    4634             :                  */
    4635           0 :                 spin_lock(&eb->refs_lock);
    4636           0 :                 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
    4637           0 :                         spin_unlock(&eb->refs_lock);
    4638           0 :                         spin_unlock(&fs_info->buffer_lock);
    4639             :                         break;
    4640             :                 }
    4641           0 :                 spin_unlock(&fs_info->buffer_lock);
    4642             : 
    4643             :                 /*
     4644             :                  * If the tree ref isn't set then we know the ref on this eb
     4645             :                  * is a real ref, so just return; this eb will likely be
     4646             :                  * freed soon anyway.
    4647             :                  */
    4648           0 :                 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
    4649           0 :                         spin_unlock(&eb->refs_lock);
    4650             :                         break;
    4651             :                 }
    4652             : 
    4653             :                 /*
    4654             :                  * Here we don't care about the return value, we will always
    4655             :                  * check the page private at the end.  And
    4656             :                  * release_extent_buffer() will release the refs_lock.
    4657             :                  */
    4658           0 :                 release_extent_buffer(eb);
    4659             :         }
    4660             :         /*
     4661             :          * Finally, check whether we have cleared the page private: if we have
     4662             :          * released all ebs in the page, the page private should be cleared now.
    4663             :          */
    4664           0 :         spin_lock(&page->mapping->private_lock);
    4665           0 :         if (!PagePrivate(page))
    4666             :                 ret = 1;
    4667             :         else
    4668           0 :                 ret = 0;
    4669           0 :         spin_unlock(&page->mapping->private_lock);
    4670           0 :         return ret;
    4671             : 
    4672             : }
    4673             : 
    4674           0 : int try_release_extent_buffer(struct page *page)
    4675             : {
    4676           0 :         struct extent_buffer *eb;
    4677             : 
    4678           0 :         if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
    4679           0 :                 return try_release_subpage_extent_buffer(page);
    4680             : 
    4681             :         /*
    4682             :          * We need to make sure nobody is changing page->private, as we rely on
     4683             :          * page->private as the pointer to the extent buffer.
    4684             :          */
    4685           0 :         spin_lock(&page->mapping->private_lock);
    4686           0 :         if (!PagePrivate(page)) {
    4687           0 :                 spin_unlock(&page->mapping->private_lock);
    4688           0 :                 return 1;
    4689             :         }
    4690             : 
    4691           0 :         eb = (struct extent_buffer *)page->private;
    4692           0 :         BUG_ON(!eb);
    4693             : 
    4694             :         /*
     4695             :          * This is a little awful but should be ok: we need to make sure
     4696             :          * that the eb doesn't disappear out from under us while we're
     4697             :          * looking at this page.
    4698             :          */
    4699           0 :         spin_lock(&eb->refs_lock);
    4700           0 :         if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
    4701           0 :                 spin_unlock(&eb->refs_lock);
    4702           0 :                 spin_unlock(&page->mapping->private_lock);
    4703           0 :                 return 0;
    4704             :         }
    4705           0 :         spin_unlock(&page->mapping->private_lock);
    4706             : 
    4707             :         /*
     4708             :          * If the tree ref isn't set then we know the ref on this eb is a real
     4709             :          * ref, so just return; this page will likely be freed soon anyway.
    4710             :          */
    4711           0 :         if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
    4712           0 :                 spin_unlock(&eb->refs_lock);
    4713           0 :                 return 0;
    4714             :         }
    4715             : 
    4716           0 :         return release_extent_buffer(eb);
    4717             : }
    4718             : 
    4719             : /*
    4720             :  * btrfs_readahead_tree_block - attempt to readahead a child block
    4721             :  * @fs_info:    the fs_info
    4722             :  * @bytenr:     bytenr to read
    4723             :  * @owner_root: objectid of the root that owns this eb
    4724             :  * @gen:        generation for the uptodate check, can be 0
    4725             :  * @level:      level for the eb
    4726             :  *
    4727             :  * Attempt to readahead a tree block at @bytenr.  If @gen is 0 then we do a
    4728             :  * normal uptodate check of the eb, without checking the generation.  If we have
    4729             :  * to read the block we will not block on anything.
    4730             :  */
    4731           0 : void btrfs_readahead_tree_block(struct btrfs_fs_info *fs_info,
    4732             :                                 u64 bytenr, u64 owner_root, u64 gen, int level)
    4733             : {
    4734           0 :         struct btrfs_tree_parent_check check = {
    4735             :                 .has_first_key = 0,
    4736             :                 .level = level,
    4737             :                 .transid = gen
    4738             :         };
    4739           0 :         struct extent_buffer *eb;
    4740           0 :         int ret;
    4741             : 
    4742           0 :         eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
    4743           0 :         if (IS_ERR(eb))
    4744           0 :                 return;
    4745             : 
    4746           0 :         if (btrfs_buffer_uptodate(eb, gen, 1)) {
    4747           0 :                 free_extent_buffer(eb);
    4748           0 :                 return;
    4749             :         }
    4750             : 
    4751           0 :         ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check);
    4752           0 :         if (ret < 0)
    4753           0 :                 free_extent_buffer_stale(eb);
    4754             :         else
    4755           0 :                 free_extent_buffer(eb);
    4756             : }
    4757             : 
    4758             : /*
    4759             :  * btrfs_readahead_node_child - readahead a node's child block
    4760             :  * @node:       parent node we're reading from
    4761             :  * @slot:       slot in the parent node for the child we want to read
    4762             :  *
     4763             :  * A helper for btrfs_readahead_tree_block; we simply read the bytenr
     4764             :  * pointed at by the slot in the node provided.
    4765             :  */
    4766           0 : void btrfs_readahead_node_child(struct extent_buffer *node, int slot)
    4767             : {
    4768           0 :         btrfs_readahead_tree_block(node->fs_info,
    4769             :                                    btrfs_node_blockptr(node, slot),
    4770             :                                    btrfs_header_owner(node),
    4771             :                                    btrfs_node_ptr_generation(node, slot),
    4772           0 :                                    btrfs_header_level(node) - 1);
    4773           0 : }
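                      : 
                      : /*
                      :  * For illustration, a hypothetical tree walk could kick off readahead
                      :  * for every child before descending:
                      :  *
                      :  *        for (slot = 0; slot < btrfs_header_nritems(node); slot++)
                      :  *                btrfs_readahead_node_child(node, slot);
                      :  */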

Generated by: LCOV version 1.14