LCOV - code coverage report
Current view: top level - fs/btrfs - ctree.h (source / functions)
Test:         fstests of 6.5.0-rc3-achx @ Mon Jul 31 20:08:12 PDT 2023
Date:         2023-07-31 20:08:12
Coverage:     Lines: 25 / 25 (100.0 %)    Functions: 0 / 0 (-)

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * Copyright (C) 2007 Oracle.  All rights reserved.
       4             :  */
       5             : 
       6             : #ifndef BTRFS_CTREE_H
       7             : #define BTRFS_CTREE_H
       8             : 
       9             : #include <linux/mm.h>
      10             : #include <linux/sched/signal.h>
      11             : #include <linux/highmem.h>
      12             : #include <linux/fs.h>
      13             : #include <linux/rwsem.h>
      14             : #include <linux/semaphore.h>
      15             : #include <linux/completion.h>
      16             : #include <linux/backing-dev.h>
      17             : #include <linux/wait.h>
      18             : #include <linux/slab.h>
      19             : #include <trace/events/btrfs.h>
      20             : #include <asm/unaligned.h>
      21             : #include <linux/pagemap.h>
      22             : #include <linux/btrfs.h>
      23             : #include <linux/btrfs_tree.h>
      24             : #include <linux/workqueue.h>
      25             : #include <linux/security.h>
      26             : #include <linux/sizes.h>
      27             : #include <linux/dynamic_debug.h>
      28             : #include <linux/refcount.h>
      29             : #include <linux/crc32c.h>
      30             : #include <linux/iomap.h>
      31             : #include <linux/fscrypt.h>
      32             : #include "extent-io-tree.h"
      33             : #include "extent_io.h"
      34             : #include "extent_map.h"
      35             : #include "async-thread.h"
      36             : #include "block-rsv.h"
      37             : #include "locking.h"
      38             : #include "misc.h"
      39             : #include "fs.h"
      40             : 
      41             : struct btrfs_trans_handle;
      42             : struct btrfs_transaction;
      43             : struct btrfs_pending_snapshot;
      44             : struct btrfs_delayed_ref_root;
      45             : struct btrfs_space_info;
      46             : struct btrfs_block_group;
      47             : struct btrfs_ordered_sum;
      48             : struct btrfs_ref;
      49             : struct btrfs_bio;
      50             : struct btrfs_ioctl_encoded_io_args;
      51             : struct btrfs_device;
      52             : struct btrfs_fs_devices;
      53             : struct btrfs_balance_control;
      54             : struct btrfs_delayed_root;
      55             : struct reloc_control;
      56             : 
      57             : /* Read ahead values for struct btrfs_path.reada */
      58             : enum {
      59             :         READA_NONE,
      60             :         READA_BACK,
      61             :         READA_FORWARD,
      62             :         /*
      63             :          * Similar to READA_FORWARD but unlike it:
      64             :          *
      65             :          * 1) It will trigger readahead even for leaves that are not close to
      66             :          *    each other on disk;
      67             :          * 2) It also triggers readahead for nodes;
      68             :          * 3) During a search, even when a node or leaf is already in memory, it
      69             :          *    will still trigger readahead for other nodes and leaves that follow
      70             :          *    it.
      71             :          *
      72             :          * This is meant to be used only when we know we are iterating over the
      73             :          * entire tree or a very large part of it.
      74             :          */
      75             :         READA_FORWARD_ALWAYS,
      76             : };
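
A hedged sketch, not part of ctree.h: a caller that knows it will iterate most of a tree can request the aggressive readahead mode by setting the reada field of its btrfs_path before searching. The helper name below is invented for illustration.

/* Illustrative only: request aggressive readahead for a full-tree scan. */
static struct btrfs_path *alloc_scan_path(void)
{
        struct btrfs_path *path = btrfs_alloc_path();

        if (!path)
                return NULL;
        /* Readahead even non-adjacent leaves and also internal nodes. */
        path->reada = READA_FORWARD_ALWAYS;
        return path;
}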
      77             : 
      78             : /*
      79             :  * btrfs_paths remember the path taken from the root down to the leaf.
      80             :  * level 0 is always the leaf, and nodes[1...BTRFS_MAX_LEVEL] will point
      81             :  * to any other levels that are present.
      82             :  *
      83             :  * The slots array records the index of the item or block pointer
      84             :  * used while walking the tree.
      85             :  */
      86             : struct btrfs_path {
      87             :         struct extent_buffer *nodes[BTRFS_MAX_LEVEL];
      88             :         int slots[BTRFS_MAX_LEVEL];
      89             :         /* if there is real range locking, this locks field will change */
      90             :         u8 locks[BTRFS_MAX_LEVEL];
      91             :         u8 reada;
      92             :         /* keep some upper locks as we walk down */
      93             :         u8 lowest_level;
      94             : 
      95             :         /*
      96             :          * set by btrfs_split_item, tells search_slot to keep all locks
      97             :          * and to force calls to keep space in the nodes
      98             :          */
      99             :         unsigned int search_for_split:1;
     100             :         unsigned int keep_locks:1;
     101             :         unsigned int skip_locking:1;
     102             :         unsigned int search_commit_root:1;
     103             :         unsigned int need_commit_sem:1;
     104             :         unsigned int skip_release_on_error:1;
     105             :         /*
      106             :          * Indicate that the new item (btrfs_search_slot) is extending an
      107             :          * already existing item and that ins_len contains only the data size,
      108             :          * not the item header (i.e. sizeof(struct btrfs_item) is not included).
     109             :          */
     110             :         unsigned int search_for_extension:1;
     111             :         /* Stop search if any locks need to be taken (for read) */
     112             :         unsigned int nowait:1;
     113             : };
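
A hedged illustration of how the nodes[]/slots[] pair is consumed: after a successful btrfs_search_slot(), level 0 is the leaf and slots[0] indexes the item within it. btrfs_item_key_to_cpu() is an accessor declared elsewhere in btrfs (accessors.h), and the helper name is invented.

/* Illustrative only: read the key of the item a path currently points at. */
static void read_found_key(const struct btrfs_path *path, struct btrfs_key *key)
{
        struct extent_buffer *leaf = path->nodes[0];    /* level 0 is the leaf */
        int slot = path->slots[0];

        btrfs_item_key_to_cpu(leaf, key, slot);
}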
     114             : 
     115             : /*
     116             :  * The state of btrfs root
     117             :  */
     118             : enum {
     119             :         /*
      120             :          * btrfs_record_root_in_trans() is a multi-step process, and it can
      121             :          * race with the balancing code. But the race window is very small, and
      122             :          * it only exists the first time the root is added to each transaction.
      123             :          * IN_TRANS_SETUP is used to tell us when more checks are required.
     124             :          */
     125             :         BTRFS_ROOT_IN_TRANS_SETUP,
     126             : 
     127             :         /*
     128             :          * Set if tree blocks of this root can be shared by other roots.
     129             :          * Only subvolume trees and their reloc trees have this bit set.
     130             :          * Conflicts with TRACK_DIRTY bit.
     131             :          *
     132             :          * This affects two things:
     133             :          *
     134             :          * - How balance works
     135             :          *   For shareable roots, we need to use reloc tree and do path
     136             :          *   replacement for balance, and need various pre/post hooks for
     137             :          *   snapshot creation to handle them.
     138             :          *
     139             :          *   While for non-shareable trees, we just simply do a tree search
     140             :          *   with COW.
     141             :          *
     142             :          * - How dirty roots are tracked
     143             :          *   For shareable roots, btrfs_record_root_in_trans() is needed to
     144             :          *   track them, while non-subvolume roots have TRACK_DIRTY bit, they
     145             :          *   don't need to set this manually.
     146             :          */
     147             :         BTRFS_ROOT_SHAREABLE,
     148             :         BTRFS_ROOT_TRACK_DIRTY,
     149             :         BTRFS_ROOT_IN_RADIX,
     150             :         BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
     151             :         BTRFS_ROOT_DEFRAG_RUNNING,
     152             :         BTRFS_ROOT_FORCE_COW,
     153             :         BTRFS_ROOT_MULTI_LOG_TASKS,
     154             :         BTRFS_ROOT_DIRTY,
     155             :         BTRFS_ROOT_DELETING,
     156             : 
     157             :         /*
     158             :          * Reloc tree is orphan, only kept here for qgroup delayed subtree scan
     159             :          *
     160             :          * Set for the subvolume tree owning the reloc tree.
     161             :          */
     162             :         BTRFS_ROOT_DEAD_RELOC_TREE,
     163             :         /* Mark dead root stored on device whose cleanup needs to be resumed */
     164             :         BTRFS_ROOT_DEAD_TREE,
     165             :         /* The root has a log tree. Used for subvolume roots and the tree root. */
     166             :         BTRFS_ROOT_HAS_LOG_TREE,
     167             :         /* Qgroup flushing is in progress */
     168             :         BTRFS_ROOT_QGROUP_FLUSHING,
     169             :         /* We started the orphan cleanup for this root. */
     170             :         BTRFS_ROOT_ORPHAN_CLEANUP,
     171             :         /* This root has a drop operation that was started previously. */
     172             :         BTRFS_ROOT_UNFINISHED_DROP,
     173             :         /* This reloc root needs to have its buffers lockdep class reset. */
     174             :         BTRFS_ROOT_RESET_LOCKDEP_CLASS,
     175             : };
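
These values are bit positions rather than masks; in btrfs they are used with the kernel's atomic bitops on btrfs_root::state. A hedged sketch with an invented helper name:

/* Illustrative only: root state flags are tested with atomic bitops. */
static bool root_is_shareable(const struct btrfs_root *root)
{
        return test_bit(BTRFS_ROOT_SHAREABLE, &root->state);
}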
     176             : 
     177             : /*
     178             :  * Record swapped tree blocks of a subvolume tree for delayed subtree trace
     179             :  * code. For detail check comment in fs/btrfs/qgroup.c.
     180             :  */
     181             : struct btrfs_qgroup_swapped_blocks {
     182             :         spinlock_t lock;
      183             :         /* RB_EMPTY_ROOT() state of the above blocks[] */
     184             :         bool swapped;
     185             :         struct rb_root blocks[BTRFS_MAX_LEVEL];
     186             : };
     187             : 
     188             : /*
      189             :  * In-memory representation of a tree root. One of these describes every
      190             :  * b-tree in the filesystem (subvolume trees, extent tree, log trees, etc.).
     191             :  */
     192             : struct btrfs_root {
     193             :         struct rb_node rb_node;
     194             : 
     195             :         struct extent_buffer *node;
     196             : 
     197             :         struct extent_buffer *commit_root;
     198             :         struct btrfs_root *log_root;
     199             :         struct btrfs_root *reloc_root;
     200             : 
     201             :         unsigned long state;
     202             :         struct btrfs_root_item root_item;
     203             :         struct btrfs_key root_key;
     204             :         struct btrfs_fs_info *fs_info;
     205             :         struct extent_io_tree dirty_log_pages;
     206             : 
     207             :         struct mutex objectid_mutex;
     208             : 
     209             :         spinlock_t accounting_lock;
     210             :         struct btrfs_block_rsv *block_rsv;
     211             : 
     212             :         struct mutex log_mutex;
     213             :         wait_queue_head_t log_writer_wait;
     214             :         wait_queue_head_t log_commit_wait[2];
     215             :         struct list_head log_ctxs[2];
     216             :         /* Used only for log trees of subvolumes, not for the log root tree */
     217             :         atomic_t log_writers;
     218             :         atomic_t log_commit[2];
     219             :         /* Used only for log trees of subvolumes, not for the log root tree */
     220             :         atomic_t log_batch;
     221             :         int log_transid;
      222             :         /* Updated whether the commit succeeds or not. */
     223             :         int log_transid_committed;
      224             :         /* Only updated when the commit succeeds. */
     225             :         int last_log_commit;
     226             :         pid_t log_start_pid;
     227             : 
     228             :         u64 last_trans;
     229             : 
     230             :         u32 type;
     231             : 
     232             :         u64 free_objectid;
     233             : 
     234             :         struct btrfs_key defrag_progress;
     235             :         struct btrfs_key defrag_max;
     236             : 
     237             :         /* The dirty list is only used by non-shareable roots */
     238             :         struct list_head dirty_list;
     239             : 
     240             :         struct list_head root_list;
     241             : 
     242             :         spinlock_t log_extents_lock[2];
     243             :         struct list_head logged_list[2];
     244             : 
     245             :         spinlock_t inode_lock;
     246             :         /* red-black tree that keeps track of in-memory inodes */
     247             :         struct rb_root inode_tree;
     248             : 
     249             :         /*
     250             :          * radix tree that keeps track of delayed nodes of every inode,
     251             :          * protected by inode_lock
     252             :          */
     253             :         struct radix_tree_root delayed_nodes_tree;
     254             :         /*
     255             :          * right now this just gets used so that a root has its own devid
     256             :          * for stat.  It may be used for more later
     257             :          */
     258             :         dev_t anon_dev;
     259             : 
     260             :         spinlock_t root_item_lock;
     261             :         refcount_t refs;
     262             : 
     263             :         struct mutex delalloc_mutex;
     264             :         spinlock_t delalloc_lock;
     265             :         /*
      266             :          * All of the inodes that have delalloc bytes.  It is possible for
      267             :          * this list to be empty even when there are still dirty data=ordered
      268             :          * extents waiting to finish IO.
     269             :          */
     270             :         struct list_head delalloc_inodes;
     271             :         struct list_head delalloc_root;
     272             :         u64 nr_delalloc_inodes;
     273             : 
     274             :         struct mutex ordered_extent_mutex;
     275             :         /*
     276             :          * this is used by the balancing code to wait for all the pending
     277             :          * ordered extents
     278             :          */
     279             :         spinlock_t ordered_extent_lock;
     280             : 
     281             :         /*
      282             :          * All of the data=ordered extents pending writeback. These can span
      283             :          * multiple transactions and basically include every dirty data page
      284             :          * that isn't from nodatacow.
     285             :          */
     286             :         struct list_head ordered_extents;
     287             :         struct list_head ordered_root;
     288             :         u64 nr_ordered_extents;
     289             : 
     290             :         /*
     291             :          * Not empty if this subvolume root has gone through tree block swap
     292             :          * (relocation)
     293             :          *
     294             :          * Will be used by reloc_control::dirty_subvol_roots.
     295             :          */
     296             :         struct list_head reloc_dirty_list;
     297             : 
     298             :         /*
      299             :          * Number of currently running SEND ioctls, used to prevent
      300             :          * manipulation of the read-only status via SUBVOL_SETFLAGS.
     301             :          */
     302             :         int send_in_progress;
     303             :         /*
     304             :          * Number of currently running deduplication operations that have a
     305             :          * destination inode belonging to this root. Protected by the lock
     306             :          * root_item_lock.
     307             :          */
     308             :         int dedupe_in_progress;
     309             :         /* For exclusion of snapshot creation and nocow writes */
     310             :         struct btrfs_drew_lock snapshot_lock;
     311             : 
     312             :         atomic_t snapshot_force_cow;
     313             : 
     314             :         /* For qgroup metadata reserved space */
     315             :         spinlock_t qgroup_meta_rsv_lock;
     316             :         u64 qgroup_meta_rsv_pertrans;
     317             :         u64 qgroup_meta_rsv_prealloc;
     318             :         wait_queue_head_t qgroup_flush_wait;
     319             : 
     320             :         /* Number of active swapfiles */
     321             :         atomic_t nr_swapfiles;
     322             : 
     323             :         /* Record pairs of swapped blocks for qgroup */
     324             :         struct btrfs_qgroup_swapped_blocks swapped_blocks;
     325             : 
     326             :         /* Used only by log trees, when logging csum items */
     327             :         struct extent_io_tree log_csum_range;
     328             : 
     329             : #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
     330             :         u64 alloc_bytenr;
     331             : #endif
     332             : 
     333             : #ifdef CONFIG_BTRFS_DEBUG
     334             :         struct list_head leak_list;
     335             : #endif
     336             : };
     337             : 
     338             : static inline bool btrfs_root_readonly(const struct btrfs_root *root)
     339             : {
     340             :         /* Byte-swap the constant at compile time, root_item::flags is LE */
     341    18003628 :         return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
     342             : }
     343             : 
     344             : static inline bool btrfs_root_dead(const struct btrfs_root *root)
     345             : {
     346             :         /* Byte-swap the constant at compile time, root_item::flags is LE */
     347         509 :         return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
     348             : }
     349             : 
     350             : static inline u64 btrfs_root_id(const struct btrfs_root *root)
     351             : {
     352    10895301 :         return root->root_key.objectid;
     353             : }
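
A hedged sketch of how these predicates are commonly combined before allowing a write-side operation on a subvolume; the helper name and the error codes chosen are illustrative, not taken from this file.

/* Illustrative only: refuse modifications on read-only or dying roots. */
static int check_root_writable(const struct btrfs_root *root)
{
        if (btrfs_root_readonly(root))
                return -EROFS;
        if (btrfs_root_dead(root))
                return -ENOENT;
        return 0;
}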
     354             : 
     355             : /*
     356             :  * Structure that conveys information about an extent that is going to replace
     357             :  * all the extents in a file range.
     358             :  */
     359             : struct btrfs_replace_extent_info {
     360             :         u64 disk_offset;
     361             :         u64 disk_len;
     362             :         u64 data_offset;
     363             :         u64 data_len;
     364             :         u64 file_offset;
     365             :         /* Pointer to a file extent item of type regular or prealloc. */
     366             :         char *extent_buf;
     367             :         /*
     368             :          * Set to true when attempting to replace a file range with a new extent
     369             :          * described by this structure, set to false when attempting to clone an
     370             :          * existing extent into a file range.
     371             :          */
     372             :         bool is_new_extent;
     373             :         /* Indicate if we should update the inode's mtime and ctime. */
     374             :         bool update_times;
     375             :         /* Meaningful only if is_new_extent is true. */
     376             :         int qgroup_reserved;
     377             :         /*
     378             :          * Meaningful only if is_new_extent is true.
     379             :          * Used to track how many extent items we have already inserted in a
     380             :          * subvolume tree that refer to the extent described by this structure,
     381             :          * so that we know when to create a new delayed ref or update an existing
     382             :          * one.
     383             :          */
     384             :         int insertions;
     385             : };
     386             : 
     387             : /* Arguments for btrfs_drop_extents() */
     388             : struct btrfs_drop_extents_args {
     389             :         /* Input parameters */
     390             : 
     391             :         /*
     392             :          * If NULL, btrfs_drop_extents() will allocate and free its own path.
     393             :          * If 'replace_extent' is true, this must not be NULL. Also the path
     394             :          * is always released except if 'replace_extent' is true and
     395             :          * btrfs_drop_extents() sets 'extent_inserted' to true, in which case
     396             :          * the path is kept locked.
     397             :          */
     398             :         struct btrfs_path *path;
     399             :         /* Start offset of the range to drop extents from */
     400             :         u64 start;
     401             :         /* End (exclusive, last byte + 1) of the range to drop extents from */
     402             :         u64 end;
     403             :         /* If true drop all the extent maps in the range */
     404             :         bool drop_cache;
     405             :         /*
     406             :          * If true it means we want to insert a new extent after dropping all
     407             :          * the extents in the range. If this is true, the 'extent_item_size'
     408             :          * parameter must be set as well and the 'extent_inserted' field will
     409             :          * be set to true by btrfs_drop_extents() if it could insert the new
     410             :          * extent.
     411             :          * Note: when this is set to true the path must not be NULL.
     412             :          */
     413             :         bool replace_extent;
     414             :         /*
     415             :          * Used if 'replace_extent' is true. Size of the file extent item to
     416             :          * insert after dropping all existing extents in the range
     417             :          */
     418             :         u32 extent_item_size;
     419             : 
     420             :         /* Output parameters */
     421             : 
     422             :         /*
     423             :          * Set to the minimum between the input parameter 'end' and the end
     424             :          * (exclusive, last byte + 1) of the last dropped extent. This is always
     425             :          * set even if btrfs_drop_extents() returns an error.
     426             :          */
     427             :         u64 drop_end;
     428             :         /*
     429             :          * The number of allocated bytes found in the range. This can be smaller
     430             :          * than the range's length when there are holes in the range.
     431             :          */
     432             :         u64 bytes_found;
     433             :         /*
     434             :          * Only set if 'replace_extent' is true. Set to true if we were able
     435             :          * to insert a replacement extent after dropping all extents in the
     436             :          * range, otherwise set to false by btrfs_drop_extents().
     437             :          * Also, if btrfs_drop_extents() has set this to true it means it
     438             :          * returned with the path locked, otherwise if it has set this to
     439             :          * false it has returned with the path released.
     440             :          */
     441             :         bool extent_inserted;
     442             : };
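
A hedged sketch of filling the input half of this structure before calling btrfs_drop_extents() (whose prototype lives outside this header); the helper and the chosen values are illustrative only.

/* Illustrative only: drop all extents in [start, end) and their extent maps. */
static void init_drop_args(struct btrfs_drop_extents_args *args, u64 start, u64 end)
{
        memset(args, 0, sizeof(*args));
        args->start = start;
        args->end = end;                /* exclusive: last byte + 1 */
        args->drop_cache = true;        /* also drop extent maps in the range */
        /* replace_extent stays false, so path may be left NULL. */
}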
     443             : 
     444             : struct btrfs_file_private {
     445             :         void *filldir_buf;
     446             :         struct extent_state *llseek_cached_state;
     447             : };
     448             : 
     449             : static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
     450             : {
     451  1327097337 :         return info->nodesize - sizeof(struct btrfs_header);
     452             : }
     453             : 
     454             : static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
     455             : {
     456   131852551 :         return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
     457             : }
     458             : 
     459             : static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
     460             : {
     461   266441470 :         return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
     462             : }
     463             : 
     464             : static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
     465             : {
     466   130518482 :         return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
     467             : }
     468             : 
     469             : #define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
     470             :                                 ((bytes) >> (fs_info)->sectorsize_bits)
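
A hedged worked example of what these helpers evaluate to for a common 16 KiB nodesize and 4 KiB sectorsize; the on-disk struct sizes used (101-byte header, 25-byte item, 33-byte key pointer, 30-byte dir item) are assumptions, not taken from this file.

/*
 * Illustrative only, for nodesize == 16384 and sectorsize_bits == 12:
 *
 *   BTRFS_LEAF_DATA_SIZE()     = 16384 - 101 = 16283 bytes
 *   BTRFS_MAX_ITEM_SIZE()      = 16283 - 25  = 16258 bytes
 *   BTRFS_NODEPTRS_PER_BLOCK() = 16283 / 33  = 493 key pointers
 *   BTRFS_MAX_XATTR_SIZE()     = 16258 - 30  = 16228 bytes
 *   BTRFS_BYTES_TO_BLKS(fs_info, 16384)      = 4
 */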
     471             : 
     472             : static inline u32 btrfs_crc32c(u32 crc, const void *address, unsigned length)
     473             : {
     474   486911525 :         return crc32c(crc, address, length);
     475             : }
     476             : 
     477             : static inline void btrfs_crc32c_final(u32 crc, u8 *result)
     478             : {
     479         705 :         put_unaligned_le32(~crc, result);
     480             : }
     481             : 
     482             : static inline u64 btrfs_name_hash(const char *name, int len)
     483             : {
     484    97010978 :        return crc32c((u32)~1, name, len);
     485             : }
     486             : 
     487             : /*
     488             :  * Figure the key offset of an extended inode ref
     489             :  */
     490             : static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
     491             :                                    int len)
     492             : {
     493       15563 :        return (u64) crc32c(parent_objectid, name, len);
     494             : }
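
A hedged sketch of what the name hash is used for: it becomes the key offset when looking up a directory entry. BTRFS_DIR_ITEM_KEY comes from linux/btrfs_tree.h (included above); the helper name is invented.

/* Illustrative only: build the key used to find a dir entry by name. */
static void dir_item_key(struct btrfs_key *key, u64 dir_ino,
                         const char *name, int name_len)
{
        key->objectid = dir_ino;
        key->type = BTRFS_DIR_ITEM_KEY;
        key->offset = btrfs_name_hash(name, name_len);
}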
     495             : 
     496             : static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
     497             : {
     498    33336722 :         return mapping_gfp_constraint(mapping, ~__GFP_FS);
     499             : }
     500             : 
     501             : int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
     502             :                                    u64 start, u64 end);
     503             : int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
     504             :                          u64 num_bytes, u64 *actual_bytes);
     505             : int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
     506             : 
     507             : /* ctree.c */
     508             : int __init btrfs_ctree_init(void);
     509             : void __cold btrfs_ctree_exit(void);
     510             : 
     511             : int btrfs_bin_search(struct extent_buffer *eb, int first_slot,
     512             :                      const struct btrfs_key *key, int *slot);
     513             : 
     514             : int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
     515             : int btrfs_previous_item(struct btrfs_root *root,
     516             :                         struct btrfs_path *path, u64 min_objectid,
     517             :                         int type);
     518             : int btrfs_previous_extent_item(struct btrfs_root *root,
     519             :                         struct btrfs_path *path, u64 min_objectid);
     520             : void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
     521             :                              struct btrfs_path *path,
     522             :                              const struct btrfs_key *new_key);
     523             : struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
     524             : int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
     525             :                         struct btrfs_key *key, int lowest_level,
     526             :                         u64 min_trans);
     527             : int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
     528             :                          struct btrfs_path *path,
     529             :                          u64 min_trans);
     530             : struct extent_buffer *btrfs_read_node_slot(struct extent_buffer *parent,
     531             :                                            int slot);
     532             : 
     533             : int btrfs_cow_block(struct btrfs_trans_handle *trans,
     534             :                     struct btrfs_root *root, struct extent_buffer *buf,
     535             :                     struct extent_buffer *parent, int parent_slot,
     536             :                     struct extent_buffer **cow_ret,
     537             :                     enum btrfs_lock_nesting nest);
     538             : int btrfs_copy_root(struct btrfs_trans_handle *trans,
     539             :                       struct btrfs_root *root,
     540             :                       struct extent_buffer *buf,
     541             :                       struct extent_buffer **cow_ret, u64 new_root_objectid);
     542             : int btrfs_block_can_be_shared(struct btrfs_root *root,
     543             :                               struct extent_buffer *buf);
     544             : int btrfs_del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     545             :                   struct btrfs_path *path, int level, int slot);
     546             : void btrfs_extend_item(struct btrfs_path *path, u32 data_size);
     547             : void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end);
     548             : int btrfs_split_item(struct btrfs_trans_handle *trans,
     549             :                      struct btrfs_root *root,
     550             :                      struct btrfs_path *path,
     551             :                      const struct btrfs_key *new_key,
     552             :                      unsigned long split_offset);
     553             : int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
     554             :                          struct btrfs_root *root,
     555             :                          struct btrfs_path *path,
     556             :                          const struct btrfs_key *new_key);
     557             : int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
     558             :                 u64 inum, u64 ioff, u8 key_type, struct btrfs_key *found_key);
     559             : int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     560             :                       const struct btrfs_key *key, struct btrfs_path *p,
     561             :                       int ins_len, int cow);
     562             : int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
     563             :                           struct btrfs_path *p, u64 time_seq);
     564             : int btrfs_search_slot_for_read(struct btrfs_root *root,
     565             :                                const struct btrfs_key *key,
     566             :                                struct btrfs_path *p, int find_higher,
     567             :                                int return_any);
     568             : int btrfs_realloc_node(struct btrfs_trans_handle *trans,
     569             :                        struct btrfs_root *root, struct extent_buffer *parent,
     570             :                        int start_slot, u64 *last_ret,
     571             :                        struct btrfs_key *progress);
     572             : void btrfs_release_path(struct btrfs_path *p);
     573             : struct btrfs_path *btrfs_alloc_path(void);
     574             : void btrfs_free_path(struct btrfs_path *p);
     575             : 
     576             : int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     577             :                    struct btrfs_path *path, int slot, int nr);
     578             : static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
     579             :                                  struct btrfs_root *root,
     580             :                                  struct btrfs_path *path)
     581             : {
     582    13094748 :         return btrfs_del_items(trans, root, path, path->slots[0], 1);
     583             : }
     584             : 
     585             : /*
     586             :  * Describes a batch of items to insert in a btree. This is used by
     587             :  * btrfs_insert_empty_items().
     588             :  */
     589             : struct btrfs_item_batch {
     590             :         /*
     591             :          * Pointer to an array containing the keys of the items to insert (in
     592             :          * sorted order).
     593             :          */
     594             :         const struct btrfs_key *keys;
     595             :         /* Pointer to an array containing the data size for each item to insert. */
     596             :         const u32 *data_sizes;
     597             :         /*
     598             :          * The sum of data sizes for all items. The caller can compute this while
     599             :          * setting up the data_sizes array, so it ends up being more efficient
     600             :          * than having btrfs_insert_empty_items() or setup_item_for_insert()
     601             :          * doing it, as it would avoid an extra loop over a potentially large
     602             :          * array, and in the case of setup_item_for_insert(), we would be doing
     603             :          * it while holding a write lock on a leaf and often on upper level nodes
     604             :          * too, unnecessarily increasing the size of a critical section.
     605             :          */
     606             :         u32 total_data_size;
     607             :         /* Size of the keys and data_sizes arrays (number of items in the batch). */
     608             :         int nr;
     609             : };
     610             : 
     611             : void btrfs_setup_item_for_insert(struct btrfs_root *root,
     612             :                                  struct btrfs_path *path,
     613             :                                  const struct btrfs_key *key,
     614             :                                  u32 data_size);
     615             : int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     616             :                       const struct btrfs_key *key, void *data, u32 data_size);
     617             : int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
     618             :                              struct btrfs_root *root,
     619             :                              struct btrfs_path *path,
     620             :                              const struct btrfs_item_batch *batch);
     621             : 
     622             : static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
     623             :                                           struct btrfs_root *root,
     624             :                                           struct btrfs_path *path,
     625             :                                           const struct btrfs_key *key,
     626             :                                           u32 data_size)
     627             : {
     628    57972949 :         struct btrfs_item_batch batch;
     629             : 
     630    57972949 :         batch.keys = key;
     631    57972949 :         batch.data_sizes = &data_size;
     632    57972949 :         batch.total_data_size = data_size;
     633    57972949 :         batch.nr = 1;
     634             : 
     635    57972949 :         return btrfs_insert_empty_items(trans, root, path, &batch);
     636             : }
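
A hedged sketch of the multi-item case that btrfs_item_batch and btrfs_insert_empty_items() are designed for; the helper is invented and the keys are assumed to be pre-sorted by the caller.

/* Illustrative only: insert two empty items in one leaf operation. */
static int insert_two_items(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, struct btrfs_path *path,
                            const struct btrfs_key keys[2], u32 size0, u32 size1)
{
        const u32 data_sizes[2] = { size0, size1 };
        struct btrfs_item_batch batch = {
                .keys = keys,
                .data_sizes = data_sizes,
                .total_data_size = size0 + size1,
                .nr = 2,
        };

        return btrfs_insert_empty_items(trans, root, path, &batch);
}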
     637             : 
     638             : int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
     639             :                         u64 time_seq);
     640             : 
     641             : int btrfs_search_backwards(struct btrfs_root *root, struct btrfs_key *key,
     642             :                            struct btrfs_path *path);
     643             : 
     644             : int btrfs_get_next_valid_item(struct btrfs_root *root, struct btrfs_key *key,
     645             :                               struct btrfs_path *path);
     646             : 
     647             : /*
     648             :  * Search in @root for a given @key, and store the slot found in @found_key.
     649             :  *
     650             :  * @root:       The root node of the tree.
     651             :  * @key:        The key we are looking for.
     652             :  * @found_key:  Will hold the found item.
     653             :  * @path:       Holds the current slot/leaf.
     654             :  * @iter_ret:   Contains the value returned from btrfs_search_slot or
     655             :  *              btrfs_get_next_valid_item, whichever was executed last.
     656             :  *
     657             :  * The @iter_ret is an output variable that will contain the return value of
     658             :  * btrfs_search_slot, if it encountered an error, or the value returned from
     659             :  * btrfs_get_next_valid_item otherwise. That return value can be 0, if a valid
     660             :  * slot was found, 1 if there were no more leaves, and <0 if there was an error.
     661             :  *
     662             :  * It's recommended to use a separate variable for iter_ret and then use it to
     663             :  * set the function return value so there's no confusion of the 0/1/errno
     664             :  * values stemming from btrfs_search_slot.
     665             :  */
     666             : #define btrfs_for_each_slot(root, key, found_key, path, iter_ret)               \
     667             :         for (iter_ret = btrfs_search_slot(NULL, (root), (key), (path), 0, 0);   \
     668             :                 (iter_ret) >= 0 &&                                           \
     669             :                 (iter_ret = btrfs_get_next_valid_item((root), (found_key), (path))) == 0; \
     670             :                 (path)->slots[0]++                                           \
     671             :         )
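
A hedged usage sketch of the iterator above, following the documented 0/1/<0 return convention; the counting helper is invented.

/* Illustrative only: count items of a root starting from a given key. */
static long count_items_from(struct btrfs_root *root, const struct btrfs_key *first)
{
        struct btrfs_key key = *first;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        long count = 0;
        int iter_ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        btrfs_for_each_slot(root, &key, &found_key, path, iter_ret)
                count++;

        btrfs_free_path(path);
        /* iter_ret is 1 when there were no more leaves, < 0 on error. */
        return iter_ret < 0 ? iter_ret : count;
}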
     672             : 
     673             : int btrfs_next_old_item(struct btrfs_root *root, struct btrfs_path *path, u64 time_seq);
     674             : 
     675             : /*
     676             :  * Search the tree again to find a leaf with greater keys.
     677             :  *
     678             :  * Returns 0 if it found something or 1 if there are no greater leaves.
     679             :  * Returns < 0 on error.
     680             :  */
     681             : static inline int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
     682             : {
     683    29176532 :         return btrfs_next_old_leaf(root, path, 0);
     684             : }
     685             : 
     686             : static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
     687             : {
     688    17206266 :         return btrfs_next_old_item(root, p, 0);
     689             : }
     690             : int btrfs_leaf_free_space(const struct extent_buffer *leaf);
     691             : 
     692             : static inline int is_fstree(u64 rootid)
     693             : {
     694   514992345 :         if (rootid == BTRFS_FS_TREE_OBJECTID ||
     695    97880609 :             ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID &&
     696             :               !btrfs_qgroup_level(rootid)))
     697   446359686 :                 return 1;
     698             :         return 0;
     699             : }
     700             : 
     701             : static inline bool btrfs_is_data_reloc_root(const struct btrfs_root *root)
     702             : {
     703    97553074 :         return root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID;
     704             : }
     705             : 
     706             : u16 btrfs_csum_type_size(u16 type);
     707             : int btrfs_super_csum_size(const struct btrfs_super_block *s);
     708             : const char *btrfs_super_csum_name(u16 csum_type);
     709             : const char *btrfs_super_csum_driver(u16 csum_type);
     710             : size_t __attribute_const__ btrfs_get_num_csums(void);
     711             : 
     712             : /*
     713             :  * We use page status Private2 to indicate there is an ordered extent with
     714             :  * unfinished IO.
     715             :  *
     716             :  * Rename the Private2 accessors to Ordered, to improve readability.
     717             :  */
     718             : #define PageOrdered(page)               PagePrivate2(page)
     719             : #define SetPageOrdered(page)            SetPagePrivate2(page)
     720             : #define ClearPageOrdered(page)          ClearPagePrivate2(page)
     721             : #define folio_test_ordered(folio)       folio_test_private_2(folio)
     722             : #define folio_set_ordered(folio)        folio_set_private_2(folio)
     723             : #define folio_clear_ordered(folio)      folio_clear_private_2(folio)
     724             : 
     725             : #endif

Generated by: LCOV version 1.14