LCOV - code coverage report
Current view: top level - fs/btrfs - backref.c (source / functions) Hit Total Coverage
Test: fstests of 6.5.0-rc3-djwa @ Mon Jul 31 20:08:17 PDT 2023 Lines: 3 1724 0.2 %
Date: 2023-07-31 20:08:17 Functions: 1 56 1.8 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0
       2             : /*
       3             :  * Copyright (C) 2011 STRATO.  All rights reserved.
       4             :  */
       5             : 
       6             : #include <linux/mm.h>
       7             : #include <linux/rbtree.h>
       8             : #include <trace/events/btrfs.h>
       9             : #include "ctree.h"
      10             : #include "disk-io.h"
      11             : #include "backref.h"
      12             : #include "ulist.h"
      13             : #include "transaction.h"
      14             : #include "delayed-ref.h"
      15             : #include "locking.h"
      16             : #include "misc.h"
      17             : #include "tree-mod-log.h"
      18             : #include "fs.h"
      19             : #include "accessors.h"
      20             : #include "extent-tree.h"
      21             : #include "relocation.h"
      22             : #include "tree-checker.h"
      23             : 
      24             : /* Just arbitrary numbers so we can be sure one of these happened. */
      25             : #define BACKREF_FOUND_SHARED     6
      26             : #define BACKREF_FOUND_NOT_SHARED 7
      27             : 
/*
 * Node of a singly linked list describing one inode reference to a data
 * extent: which inode references it, at which file offset, and for how
 * many bytes. Built by check_extent_in_eb() and freed as a whole list by
 * free_inode_elem_list().
 */
struct extent_inode_elem {
	u64 inum;	/* objectid (inode number) of the referencing inode */
	u64 offset;	/* file offset of the reference (key->offset, possibly adjusted) */
	u64 num_bytes;	/* number of bytes of the file extent item */
	struct extent_inode_elem *next;	/* next element, NULL terminates the list */
};
      34             : 
/*
 * Process one file extent item (@fi at @key in leaf @eb) that references
 * the extent a backref walk is resolving.
 *
 * If the walk context restricts matches to a specific byte position
 * (extent_item_pos) and the extent data is plain (not compressed,
 * encrypted or otherwise encoded), items not covering that position are
 * rejected. When the context provides both a cache_lookup and an
 * indirect_ref_iterator and the leaf's roots are cached, the iterator
 * callback is invoked per root instead of building a list entry;
 * otherwise a new extent_inode_elem is prepended to *@eie.
 *
 * Returns 0 on success, 1 when the item does not cover extent_item_pos,
 * -ENOMEM on allocation failure, or a non-zero value propagated from the
 * indirect_ref_iterator callback.
 */
static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	/*
	 * Position filtering is only meaningful for plain (uncompressed,
	 * unencrypted, unencoded) extents where file offsets map 1:1 to
	 * disk offsets.
	 */
	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		/* The wanted position lies outside this item's range. */
		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	/*
	 * Fast path: if the roots for this leaf are already cached, hand
	 * each (inode, offset, len, root) tuple to the caller's iterator
	 * instead of allocating a list element.
	 */
	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	/* Prepend the new element to the caller's list. */
	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}
      93             : 
      94             : static void free_inode_elem_list(struct extent_inode_elem *eie)
      95             : {
      96           0 :         struct extent_inode_elem *eie_next;
      97             : 
      98           0 :         for (; eie; eie = eie_next) {
      99           0 :                 eie_next = eie->next;
     100           0 :                 kfree(eie);
     101             :         }
     102             : }
     103             : 
/*
 * Scan every item of leaf @eb for non-inline file extent items whose disk
 * bytenr equals ctx->bytenr, running check_extent_in_eb() on each match to
 * collect inode references into *@eie.
 *
 * Returns 0 on success (including "no matches"), a negative error, or
 * BTRFS_ITERATE_EXTENT_INODES_STOP propagated from check_extent_in_eb().
 */
static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		/* Only file extent items can reference a data extent. */
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		/* Inline extents have no disk bytenr to compare against. */
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}
     142             : 
/*
 * A cached red-black tree of prelim_ref nodes, ordered by
 * prelim_ref_compare(), together with the number of nodes it holds.
 */
struct preftree {
	struct rb_root_cached root;
	unsigned int count;	/* number of prelim_refs in the tree */
};

/* Static initializer for an empty preftree. */
#define PREFTREE_INIT   { .root = RB_ROOT_CACHED, .count = 0 }
     149             : 
/*
 * The set of preftrees a backref walk collects preliminary references
 * into, separated by how each reference identifies its parent block.
 */
struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	/* Indirect refs added without a key (see add_indirect_ref()). */
	struct preftree indirect_missing_keys;
};
     155             : 
/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	/* Caller-provided context carried across the whole check. */
	struct btrfs_backref_share_check_ctx *ctx;
	/* Root of the inode whose extent sharedness is being determined. */
	struct btrfs_root *root;
	/* Objectid (inode number) of that inode (see update_share_count()). */
	u64 inum;
	/* Disk bytenr of the data extent being checked. */
	u64 data_bytenr;
	/*
	 * Generation of the data extent — NOTE(review): how this is used is
	 * not visible here; presumably to skip delayed-ref processing for
	 * extents created in the current transaction. Verify at callers.
	 */
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness. In other words, how many file extent
	 * items we could find for our inode that point to our target data
	 * extent. The value we get here after finishing the extent sharedness
	 * check may be smaller than reality, but if it ends up being greater
	 * than 1, then we know for sure the inode has multiple file extent
	 * items that point to our inode, and we can safely assume it's useful
	 * to cache the sharedness check result.
	 */
	int self_ref_count;
	/*
	 * Whether delayed delete refs were encountered — NOTE(review):
	 * inferred from the name; the code consuming it is outside this view.
	 */
	bool have_delayed_delete_refs;
};
     190             : 
     191             : static inline int extent_is_shared(struct share_check *sc)
     192             : {
     193           0 :         return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
     194             : }
     195             : 
     196             : static struct kmem_cache *btrfs_prelim_ref_cache;
     197             : 
/*
 * Create the slab cache backing all prelim_ref allocations. Called once
 * at module initialization.
 *
 * Returns 0 on success, -ENOMEM if the cache could not be created.
 */
int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}
     209             : 
/* Destroy the prelim_ref slab cache at module unload. */
void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}
     214             : 
/* Return a prelim_ref to its slab cache. */
static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}
     219             : 
     220             : /*
     221             :  * Return 0 when both refs are for the same block (and can be merged).
     222             :  * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
     223             :  * indicates a 'higher' block.
     224             :  */
     225           0 : static int prelim_ref_compare(struct prelim_ref *ref1,
     226             :                               struct prelim_ref *ref2)
     227             : {
     228           0 :         if (ref1->level < ref2->level)
     229             :                 return -1;
     230           0 :         if (ref1->level > ref2->level)
     231             :                 return 1;
     232           0 :         if (ref1->root_id < ref2->root_id)
     233             :                 return -1;
     234           0 :         if (ref1->root_id > ref2->root_id)
     235             :                 return 1;
     236           0 :         if (ref1->key_for_search.type < ref2->key_for_search.type)
     237             :                 return -1;
     238           0 :         if (ref1->key_for_search.type > ref2->key_for_search.type)
     239             :                 return 1;
     240           0 :         if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
     241             :                 return -1;
     242           0 :         if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
     243             :                 return 1;
     244           0 :         if (ref1->key_for_search.offset < ref2->key_for_search.offset)
     245             :                 return -1;
     246           0 :         if (ref1->key_for_search.offset > ref2->key_for_search.offset)
     247             :                 return 1;
     248           0 :         if (ref1->parent < ref2->parent)
     249             :                 return -1;
     250           0 :         if (ref1->parent > ref2->parent)
     251           0 :                 return 1;
     252             : 
     253             :         return 0;
     254             : }
     255             : 
     256           0 : static void update_share_count(struct share_check *sc, int oldcount,
     257             :                                int newcount, struct prelim_ref *newref)
     258             : {
     259           0 :         if ((!sc) || (oldcount == 0 && newcount < 1))
     260             :                 return;
     261             : 
     262           0 :         if (oldcount > 0 && newcount < 1)
     263           0 :                 sc->share_count--;
     264           0 :         else if (oldcount < 1 && newcount > 0)
     265           0 :                 sc->share_count++;
     266             : 
     267           0 :         if (newref->root_id == sc->root->root_key.objectid &&
     268           0 :             newref->wanted_disk_byte == sc->data_bytenr &&
     269           0 :             newref->key_for_search.objectid == sc->inum)
     270           0 :                 sc->self_ref_count += newref->count;
     271             : }
     272             : 
/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	/* Standard rbtree descent to find the insertion point. */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			/* Append newref's inode list to the tail of ref's. */
			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	/* No identical ref found: link the new node into the tree. */
	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
     334             : 
/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	/* Post-order walk: safe to free each node after visiting it. */
	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		/* Each ref owns its inode_list; free it along with the ref. */
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}
     352             : 
     353             : /*
     354             :  * the rules for all callers of this function are:
     355             :  * - obtaining the parent is the goal
     356             :  * - if you add a key, you must know that it is a correct key
     357             :  * - if you cannot add the parent or a correct key, then we will look into the
     358             :  *   block later to set a correct key
     359             :  *
     360             :  * delayed refs
     361             :  * ============
     362             :  *        backref type | shared | indirect | shared | indirect
     363             :  * information         |   tree |     tree |   data |     data
     364             :  * --------------------+--------+----------+--------+----------
     365             :  *      parent logical |    y   |     -    |    -   |     -
     366             :  *      key to resolve |    -   |     y    |    y   |     y
     367             :  *  tree block logical |    -   |     -    |    -   |     -
     368             :  *  root for resolving |    y   |     y    |    y   |     y
     369             :  *
     370             :  * - column 1:       we've the parent -> done
     371             :  * - column 2, 3, 4: we use the key to find the parent
     372             :  *
     373             :  * on disk refs (inline or keyed)
     374             :  * ==============================
     375             :  *        backref type | shared | indirect | shared | indirect
     376             :  * information         |   tree |     tree |   data |     data
     377             :  * --------------------+--------+----------+--------+----------
     378             :  *      parent logical |    y   |     -    |    y   |     -
     379             :  *      key to resolve |    -   |     -    |    -   |     y
     380             :  *  tree block logical |    y   |     y    |    y   |     y
     381             :  *  root for resolving |    -   |     y    |    y   |     y
     382             :  *
     383             :  * - column 1, 3: we've the parent -> done
     384             :  * - column 2:    we take the first key from the block to find the parent
     385             :  *                (see add_missing_keys)
     386             :  * - column 4:    we use the key to find the parent
     387             :  *
     388             :  * additional information that's available but not required to find the parent
     389             :  * block might help in merging entries to gain some speed.
     390             :  */
     391           0 : static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
     392             :                           struct preftree *preftree, u64 root_id,
     393             :                           const struct btrfs_key *key, int level, u64 parent,
     394             :                           u64 wanted_disk_byte, int count,
     395             :                           struct share_check *sc, gfp_t gfp_mask)
     396             : {
     397           0 :         struct prelim_ref *ref;
     398             : 
     399           0 :         if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
     400             :                 return 0;
     401             : 
     402           0 :         ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
     403           0 :         if (!ref)
     404             :                 return -ENOMEM;
     405             : 
     406           0 :         ref->root_id = root_id;
     407           0 :         if (key)
     408           0 :                 ref->key_for_search = *key;
     409             :         else
     410           0 :                 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
     411             : 
     412           0 :         ref->inode_list = NULL;
     413           0 :         ref->level = level;
     414           0 :         ref->count = count;
     415           0 :         ref->parent = parent;
     416           0 :         ref->wanted_disk_byte = wanted_disk_byte;
     417           0 :         prelim_ref_insert(fs_info, preftree, ref, sc);
     418           0 :         return extent_is_shared(sc);
     419             : }
     420             : 
/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	/* A direct ref already knows its parent block, so no key or root. */
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}
     430             : 
     431             : /* indirect refs use parent == 0 */
     432             : static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
     433             :                             struct preftrees *preftrees, u64 root_id,
     434             :                             const struct btrfs_key *key, int level,
     435             :                             u64 wanted_disk_byte, int count,
     436             :                             struct share_check *sc, gfp_t gfp_mask)
     437             : {
     438           0 :         struct preftree *tree = &preftrees->indirect;
     439             : 
     440           0 :         if (!key)
     441           0 :                 tree = &preftrees->indirect_missing_keys;
     442           0 :         return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
     443             :                               wanted_disk_byte, count, sc, gfp_mask);
     444             : }
     445             : 
     446           0 : static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
     447             : {
     448           0 :         struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
     449           0 :         struct rb_node *parent = NULL;
     450           0 :         struct prelim_ref *ref = NULL;
     451           0 :         struct prelim_ref target = {};
     452           0 :         int result;
     453             : 
     454           0 :         target.parent = bytenr;
     455             : 
     456           0 :         while (*p) {
     457           0 :                 parent = *p;
     458           0 :                 ref = rb_entry(parent, struct prelim_ref, rbnode);
     459           0 :                 result = prelim_ref_compare(ref, &target);
     460             : 
     461           0 :                 if (result < 0)
     462           0 :                         p = &(*p)->rb_left;
     463           0 :                 else if (result > 0)
     464           0 :                         p = &(*p)->rb_right;
     465             :                 else
     466             :                         return 1;
     467             :         }
     468             :         return 0;
     469             : }
     470             : 
     471           0 : static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
     472             :                            struct btrfs_root *root, struct btrfs_path *path,
     473             :                            struct ulist *parents,
     474             :                            struct preftrees *preftrees, struct prelim_ref *ref,
     475             :                            int level)
     476             : {
     477           0 :         int ret = 0;
     478           0 :         int slot;
     479           0 :         struct extent_buffer *eb;
     480           0 :         struct btrfs_key key;
     481           0 :         struct btrfs_key *key_for_search = &ref->key_for_search;
     482           0 :         struct btrfs_file_extent_item *fi;
     483           0 :         struct extent_inode_elem *eie = NULL, *old = NULL;
     484           0 :         u64 disk_byte;
     485           0 :         u64 wanted_disk_byte = ref->wanted_disk_byte;
     486           0 :         u64 count = 0;
     487           0 :         u64 data_offset;
     488           0 :         u8 type;
     489             : 
     490           0 :         if (level != 0) {
     491           0 :                 eb = path->nodes[level];
     492           0 :                 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
     493           0 :                 if (ret < 0)
     494             :                         return ret;
     495           0 :                 return 0;
     496             :         }
     497             : 
     498             :         /*
     499             :          * 1. We normally enter this function with the path already pointing to
     500             :          *    the first item to check. But sometimes, we may enter it with
     501             :          *    slot == nritems.
     502             :          * 2. We are searching for normal backref but bytenr of this leaf
     503             :          *    matches shared data backref
     504             :          * 3. The leaf owner is not equal to the root we are searching
     505             :          *
     506             :          * For these cases, go to the next leaf before we continue.
     507             :          */
     508           0 :         eb = path->nodes[0];
     509           0 :         if (path->slots[0] >= btrfs_header_nritems(eb) ||
     510           0 :             is_shared_data_backref(preftrees, eb->start) ||
     511           0 :             ref->root_id != btrfs_header_owner(eb)) {
     512           0 :                 if (ctx->time_seq == BTRFS_SEQ_LAST)
     513           0 :                         ret = btrfs_next_leaf(root, path);
     514             :                 else
     515           0 :                         ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
     516             :         }
     517             : 
     518           0 :         while (!ret && count < ref->count) {
     519           0 :                 eb = path->nodes[0];
     520           0 :                 slot = path->slots[0];
     521             : 
     522           0 :                 btrfs_item_key_to_cpu(eb, &key, slot);
     523             : 
     524           0 :                 if (key.objectid != key_for_search->objectid ||
     525           0 :                     key.type != BTRFS_EXTENT_DATA_KEY)
     526             :                         break;
     527             : 
     528             :                 /*
     529             :                  * We are searching for normal backref but bytenr of this leaf
     530             :                  * matches shared data backref, OR
     531             :                  * the leaf owner is not equal to the root we are searching for
     532             :                  */
     533           0 :                 if (slot == 0 &&
     534           0 :                     (is_shared_data_backref(preftrees, eb->start) ||
     535           0 :                      ref->root_id != btrfs_header_owner(eb))) {
     536           0 :                         if (ctx->time_seq == BTRFS_SEQ_LAST)
     537           0 :                                 ret = btrfs_next_leaf(root, path);
     538             :                         else
     539           0 :                                 ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
     540           0 :                         continue;
     541             :                 }
     542           0 :                 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
     543           0 :                 type = btrfs_file_extent_type(eb, fi);
     544           0 :                 if (type == BTRFS_FILE_EXTENT_INLINE)
     545           0 :                         goto next;
     546           0 :                 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
     547           0 :                 data_offset = btrfs_file_extent_offset(eb, fi);
     548             : 
     549           0 :                 if (disk_byte == wanted_disk_byte) {
     550           0 :                         eie = NULL;
     551           0 :                         old = NULL;
     552           0 :                         if (ref->key_for_search.offset == key.offset - data_offset)
     553           0 :                                 count++;
     554             :                         else
     555           0 :                                 goto next;
     556           0 :                         if (!ctx->skip_inode_ref_list) {
     557           0 :                                 ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
     558           0 :                                 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
     559           0 :                                     ret < 0)
     560             :                                         break;
     561             :                         }
     562           0 :                         if (ret > 0)
     563           0 :                                 goto next;
     564           0 :                         ret = ulist_add_merge_ptr(parents, eb->start,
     565             :                                                   eie, (void **)&old, GFP_NOFS);
     566           0 :                         if (ret < 0)
     567             :                                 break;
     568           0 :                         if (!ret && !ctx->skip_inode_ref_list) {
     569           0 :                                 while (old->next)
     570           0 :                                         old = old->next;
     571           0 :                                 old->next = eie;
     572             :                         }
     573           0 :                         eie = NULL;
     574             :                 }
     575           0 : next:
     576           0 :                 if (ctx->time_seq == BTRFS_SEQ_LAST)
     577           0 :                         ret = btrfs_next_item(root, path);
     578             :                 else
     579           0 :                         ret = btrfs_next_old_item(root, path, ctx->time_seq);
     580             :         }
     581             : 
     582           0 :         if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
     583           0 :                 free_inode_elem_list(eie);
     584             :         else if (ret > 0)
     585             :                 ret = 0;
     586             : 
     587             :         return ret;
     588             : }
     589             : 
     590             : /*
     591             :  * resolve an indirect backref in the form (root_id, key, level)
     592             :  * to a logical address
     593             :  */
     594           0 : static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
     595             :                                 struct btrfs_path *path,
     596             :                                 struct preftrees *preftrees,
     597             :                                 struct prelim_ref *ref, struct ulist *parents)
     598             : {
     599           0 :         struct btrfs_root *root;
     600           0 :         struct extent_buffer *eb;
     601           0 :         int ret = 0;
     602           0 :         int root_level;
     603           0 :         int level = ref->level;
     604           0 :         struct btrfs_key search_key = ref->key_for_search;
     605             : 
     606             :         /*
     607             :          * If we're search_commit_root we could possibly be holding locks on
     608             :          * other tree nodes.  This happens when qgroups does backref walks when
     609             :          * adding new delayed refs.  To deal with this we need to look in cache
     610             :          * for the root, and if we don't find it then we need to search the
     611             :          * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
     612             :          * here.
     613             :          */
     614           0 :         if (path->search_commit_root)
     615           0 :                 root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
     616             :         else
     617           0 :                 root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
     618           0 :         if (IS_ERR(root)) {
     619           0 :                 ret = PTR_ERR(root);
     620           0 :                 goto out_free;
     621             :         }
     622             : 
     623           0 :         if (!path->search_commit_root &&
     624           0 :             test_bit(BTRFS_ROOT_DELETING, &root->state)) {
     625           0 :                 ret = -ENOENT;
     626           0 :                 goto out;
     627             :         }
     628             : 
     629           0 :         if (btrfs_is_testing(ctx->fs_info)) {
     630             :                 ret = -ENOENT;
     631             :                 goto out;
     632             :         }
     633             : 
     634           0 :         if (path->search_commit_root)
     635           0 :                 root_level = btrfs_header_level(root->commit_root);
     636           0 :         else if (ctx->time_seq == BTRFS_SEQ_LAST)
     637           0 :                 root_level = btrfs_header_level(root->node);
     638             :         else
     639           0 :                 root_level = btrfs_old_root_level(root, ctx->time_seq);
     640             : 
     641           0 :         if (root_level + 1 == level)
     642           0 :                 goto out;
     643             : 
     644             :         /*
     645             :          * We can often find data backrefs with an offset that is too large
     646             :          * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
     647             :          * subtracting a file's offset with the data offset of its
     648             :          * corresponding extent data item. This can happen for example in the
     649             :          * clone ioctl.
     650             :          *
     651             :          * So if we detect such case we set the search key's offset to zero to
     652             :          * make sure we will find the matching file extent item at
     653             :          * add_all_parents(), otherwise we will miss it because the offset
     654             :          * taken form the backref is much larger then the offset of the file
     655             :          * extent item. This can make us scan a very large number of file
     656             :          * extent items, but at least it will not make us miss any.
     657             :          *
     658             :          * This is an ugly workaround for a behaviour that should have never
     659             :          * existed, but it does and a fix for the clone ioctl would touch a lot
     660             :          * of places, cause backwards incompatibility and would not fix the
     661             :          * problem for extents cloned with older kernels.
     662             :          */
     663           0 :         if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
     664           0 :             search_key.offset >= LLONG_MAX)
     665           0 :                 search_key.offset = 0;
     666           0 :         path->lowest_level = level;
     667           0 :         if (ctx->time_seq == BTRFS_SEQ_LAST)
     668           0 :                 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
     669             :         else
     670           0 :                 ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
     671             : 
     672           0 :         btrfs_debug(ctx->fs_info,
     673             :                 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
     674             :                  ref->root_id, level, ref->count, ret,
     675             :                  ref->key_for_search.objectid, ref->key_for_search.type,
     676             :                  ref->key_for_search.offset);
     677           0 :         if (ret < 0)
     678           0 :                 goto out;
     679             : 
     680           0 :         eb = path->nodes[level];
     681           0 :         while (!eb) {
     682           0 :                 if (WARN_ON(!level)) {
     683           0 :                         ret = 1;
     684           0 :                         goto out;
     685             :                 }
     686           0 :                 level--;
     687           0 :                 eb = path->nodes[level];
     688             :         }
     689             : 
     690           0 :         ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
     691           0 : out:
     692           0 :         btrfs_put_root(root);
     693           0 : out_free:
     694           0 :         path->lowest_level = 0;
     695           0 :         btrfs_release_path(path);
     696           0 :         return ret;
     697             : }
     698             : 
     699             : static struct extent_inode_elem *
     700             : unode_aux_to_inode_list(struct ulist_node *node)
     701             : {
     702           0 :         if (!node)
     703             :                 return NULL;
     704           0 :         return (struct extent_inode_elem *)(uintptr_t)node->aux;
     705             : }
     706             : 
     707           0 : static void free_leaf_list(struct ulist *ulist)
     708             : {
     709           0 :         struct ulist_node *node;
     710           0 :         struct ulist_iterator uiter;
     711             : 
     712           0 :         ULIST_ITER_INIT(&uiter);
     713           0 :         while ((node = ulist_next(ulist, &uiter)))
     714           0 :                 free_inode_elem_list(unode_aux_to_inode_list(node));
     715             : 
     716           0 :         ulist_free(ulist);
     717           0 : }
     718             : 
     719             : /*
     720             :  * We maintain three separate rbtrees: one for direct refs, one for
     721             :  * indirect refs which have a key, and one for indirect refs which do not
     722             :  * have a key. Each tree does merge on insertion.
     723             :  *
     724             :  * Once all of the references are located, we iterate over the tree of
     725             :  * indirect refs with missing keys. An appropriate key is located and
     726             :  * the ref is moved onto the tree for indirect refs. After all missing
     727             :  * keys are thus located, we iterate over the indirect ref tree, resolve
     728             :  * each reference, and then insert the resolved reference onto the
     729             :  * direct tree (merging there too).
     730             :  *
     731             :  * New backrefs (i.e., for parent nodes) are added to the appropriate
     732             :  * rbtree as they are encountered. The new backrefs are subsequently
     733             :  * resolved as above.
     734             :  */
     735           0 : static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
     736             :                                  struct btrfs_path *path,
     737             :                                  struct preftrees *preftrees,
     738             :                                  struct share_check *sc)
     739             : {
     740           0 :         int err;
     741           0 :         int ret = 0;
     742           0 :         struct ulist *parents;
     743           0 :         struct ulist_node *node;
     744           0 :         struct ulist_iterator uiter;
     745           0 :         struct rb_node *rnode;
     746             : 
     747           0 :         parents = ulist_alloc(GFP_NOFS);
     748           0 :         if (!parents)
     749             :                 return -ENOMEM;
     750             : 
     751             :         /*
     752             :          * We could trade memory usage for performance here by iterating
     753             :          * the tree, allocating new refs for each insertion, and then
     754             :          * freeing the entire indirect tree when we're done.  In some test
     755             :          * cases, the tree can grow quite large (~200k objects).
     756             :          */
     757           0 :         while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
     758           0 :                 struct prelim_ref *ref;
     759             : 
     760           0 :                 ref = rb_entry(rnode, struct prelim_ref, rbnode);
     761           0 :                 if (WARN(ref->parent,
     762             :                          "BUG: direct ref found in indirect tree")) {
     763           0 :                         ret = -EINVAL;
     764           0 :                         goto out;
     765             :                 }
     766             : 
     767           0 :                 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
     768           0 :                 preftrees->indirect.count--;
     769             : 
     770           0 :                 if (ref->count == 0) {
     771           0 :                         free_pref(ref);
     772           0 :                         continue;
     773             :                 }
     774             : 
     775           0 :                 if (sc && ref->root_id != sc->root->root_key.objectid) {
     776           0 :                         free_pref(ref);
     777           0 :                         ret = BACKREF_FOUND_SHARED;
     778           0 :                         goto out;
     779             :                 }
     780           0 :                 err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
     781             :                 /*
     782             :                  * we can only tolerate ENOENT,otherwise,we should catch error
     783             :                  * and return directly.
     784             :                  */
     785           0 :                 if (err == -ENOENT) {
     786           0 :                         prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
     787             :                                           NULL);
     788           0 :                         continue;
     789           0 :                 } else if (err) {
     790           0 :                         free_pref(ref);
     791           0 :                         ret = err;
     792           0 :                         goto out;
     793             :                 }
     794             : 
     795             :                 /* we put the first parent into the ref at hand */
     796           0 :                 ULIST_ITER_INIT(&uiter);
     797           0 :                 node = ulist_next(parents, &uiter);
     798           0 :                 ref->parent = node ? node->val : 0;
     799           0 :                 ref->inode_list = unode_aux_to_inode_list(node);
     800             : 
     801             :                 /* Add a prelim_ref(s) for any other parent(s). */
     802           0 :                 while ((node = ulist_next(parents, &uiter))) {
     803           0 :                         struct prelim_ref *new_ref;
     804             : 
     805           0 :                         new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
     806             :                                                    GFP_NOFS);
     807           0 :                         if (!new_ref) {
     808           0 :                                 free_pref(ref);
     809           0 :                                 ret = -ENOMEM;
     810           0 :                                 goto out;
     811             :                         }
     812           0 :                         memcpy(new_ref, ref, sizeof(*ref));
     813           0 :                         new_ref->parent = node->val;
     814           0 :                         new_ref->inode_list = unode_aux_to_inode_list(node);
     815           0 :                         prelim_ref_insert(ctx->fs_info, &preftrees->direct,
     816             :                                           new_ref, NULL);
     817             :                 }
     818             : 
     819             :                 /*
     820             :                  * Now it's a direct ref, put it in the direct tree. We must
     821             :                  * do this last because the ref could be merged/freed here.
     822             :                  */
     823           0 :                 prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
     824             : 
     825           0 :                 ulist_reinit(parents);
     826           0 :                 cond_resched();
     827             :         }
     828           0 : out:
     829             :         /*
     830             :          * We may have inode lists attached to refs in the parents ulist, so we
     831             :          * must free them before freeing the ulist and its refs.
     832             :          */
     833           0 :         free_leaf_list(parents);
     834           0 :         return ret;
     835             : }
     836             : 
     837             : /*
     838             :  * read tree blocks and add keys where required.
     839             :  */
     840           0 : static int add_missing_keys(struct btrfs_fs_info *fs_info,
     841             :                             struct preftrees *preftrees, bool lock)
     842             : {
     843           0 :         struct prelim_ref *ref;
     844           0 :         struct extent_buffer *eb;
     845           0 :         struct preftree *tree = &preftrees->indirect_missing_keys;
     846           0 :         struct rb_node *node;
     847             : 
     848           0 :         while ((node = rb_first_cached(&tree->root))) {
     849           0 :                 struct btrfs_tree_parent_check check = { 0 };
     850             : 
     851           0 :                 ref = rb_entry(node, struct prelim_ref, rbnode);
     852           0 :                 rb_erase_cached(node, &tree->root);
     853             : 
     854           0 :                 BUG_ON(ref->parent); /* should not be a direct ref */
     855           0 :                 BUG_ON(ref->key_for_search.type);
     856           0 :                 BUG_ON(!ref->wanted_disk_byte);
     857             : 
     858           0 :                 check.level = ref->level - 1;
     859           0 :                 check.owner_root = ref->root_id;
     860             : 
     861           0 :                 eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
     862           0 :                 if (IS_ERR(eb)) {
     863           0 :                         free_pref(ref);
     864           0 :                         return PTR_ERR(eb);
     865             :                 }
     866           0 :                 if (!extent_buffer_uptodate(eb)) {
     867           0 :                         free_pref(ref);
     868           0 :                         free_extent_buffer(eb);
     869           0 :                         return -EIO;
     870             :                 }
     871             : 
     872           0 :                 if (lock)
     873           0 :                         btrfs_tree_read_lock(eb);
     874           0 :                 if (btrfs_header_level(eb) == 0)
     875           0 :                         btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
     876             :                 else
     877           0 :                         btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
     878           0 :                 if (lock)
     879           0 :                         btrfs_tree_read_unlock(eb);
     880           0 :                 free_extent_buffer(eb);
     881           0 :                 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
     882           0 :                 cond_resched();
     883             :         }
     884             :         return 0;
     885             : }
     886             : 
     887             : /*
     888             :  * add all currently queued delayed refs from this head whose seq nr is
     889             :  * smaller or equal that seq to the list
     890             :  */
     891           0 : static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
     892             :                             struct btrfs_delayed_ref_head *head, u64 seq,
     893             :                             struct preftrees *preftrees, struct share_check *sc)
     894             : {
     895           0 :         struct btrfs_delayed_ref_node *node;
     896           0 :         struct btrfs_key key;
     897           0 :         struct rb_node *n;
     898           0 :         int count;
     899           0 :         int ret = 0;
     900             : 
     901           0 :         spin_lock(&head->lock);
     902           0 :         for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
     903           0 :                 node = rb_entry(n, struct btrfs_delayed_ref_node,
     904             :                                 ref_node);
     905           0 :                 if (node->seq > seq)
     906           0 :                         continue;
     907             : 
     908           0 :                 switch (node->action) {
     909             :                 case BTRFS_ADD_DELAYED_EXTENT:
     910             :                 case BTRFS_UPDATE_DELAYED_HEAD:
     911           0 :                         WARN_ON(1);
     912           0 :                         continue;
     913           0 :                 case BTRFS_ADD_DELAYED_REF:
     914           0 :                         count = node->ref_mod;
     915           0 :                         break;
     916           0 :                 case BTRFS_DROP_DELAYED_REF:
     917           0 :                         count = node->ref_mod * -1;
     918           0 :                         break;
     919           0 :                 default:
     920           0 :                         BUG();
     921             :                 }
     922           0 :                 switch (node->type) {
     923           0 :                 case BTRFS_TREE_BLOCK_REF_KEY: {
     924             :                         /* NORMAL INDIRECT METADATA backref */
     925           0 :                         struct btrfs_delayed_tree_ref *ref;
     926           0 :                         struct btrfs_key *key_ptr = NULL;
     927             : 
     928           0 :                         if (head->extent_op && head->extent_op->update_key) {
     929           0 :                                 btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
     930           0 :                                 key_ptr = &key;
     931             :                         }
     932             : 
     933           0 :                         ref = btrfs_delayed_node_to_tree_ref(node);
     934           0 :                         ret = add_indirect_ref(fs_info, preftrees, ref->root,
     935           0 :                                                key_ptr, ref->level + 1,
     936             :                                                node->bytenr, count, sc,
     937             :                                                GFP_ATOMIC);
     938           0 :                         break;
     939             :                 }
     940             :                 case BTRFS_SHARED_BLOCK_REF_KEY: {
     941             :                         /* SHARED DIRECT METADATA backref */
     942           0 :                         struct btrfs_delayed_tree_ref *ref;
     943             : 
     944           0 :                         ref = btrfs_delayed_node_to_tree_ref(node);
     945             : 
     946           0 :                         ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
     947             :                                              ref->parent, node->bytenr, count,
     948             :                                              sc, GFP_ATOMIC);
     949           0 :                         break;
     950             :                 }
     951             :                 case BTRFS_EXTENT_DATA_REF_KEY: {
     952             :                         /* NORMAL INDIRECT DATA backref */
     953           0 :                         struct btrfs_delayed_data_ref *ref;
     954           0 :                         ref = btrfs_delayed_node_to_data_ref(node);
     955             : 
     956           0 :                         key.objectid = ref->objectid;
     957           0 :                         key.type = BTRFS_EXTENT_DATA_KEY;
     958           0 :                         key.offset = ref->offset;
     959             : 
     960             :                         /*
     961             :                          * If we have a share check context and a reference for
     962             :                          * another inode, we can't exit immediately. This is
     963             :                          * because even if this is a BTRFS_ADD_DELAYED_REF
     964             :                          * reference we may find next a BTRFS_DROP_DELAYED_REF
     965             :                          * which cancels out this ADD reference.
     966             :                          *
     967             :                          * If this is a DROP reference and there was no previous
     968             :                          * ADD reference, then we need to signal that when we
     969             :                          * process references from the extent tree (through
     970             :                          * add_inline_refs() and add_keyed_refs()), we should
     971             :                          * not exit early if we find a reference for another
     972             :                          * inode, because one of the delayed DROP references
     973             :                          * may cancel that reference in the extent tree.
     974             :                          */
     975           0 :                         if (sc && count < 0)
     976           0 :                                 sc->have_delayed_delete_refs = true;
     977             : 
     978           0 :                         ret = add_indirect_ref(fs_info, preftrees, ref->root,
     979             :                                                &key, 0, node->bytenr, count, sc,
     980             :                                                GFP_ATOMIC);
     981           0 :                         break;
     982             :                 }
     983             :                 case BTRFS_SHARED_DATA_REF_KEY: {
     984             :                         /* SHARED DIRECT FULL backref */
     985           0 :                         struct btrfs_delayed_data_ref *ref;
     986             : 
     987           0 :                         ref = btrfs_delayed_node_to_data_ref(node);
     988             : 
     989           0 :                         ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
     990             :                                              node->bytenr, count, sc,
     991             :                                              GFP_ATOMIC);
     992           0 :                         break;
     993             :                 }
     994             :                 default:
     995           0 :                         WARN_ON(1);
     996             :                 }
     997             :                 /*
     998             :                  * We must ignore BACKREF_FOUND_SHARED until all delayed
     999             :                  * refs have been checked.
    1000             :                  */
    1001           0 :                 if (ret && (ret != BACKREF_FOUND_SHARED))
    1002             :                         break;
    1003             :         }
    1004           0 :         if (!ret)
    1005           0 :                 ret = extent_is_shared(sc);
    1006             : 
    1007           0 :         spin_unlock(&head->lock);
    1008           0 :         return ret;
    1009             : }
    1010             : 
    1011             : /*
    1012             :  * add all inline backrefs for bytenr to the list
    1013             :  *
    1014             :  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
    1015             :  */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
                           struct btrfs_path *path,
                           int *info_level, struct preftrees *preftrees,
                           struct share_check *sc)
{
        int ret = 0;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        unsigned long ptr;
        unsigned long end;
        struct btrfs_extent_item *ei;
        u64 flags;
        u64 item_size;

        /*
         * enumerate all inline refs
         */
        leaf = path->nodes[0];
        slot = path->slots[0];

        item_size = btrfs_item_size(leaf, slot);
        /* An extent item must be at least as large as its fixed header. */
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

        /* Give the caller a chance to inspect/veto the extent item first. */
        if (ctx->check_extent_item) {
                ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
                if (ret)
                        return ret;
        }

        flags = btrfs_extent_flags(leaf, ei);
        btrfs_item_key_to_cpu(leaf, &found_key, slot);

        /* Inline refs are packed right after the extent item header. */
        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
            flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                /*
                 * Non-skinny metadata extent: a btrfs_tree_block_info sits
                 * between the extent item header and the first inline ref,
                 * and it carries the tree level.
                 */
                struct btrfs_tree_block_info *info;

                info = (struct btrfs_tree_block_info *)ptr;
                *info_level = btrfs_tree_block_level(leaf, info);
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
                /* Skinny metadata: the key offset encodes the tree level. */
                *info_level = found_key.offset;
        } else {
                /* Neither metadata form, so it must be a data extent. */
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        /* Walk every inline ref until the end of the item. */
        while (ptr < end) {
                struct btrfs_extent_inline_ref *iref;
                u64 offset;
                int type;

                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_get_extent_inline_ref_type(leaf, iref,
                                                        BTRFS_REF_TYPE_ANY);
                if (type == BTRFS_REF_TYPE_INVALID)
                        return -EUCLEAN;

                offset = btrfs_extent_inline_ref_offset(leaf, iref);

                switch (type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        /* Direct metadata ref: @offset is the parent bytenr. */
                        ret = add_direct_ref(ctx->fs_info, preftrees,
                                             *info_level + 1, offset,
                                             ctx->bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* Direct data ref: @offset is the parent, with a count. */
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = (struct btrfs_shared_data_ref *)(iref + 1);
                        count = btrfs_shared_data_ref_count(leaf, sdref);

                        ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
                                             ctx->bytenr, count, sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        /* Indirect metadata ref: @offset is the root objectid. */
                        ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
                                               NULL, *info_level + 1,
                                               ctx->bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* Indirect data ref: root/objectid/offset come from the dref. */
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        /*
                         * A ref belonging to a different inode means the data
                         * extent is shared - unless delayed delete refs may
                         * still remove refs, in which case we can't decide yet.
                         */
                        if (sc && key.objectid != sc->inum &&
                            !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);

                        if (!ctx->skip_data_ref ||
                            !ctx->skip_data_ref(root, key.objectid, key.offset,
                                                ctx->user_ctx))
                                ret = add_indirect_ref(ctx->fs_info, preftrees,
                                                       root, &key, 0, ctx->bytenr,
                                                       count, sc, GFP_NOFS);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;
                ptr += btrfs_extent_inline_ref_size(type);
        }

        return 0;
}
    1142             : 
    1143             : /*
    1144             :  * add all non-inline backrefs for bytenr to the list
    1145             :  *
    1146             :  * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
    1147             :  */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
                          struct btrfs_root *extent_root,
                          struct btrfs_path *path,
                          int info_level, struct preftrees *preftrees,
                          struct share_check *sc)
{
        struct btrfs_fs_info *fs_info = extent_root->fs_info;
        int ret;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        /*
         * Iterate over the items that follow the extent item in the extent
         * tree; keyed backrefs for ctx->bytenr are stored right after it.
         */
        while (1) {
                ret = btrfs_next_item(extent_root, path);
                if (ret < 0)
                        break;
                if (ret) {
                        /* Ran past the last item of the tree - we're done. */
                        ret = 0;
                        break;
                }

                slot = path->slots[0];
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, slot);

                /* Stop once we leave the items for our extent. */
                if (key.objectid != ctx->bytenr)
                        break;
                /* Skip item types sorted before the backref range. */
                if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
                        continue;
                /* Item types after the backref range end the scan. */
                if (key.type > BTRFS_SHARED_DATA_REF_KEY)
                        break;

                switch (key.type) {
                case BTRFS_SHARED_BLOCK_REF_KEY:
                        /* SHARED DIRECT METADATA backref */
                        ret = add_direct_ref(fs_info, preftrees,
                                             info_level + 1, key.offset,
                                             ctx->bytenr, 1, NULL, GFP_NOFS);
                        break;
                case BTRFS_SHARED_DATA_REF_KEY: {
                        /* SHARED DIRECT FULL backref */
                        struct btrfs_shared_data_ref *sdref;
                        int count;

                        sdref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_shared_data_ref);
                        count = btrfs_shared_data_ref_count(leaf, sdref);
                        ret = add_direct_ref(fs_info, preftrees, 0,
                                             key.offset, ctx->bytenr, count,
                                             sc, GFP_NOFS);
                        break;
                }
                case BTRFS_TREE_BLOCK_REF_KEY:
                        /* NORMAL INDIRECT METADATA backref */
                        ret = add_indirect_ref(fs_info, preftrees, key.offset,
                                               NULL, info_level + 1, ctx->bytenr,
                                               1, NULL, GFP_NOFS);
                        break;
                case BTRFS_EXTENT_DATA_REF_KEY: {
                        /* NORMAL INDIRECT DATA backref */
                        struct btrfs_extent_data_ref *dref;
                        int count;
                        u64 root;

                        dref = btrfs_item_ptr(leaf, slot,
                                              struct btrfs_extent_data_ref);
                        count = btrfs_extent_data_ref_count(leaf, dref);
                        key.objectid = btrfs_extent_data_ref_objectid(leaf,
                                                                      dref);
                        key.type = BTRFS_EXTENT_DATA_KEY;
                        key.offset = btrfs_extent_data_ref_offset(leaf, dref);

                        /*
                         * A ref belonging to a different inode means the data
                         * extent is shared - unless delayed delete refs may
                         * still remove refs, in which case we can't decide yet.
                         */
                        if (sc && key.objectid != sc->inum &&
                            !sc->have_delayed_delete_refs) {
                                ret = BACKREF_FOUND_SHARED;
                                break;
                        }

                        root = btrfs_extent_data_ref_root(leaf, dref);

                        if (!ctx->skip_data_ref ||
                            !ctx->skip_data_ref(root, key.objectid, key.offset,
                                                ctx->user_ctx))
                                ret = add_indirect_ref(fs_info, preftrees, root,
                                                       &key, 0, ctx->bytenr,
                                                       count, sc, GFP_NOFS);
                        break;
                }
                default:
                        WARN_ON(1);
                }
                if (ret)
                        return ret;

        }

        return ret;
}
    1246             : 
    1247             : /*
    1248             :  * The caller has joined a transaction or is holding a read lock on the
    1249             :  * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
    1250             :  * snapshot field changing while updating or checking the cache.
    1251             :  */
    1252           0 : static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
    1253             :                                         struct btrfs_root *root,
    1254             :                                         u64 bytenr, int level, bool *is_shared)
    1255             : {
    1256           0 :         const struct btrfs_fs_info *fs_info = root->fs_info;
    1257           0 :         struct btrfs_backref_shared_cache_entry *entry;
    1258             : 
    1259           0 :         if (!current->journal_info)
    1260           0 :                 lockdep_assert_held(&fs_info->commit_root_sem);
    1261             : 
    1262           0 :         if (!ctx->use_path_cache)
    1263             :                 return false;
    1264             : 
    1265           0 :         if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
    1266             :                 return false;
    1267             : 
    1268             :         /*
    1269             :          * Level -1 is used for the data extent, which is not reliable to cache
    1270             :          * because its reference count can increase or decrease without us
    1271             :          * realizing. We cache results only for extent buffers that lead from
    1272             :          * the root node down to the leaf with the file extent item.
    1273             :          */
    1274           0 :         ASSERT(level >= 0);
    1275             : 
    1276           0 :         entry = &ctx->path_cache_entries[level];
    1277             : 
    1278             :         /* Unused cache entry or being used for some other extent buffer. */
    1279           0 :         if (entry->bytenr != bytenr)
    1280             :                 return false;
    1281             : 
    1282             :         /*
    1283             :          * We cached a false result, but the last snapshot generation of the
    1284             :          * root changed, so we now have a snapshot. Don't trust the result.
    1285             :          */
    1286           0 :         if (!entry->is_shared &&
    1287           0 :             entry->gen != btrfs_root_last_snapshot(&root->root_item))
    1288             :                 return false;
    1289             : 
    1290             :         /*
    1291             :          * If we cached a true result and the last generation used for dropping
    1292             :          * a root changed, we can not trust the result, because the dropped root
    1293             :          * could be a snapshot sharing this extent buffer.
    1294             :          */
    1295           0 :         if (entry->is_shared &&
    1296           0 :             entry->gen != btrfs_get_last_root_drop_gen(fs_info))
    1297             :                 return false;
    1298             : 
    1299           0 :         *is_shared = entry->is_shared;
    1300             :         /*
    1301             :          * If the node at this level is shared, than all nodes below are also
    1302             :          * shared. Currently some of the nodes below may be marked as not shared
    1303             :          * because we have just switched from one leaf to another, and switched
    1304             :          * also other nodes above the leaf and below the current level, so mark
    1305             :          * them as shared.
    1306             :          */
    1307           0 :         if (*is_shared) {
    1308           0 :                 for (int i = 0; i < level; i++) {
    1309           0 :                         ctx->path_cache_entries[i].is_shared = true;
    1310           0 :                         ctx->path_cache_entries[i].gen = entry->gen;
    1311             :                 }
    1312             :         }
    1313             : 
    1314             :         return true;
    1315             : }
    1316             : 
    1317             : /*
    1318             :  * The caller has joined a transaction or is holding a read lock on the
    1319             :  * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
    1320             :  * snapshot field changing while updating or checking the cache.
    1321             :  */
    1322           0 : static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
    1323             :                                        struct btrfs_root *root,
    1324             :                                        u64 bytenr, int level, bool is_shared)
    1325             : {
    1326           0 :         const struct btrfs_fs_info *fs_info = root->fs_info;
    1327           0 :         struct btrfs_backref_shared_cache_entry *entry;
    1328           0 :         u64 gen;
    1329             : 
    1330           0 :         if (!current->journal_info)
    1331           0 :                 lockdep_assert_held(&fs_info->commit_root_sem);
    1332             : 
    1333           0 :         if (!ctx->use_path_cache)
    1334             :                 return;
    1335             : 
    1336           0 :         if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
    1337             :                 return;
    1338             : 
    1339             :         /*
    1340             :          * Level -1 is used for the data extent, which is not reliable to cache
    1341             :          * because its reference count can increase or decrease without us
    1342             :          * realizing. We cache results only for extent buffers that lead from
    1343             :          * the root node down to the leaf with the file extent item.
    1344             :          */
    1345           0 :         ASSERT(level >= 0);
    1346             : 
    1347           0 :         if (is_shared)
    1348           0 :                 gen = btrfs_get_last_root_drop_gen(fs_info);
    1349             :         else
    1350           0 :                 gen = btrfs_root_last_snapshot(&root->root_item);
    1351             : 
    1352           0 :         entry = &ctx->path_cache_entries[level];
    1353           0 :         entry->bytenr = bytenr;
    1354           0 :         entry->is_shared = is_shared;
    1355           0 :         entry->gen = gen;
    1356             : 
    1357             :         /*
    1358             :          * If we found an extent buffer is shared, set the cache result for all
    1359             :          * extent buffers below it to true. As nodes in the path are COWed,
    1360             :          * their sharedness is moved to their children, and if a leaf is COWed,
    1361             :          * then the sharedness of a data extent becomes direct, the refcount of
    1362             :          * data extent is increased in the extent item at the extent tree.
    1363             :          */
    1364           0 :         if (is_shared) {
    1365           0 :                 for (int i = 0; i < level; i++) {
    1366           0 :                         entry = &ctx->path_cache_entries[i];
    1367           0 :                         entry->is_shared = is_shared;
    1368           0 :                         entry->gen = gen;
    1369             :                 }
    1370             :         }
    1371             : }
    1372             : 
/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * @ctx:     Backref walking context object, must not be NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
    1387           0 : static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
    1388             :                              struct share_check *sc)
    1389             : {
    1390           0 :         struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
    1391           0 :         struct btrfs_key key;
    1392           0 :         struct btrfs_path *path;
    1393           0 :         struct btrfs_delayed_ref_root *delayed_refs = NULL;
    1394           0 :         struct btrfs_delayed_ref_head *head;
    1395           0 :         int info_level = 0;
    1396           0 :         int ret;
    1397           0 :         struct prelim_ref *ref;
    1398           0 :         struct rb_node *node;
    1399           0 :         struct extent_inode_elem *eie = NULL;
    1400           0 :         struct preftrees preftrees = {
    1401             :                 .direct = PREFTREE_INIT,
    1402             :                 .indirect = PREFTREE_INIT,
    1403             :                 .indirect_missing_keys = PREFTREE_INIT
    1404             :         };
    1405             : 
    1406             :         /* Roots ulist is not needed when using a sharedness check context. */
    1407           0 :         if (sc)
    1408             :                 ASSERT(ctx->roots == NULL);
    1409             : 
    1410           0 :         key.objectid = ctx->bytenr;
    1411           0 :         key.offset = (u64)-1;
    1412           0 :         if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
    1413           0 :                 key.type = BTRFS_METADATA_ITEM_KEY;
    1414             :         else
    1415           0 :                 key.type = BTRFS_EXTENT_ITEM_KEY;
    1416             : 
    1417           0 :         path = btrfs_alloc_path();
    1418           0 :         if (!path)
    1419             :                 return -ENOMEM;
    1420           0 :         if (!ctx->trans) {
    1421           0 :                 path->search_commit_root = 1;
    1422           0 :                 path->skip_locking = 1;
    1423             :         }
    1424             : 
    1425           0 :         if (ctx->time_seq == BTRFS_SEQ_LAST)
    1426           0 :                 path->skip_locking = 1;
    1427             : 
    1428           0 : again:
    1429           0 :         head = NULL;
    1430             : 
    1431           0 :         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    1432           0 :         if (ret < 0)
    1433           0 :                 goto out;
    1434           0 :         if (ret == 0) {
    1435             :                 /* This shouldn't happen, indicates a bug or fs corruption. */
    1436           0 :                 ASSERT(ret != 0);
    1437           0 :                 ret = -EUCLEAN;
    1438           0 :                 goto out;
    1439             :         }
    1440             : 
    1441           0 :         if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
    1442           0 :             ctx->time_seq != BTRFS_SEQ_LAST) {
    1443             :                 /*
    1444             :                  * We have a specific time_seq we care about and trans which
    1445             :                  * means we have the path lock, we need to grab the ref head and
    1446             :                  * lock it so we have a consistent view of the refs at the given
    1447             :                  * time.
    1448             :                  */
    1449           0 :                 delayed_refs = &ctx->trans->transaction->delayed_refs;
    1450           0 :                 spin_lock(&delayed_refs->lock);
    1451           0 :                 head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
    1452           0 :                 if (head) {
    1453           0 :                         if (!mutex_trylock(&head->mutex)) {
    1454           0 :                                 refcount_inc(&head->refs);
    1455           0 :                                 spin_unlock(&delayed_refs->lock);
    1456             : 
    1457           0 :                                 btrfs_release_path(path);
    1458             : 
    1459             :                                 /*
    1460             :                                  * Mutex was contended, block until it's
    1461             :                                  * released and try again
    1462             :                                  */
    1463           0 :                                 mutex_lock(&head->mutex);
    1464           0 :                                 mutex_unlock(&head->mutex);
    1465           0 :                                 btrfs_put_delayed_ref_head(head);
    1466           0 :                                 goto again;
    1467             :                         }
    1468           0 :                         spin_unlock(&delayed_refs->lock);
    1469           0 :                         ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
    1470             :                                                &preftrees, sc);
    1471           0 :                         mutex_unlock(&head->mutex);
    1472           0 :                         if (ret)
    1473           0 :                                 goto out;
    1474             :                 } else {
    1475           0 :                         spin_unlock(&delayed_refs->lock);
    1476             :                 }
    1477             :         }
    1478             : 
    1479           0 :         if (path->slots[0]) {
    1480           0 :                 struct extent_buffer *leaf;
    1481           0 :                 int slot;
    1482             : 
    1483           0 :                 path->slots[0]--;
    1484           0 :                 leaf = path->nodes[0];
    1485           0 :                 slot = path->slots[0];
    1486           0 :                 btrfs_item_key_to_cpu(leaf, &key, slot);
    1487           0 :                 if (key.objectid == ctx->bytenr &&
    1488           0 :                     (key.type == BTRFS_EXTENT_ITEM_KEY ||
    1489             :                      key.type == BTRFS_METADATA_ITEM_KEY)) {
    1490           0 :                         ret = add_inline_refs(ctx, path, &info_level,
    1491             :                                               &preftrees, sc);
    1492           0 :                         if (ret)
    1493           0 :                                 goto out;
    1494           0 :                         ret = add_keyed_refs(ctx, root, path, info_level,
    1495             :                                              &preftrees, sc);
    1496           0 :                         if (ret)
    1497           0 :                                 goto out;
    1498             :                 }
    1499             :         }
    1500             : 
    1501             :         /*
    1502             :          * If we have a share context and we reached here, it means the extent
    1503             :          * is not directly shared (no multiple reference items for it),
    1504             :          * otherwise we would have exited earlier with a return value of
    1505             :          * BACKREF_FOUND_SHARED after processing delayed references or while
    1506             :          * processing inline or keyed references from the extent tree.
    1507             :          * The extent may however be indirectly shared through shared subtrees
    1508             :          * as a result from creating snapshots, so we determine below what is
    1509             :          * its parent node, in case we are dealing with a metadata extent, or
    1510             :          * what's the leaf (or leaves), from a fs tree, that has a file extent
    1511             :          * item pointing to it in case we are dealing with a data extent.
    1512             :          */
    1513           0 :         ASSERT(extent_is_shared(sc) == 0);
    1514             : 
    1515             :         /*
    1516             :          * If we are here for a data extent and we have a share_check structure
    1517             :          * it means the data extent is not directly shared (does not have
    1518             :          * multiple reference items), so we have to check if a path in the fs
    1519             :          * tree (going from the root node down to the leaf that has the file
    1520             :          * extent item pointing to the data extent) is shared, that is, if any
    1521             :          * of the extent buffers in the path is referenced by other trees.
    1522             :          */
    1523           0 :         if (sc && ctx->bytenr == sc->data_bytenr) {
    1524             :                 /*
    1525             :                  * If our data extent is from a generation more recent than the
    1526             :                  * last generation used to snapshot the root, then we know that
    1527             :                  * it can not be shared through subtrees, so we can skip
    1528             :                  * resolving indirect references, there's no point in
    1529             :                  * determining the extent buffers for the path from the fs tree
    1530             :                  * root node down to the leaf that has the file extent item that
    1531             :                  * points to the data extent.
    1532             :                  */
    1533           0 :                 if (sc->data_extent_gen >
    1534           0 :                     btrfs_root_last_snapshot(&sc->root->root_item)) {
    1535           0 :                         ret = BACKREF_FOUND_NOT_SHARED;
    1536           0 :                         goto out;
    1537             :                 }
    1538             : 
    1539             :                 /*
    1540             :                  * If we are only determining if a data extent is shared or not
    1541             :                  * and the corresponding file extent item is located in the same
    1542             :                  * leaf as the previous file extent item, we can skip resolving
    1543             :                  * indirect references for a data extent, since the fs tree path
    1544             :                  * is the same (same leaf, so same path). We skip as long as the
    1545             :                  * cached result for the leaf is valid and only if there's only
    1546             :                  * one file extent item pointing to the data extent, because in
    1547             :                  * the case of multiple file extent items, they may be located
    1548             :                  * in different leaves and therefore we have multiple paths.
    1549             :                  */
    1550           0 :                 if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
    1551           0 :                     sc->self_ref_count == 1) {
    1552           0 :                         bool cached;
    1553           0 :                         bool is_shared;
    1554             : 
    1555           0 :                         cached = lookup_backref_shared_cache(sc->ctx, sc->root,
    1556             :                                                      sc->ctx->curr_leaf_bytenr,
    1557             :                                                      0, &is_shared);
    1558           0 :                         if (cached) {
    1559           0 :                                 if (is_shared)
    1560             :                                         ret = BACKREF_FOUND_SHARED;
    1561             :                                 else
    1562           0 :                                         ret = BACKREF_FOUND_NOT_SHARED;
    1563           0 :                                 goto out;
    1564             :                         }
    1565             :                 }
    1566             :         }
    1567             : 
    1568           0 :         btrfs_release_path(path);
    1569             : 
    1570           0 :         ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
    1571           0 :         if (ret)
    1572           0 :                 goto out;
    1573             : 
    1574           0 :         WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
    1575             : 
    1576           0 :         ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
    1577           0 :         if (ret)
    1578           0 :                 goto out;
    1579             : 
    1580           0 :         WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
    1581             : 
    1582             :         /*
    1583             :          * This walks the tree of merged and resolved refs. Tree blocks are
    1584             :          * read in as needed. Unique entries are added to the ulist, and
    1585             :          * the list of found roots is updated.
    1586             :          *
    1587             :          * We release the entire tree in one go before returning.
    1588             :          */
    1589           0 :         node = rb_first_cached(&preftrees.direct.root);
    1590           0 :         while (node) {
    1591           0 :                 ref = rb_entry(node, struct prelim_ref, rbnode);
    1592           0 :                 node = rb_next(&ref->rbnode);
    1593             :                 /*
    1594             :                  * ref->count < 0 can happen here if there are delayed
    1595             :                  * refs with a node->action of BTRFS_DROP_DELAYED_REF.
    1596             :                  * prelim_ref_insert() relies on this when merging
    1597             :                  * identical refs to keep the overall count correct.
    1598             :                  * prelim_ref_insert() will merge only those refs
    1599             :                  * which compare identically.  Any refs having
    1600             :                  * e.g. different offsets would not be merged,
    1601             :                  * and would retain their original ref->count < 0.
    1602             :                  */
    1603           0 :                 if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
    1604             :                         /* no parent == root of tree */
    1605           0 :                         ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
    1606           0 :                         if (ret < 0)
    1607           0 :                                 goto out;
    1608             :                 }
    1609           0 :                 if (ref->count && ref->parent) {
    1610           0 :                         if (!ctx->skip_inode_ref_list && !ref->inode_list &&
    1611           0 :                             ref->level == 0) {
    1612           0 :                                 struct btrfs_tree_parent_check check = { 0 };
    1613           0 :                                 struct extent_buffer *eb;
    1614             : 
    1615           0 :                                 check.level = ref->level;
    1616             : 
    1617           0 :                                 eb = read_tree_block(ctx->fs_info, ref->parent,
    1618             :                                                      &check);
    1619           0 :                                 if (IS_ERR(eb)) {
    1620           0 :                                         ret = PTR_ERR(eb);
    1621           0 :                                         goto out;
    1622             :                                 }
    1623           0 :                                 if (!extent_buffer_uptodate(eb)) {
    1624           0 :                                         free_extent_buffer(eb);
    1625           0 :                                         ret = -EIO;
    1626           0 :                                         goto out;
    1627             :                                 }
    1628             : 
    1629           0 :                                 if (!path->skip_locking)
    1630           0 :                                         btrfs_tree_read_lock(eb);
    1631           0 :                                 ret = find_extent_in_eb(ctx, eb, &eie);
    1632           0 :                                 if (!path->skip_locking)
    1633           0 :                                         btrfs_tree_read_unlock(eb);
    1634           0 :                                 free_extent_buffer(eb);
    1635           0 :                                 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
    1636           0 :                                     ret < 0)
    1637           0 :                                         goto out;
    1638           0 :                                 ref->inode_list = eie;
    1639             :                                 /*
    1640             :                                  * We transferred the list ownership to the ref,
    1641             :                                  * so set to NULL to avoid a double free in case
    1642             :                                  * an error happens after this.
    1643             :                                  */
    1644           0 :                                 eie = NULL;
    1645             :                         }
    1646           0 :                         ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
    1647           0 :                                                   ref->inode_list,
    1648             :                                                   (void **)&eie, GFP_NOFS);
    1649           0 :                         if (ret < 0)
    1650           0 :                                 goto out;
    1651           0 :                         if (!ret && !ctx->skip_inode_ref_list) {
    1652             :                                 /*
    1653             :                                  * We've recorded that parent, so we must extend
    1654             :                                  * its inode list here.
    1655             :                                  *
    1656             :                                  * However if there was corruption we may not
    1657             :                                  * have found an eie, return an error in this
    1658             :                                  * case.
    1659             :                                  */
    1660           0 :                                 ASSERT(eie);
    1661           0 :                                 if (!eie) {
    1662           0 :                                         ret = -EUCLEAN;
    1663           0 :                                         goto out;
    1664             :                                 }
    1665           0 :                                 while (eie->next)
    1666           0 :                                         eie = eie->next;
    1667           0 :                                 eie->next = ref->inode_list;
    1668             :                         }
    1669           0 :                         eie = NULL;
    1670             :                         /*
    1671             :                          * We have transferred the inode list ownership from
    1672             :                          * this ref to the ref we added to the 'refs' ulist.
    1673             :                          * So set this ref's inode list to NULL to avoid
    1674             :                          * use-after-free when our caller uses it or double
    1675             :                          * frees in case an error happens before we return.
    1676             :                          */
    1677           0 :                         ref->inode_list = NULL;
    1678             :                 }
    1679           0 :                 cond_resched();
    1680             :         }
    1681             : 
    1682           0 : out:
    1683           0 :         btrfs_free_path(path);
    1684             : 
    1685           0 :         prelim_release(&preftrees.direct);
    1686           0 :         prelim_release(&preftrees.indirect);
    1687           0 :         prelim_release(&preftrees.indirect_missing_keys);
    1688             : 
    1689           0 :         if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
    1690           0 :                 free_inode_elem_list(eie);
    1691             :         return ret;
    1692             : }
    1693             : 
    1694             : /*
    1695             :  * Finds all leaves with a reference to the specified combination of
    1696             :  * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
    1697             :  * added to the ulist at @ctx->refs, and that ulist is allocated by this
    1698             :  * function. The caller should free the ulist with free_leaf_list() if
     1699             :  * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
    1700             :  * enough.
    1701             :  *
    1702             :  * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
    1703             :  */
    1704           0 : int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
    1705             : {
    1706           0 :         int ret;
    1707             : 
    1708           0 :         ASSERT(ctx->refs == NULL);
    1709             : 
    1710           0 :         ctx->refs = ulist_alloc(GFP_NOFS);
    1711           0 :         if (!ctx->refs)
    1712             :                 return -ENOMEM;
    1713             : 
    1714           0 :         ret = find_parent_nodes(ctx, NULL);
    1715           0 :         if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
    1716           0 :             (ret < 0 && ret != -ENOENT)) {
    1717           0 :                 free_leaf_list(ctx->refs);
    1718           0 :                 ctx->refs = NULL;
    1719           0 :                 return ret;
    1720             :         }
    1721             : 
    1722             :         return 0;
    1723             : }
    1724             : 
    1725             : /*
    1726             :  * Walk all backrefs for a given extent to find all roots that reference this
    1727             :  * extent. Walking a backref means finding all extents that reference this
    1728             :  * extent and in turn walk the backrefs of those, too. Naturally this is a
    1729             :  * recursive process, but here it is implemented in an iterative fashion: We
    1730             :  * find all referencing extents for the extent in question and put them on a
    1731             :  * list. In turn, we find all referencing extents for those, further appending
    1732             :  * to the list. The way we iterate the list allows adding more elements after
    1733             :  * the current while iterating. The process stops when we reach the end of the
    1734             :  * list.
    1735             :  *
    1736             :  * Found roots are added to @ctx->roots, which is allocated by this function if
    1737             :  * it points to NULL, in which case the caller is responsible for freeing it
    1738             :  * after it's not needed anymore.
    1739             :  * This function requires @ctx->refs to be NULL, as it uses it for allocating a
    1740             :  * ulist to do temporary work, and frees it before returning.
    1741             :  *
    1742             :  * Returns 0 on success, < 0 on error.
    1743             :  */
static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
	/* Save the fields we mutate so the context can be restored on return. */
	const u64 orig_bytenr = ctx->bytenr;
	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
	bool roots_ulist_allocated = false;
	struct ulist_iterator uiter;
	int ret = 0;

	/* ctx->refs is used as scratch space, the caller must not have set it. */
	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	if (!ctx->roots) {
		ctx->roots = ulist_alloc(GFP_NOFS);
		if (!ctx->roots) {
			ulist_free(ctx->refs);
			ctx->refs = NULL;
			return -ENOMEM;
		}
		/*
		 * Track that we own ctx->roots so that on error we only free
		 * it when it was not supplied by the caller.
		 */
		roots_ulist_allocated = true;
	}

	/* Only root ids are wanted, skip building per-ref inode lists. */
	ctx->skip_inode_ref_list = true;

	ULIST_ITER_INIT(&uiter);
	while (1) {
		struct ulist_node *node;

		/*
		 * Adds found roots to ctx->roots and referencing extents to
		 * ctx->refs.  The ulist iterator below stays valid while new
		 * elements are appended to ctx->refs during the walk.
		 */
		ret = find_parent_nodes(ctx, NULL);
		if (ret < 0 && ret != -ENOENT) {
			if (roots_ulist_allocated) {
				ulist_free(ctx->roots);
				ctx->roots = NULL;
			}
			break;
		}
		/*
		 * NOTE(review): -ENOENT is deliberately tolerated here -
		 * presumably the extent item can be missing mid-walk; confirm
		 * against find_parent_nodes()'s contract.
		 */
		ret = 0;
		node = ulist_next(ctx->refs, &uiter);
		if (!node)
			break;
		/* Continue the walk with the next referencing extent. */
		ctx->bytenr = node->val;
		cond_resched();
	}

	/* Free the scratch ulist and restore the caller's context fields. */
	ulist_free(ctx->refs);
	ctx->refs = NULL;
	ctx->bytenr = orig_bytenr;
	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;

	return ret;
}
    1797             : 
    1798           0 : int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
    1799             :                          bool skip_commit_root_sem)
    1800             : {
    1801           0 :         int ret;
    1802             : 
    1803           0 :         if (!ctx->trans && !skip_commit_root_sem)
    1804           0 :                 down_read(&ctx->fs_info->commit_root_sem);
    1805           0 :         ret = btrfs_find_all_roots_safe(ctx);
    1806           0 :         if (!ctx->trans && !skip_commit_root_sem)
    1807           0 :                 up_read(&ctx->fs_info->commit_root_sem);
    1808           0 :         return ret;
    1809             : }
    1810             : 
    1811           0 : struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
    1812             : {
    1813           0 :         struct btrfs_backref_share_check_ctx *ctx;
    1814             : 
    1815           0 :         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    1816           0 :         if (!ctx)
    1817             :                 return NULL;
    1818             : 
    1819           0 :         ulist_init(&ctx->refs);
    1820             : 
    1821           0 :         return ctx;
    1822             : }
    1823             : 
    1824           0 : void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
    1825             : {
    1826           0 :         if (!ctx)
    1827             :                 return;
    1828             : 
    1829           0 :         ulist_release(&ctx->refs);
    1830           0 :         kfree(ctx);
    1831             : }
    1832             : 
    1833             : /*
    1834             :  * Check if a data extent is shared or not.
    1835             :  *
    1836             :  * @inode:       The inode whose extent we are checking.
    1837             :  * @bytenr:      Logical bytenr of the extent we are checking.
    1838             :  * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
    1839             :  *               not known.
    1840             :  * @ctx:         A backref sharedness check context.
    1841             :  *
    1842             :  * btrfs_is_data_extent_shared uses the backref walking code but will short
    1843             :  * circuit as soon as it finds a root or inode that doesn't match the
    1844             :  * one passed in. This provides a significant performance benefit for
    1845             :  * callers (such as fiemap) which want to know whether the extent is
    1846             :  * shared but do not need a ref count.
    1847             :  *
    1848             :  * This attempts to attach to the running transaction in order to account for
    1849             :  * delayed refs, but continues on even when no running transaction exists.
    1850             :  *
    1851             :  * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
    1852             :  */
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
				u64 extent_gen,
				struct btrfs_backref_share_check_ctx *ctx)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.ctx = ctx,
		.root = root,
		.inum = btrfs_ino(inode),
		.data_bytenr = bytenr,
		.data_extent_gen = extent_gen,
		.share_count = 0,
		.self_ref_count = 0,
		.have_delayed_delete_refs = false,
	};
	int level;
	bool leaf_cached;
	bool leaf_is_shared;

	/*
	 * Fast path: the sharedness of recently checked data extents is kept
	 * in a small fixed-size ring (written cyclically further below), so
	 * repeated checks for the same bytenr avoid a full backref walk.
	 */
	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
		if (ctx->prev_extents_cache[i].bytenr == bytenr)
			return ctx->prev_extents_cache[i].is_shared;
	}

	ulist_init(&ctx->refs);

	/*
	 * Try to attach to the running transaction so delayed refs are taken
	 * into account.  -ENOENT (no running transaction) and -EROFS are not
	 * fatal: fall back to walking the commit roots, which requires
	 * holding the commit_root_sem for reading.
	 */
	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		/* Record a tree mod log sequence number for the walk. */
		btrfs_get_tree_mod_seq(fs_info, &elem);
		walk_ctx.time_seq = elem.seq;
	}

	ctx->use_path_cache = true;

	/*
	 * We may have previously determined that the current leaf is shared.
	 * If it is, then we have a data extent that is shared due to a shared
	 * subtree (caused by snapshotting) and we don't need to check for data
	 * backrefs. If the leaf is not shared, then we must do backref walking
	 * to determine if the data extent is shared through reflinks.
	 */
	leaf_cached = lookup_backref_shared_cache(ctx, root,
						  ctx->curr_leaf_bytenr, 0,
						  &leaf_is_shared);
	if (leaf_cached && leaf_is_shared) {
		ret = 1;
		goto out_trans;
	}

	walk_ctx.skip_inode_ref_list = true;
	walk_ctx.trans = trans;
	walk_ctx.fs_info = fs_info;
	walk_ctx.refs = &ctx->refs;

	/* -1 means we are in the bytenr of the data extent. */
	level = -1;
	ULIST_ITER_INIT(&uiter);
	/*
	 * Walk up the path: start at the data extent, then process each
	 * referencing extent buffer that find_parent_nodes() appends to
	 * ctx->refs, until sharedness is decided or the walk runs out.
	 */
	while (1) {
		const unsigned long prev_ref_count = ctx->refs.nnodes;

		walk_ctx.bytenr = bytenr;
		ret = find_parent_nodes(&walk_ctx, &shared);
		if (ret == BACKREF_FOUND_SHARED ||
		    ret == BACKREF_FOUND_NOT_SHARED) {
			/* If shared must return 1, otherwise return 0. */
			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
			if (level >= 0)
				store_backref_shared_cache(ctx, root, bytenr,
							   level, ret == 1);
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;

		/*
		 * More than one extent buffer (bytenr) may have been added to
		 * the ctx->refs ulist, in which case we have to check multiple
		 * tree paths in case the first one is not shared, so we can not
		 * use the path cache which is made for a single path. Multiple
		 * extent buffers at the current level happen when:
		 *
		 * 1) level -1, the data extent: If our data extent was not
		 *    directly shared (without multiple reference items), then
		 *    it might have a single reference item with a count > 1 for
		 *    the same offset, which means there are 2 (or more) file
		 *    extent items that point to the data extent - this happens
		 *    when a file extent item needs to be split and then one
		 *    item gets moved to another leaf due to a b+tree leaf split
		 *    when inserting some item. In this case the file extent
		 *    items may be located in different leaves and therefore
		 *    some of the leaves may be referenced through shared
		 *    subtrees while others are not. Since our extent buffer
		 *    cache only works for a single path (by far the most common
		 *    case and simpler to deal with), we can not use it if we
		 *    have multiple leaves (which implies multiple paths).
		 *
		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
		 *    and indirect references on a b+tree node/leaf, so we have
		 *    to check multiple paths, and the extent buffer (the
		 *    current bytenr) may be shared or not. One example is
		 *    during relocation as we may get a shared tree block ref
		 *    (direct ref) and a non-shared tree block ref (indirect
		 *    ref) for the same node/leaf.
		 */
		if ((ctx->refs.nnodes - prev_ref_count) > 1)
			ctx->use_path_cache = false;

		if (level >= 0)
			store_backref_shared_cache(ctx, root, bytenr,
						   level, false);
		node = ulist_next(&ctx->refs, &uiter);
		if (!node)
			break;
		bytenr = node->val;
		if (ctx->use_path_cache) {
			bool is_shared;
			bool cached;

			/* Moving one level up the path towards the root. */
			level++;
			cached = lookup_backref_shared_cache(ctx, root, bytenr,
							     level, &is_shared);
			if (cached) {
				ret = (is_shared ? 1 : 0);
				break;
			}
		}
		/* Reset the per-bytenr state before walking the next extent. */
		shared.share_count = 0;
		shared.have_delayed_delete_refs = false;
		cond_resched();
	}

	/*
	 * If the path cache is disabled, then it means at some tree level we
	 * got multiple parents due to a mix of direct and indirect backrefs or
	 * multiple leaves with file extent items pointing to the same data
	 * extent. We have to invalidate the cache and cache only the sharedness
	 * result for the levels where we got only one node/reference.
	 */
	if (!ctx->use_path_cache) {
		int i = 0;

		level--;
		if (ret >= 0 && level >= 0) {
			bytenr = ctx->path_cache_entries[level].bytenr;
			ctx->use_path_cache = true;
			store_backref_shared_cache(ctx, root, bytenr, level, ret);
			i = level + 1;
		}

		/* Invalidate every entry above the last trustworthy level. */
		for ( ; i < BTRFS_MAX_LEVEL; i++)
			ctx->path_cache_entries[i].bytenr = 0;
	}

	/*
	 * Cache the sharedness result for the data extent if we know our inode
	 * has more than 1 file extent item that refers to the data extent.
	 */
	if (ret >= 0 && shared.self_ref_count > 1) {
		int slot = ctx->prev_extents_cache_slot;

		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
		ctx->prev_extents_cache[slot].is_shared = (ret == 1);

		/* The cache is a ring: advance the slot with wrap-around. */
		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
		ctx->prev_extents_cache_slot = slot;
	}

out_trans:
	/* Undo whichever of the two setups (transaction or semaphore) we did. */
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}
out:
	ulist_release(&ctx->refs);
	/* Remember the leaf we were at, for the fast path of the next call. */
	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;

	return ret;
}
    2048             : 
/*
 * Find the first INODE_EXTREF item for @inode_objectid whose key offset is
 * >= @start_off.
 *
 * @root:            the tree to search (the fs/subvolume tree holding the inode)
 * @inode_objectid:  objectid of the inode whose extended refs we want
 * @start_off:       minimum key offset (name hash) to start the search from
 * @path:            caller-provided path; on success it points at the found item
 * @ret_extref:      on success, set to point at the extref item inside the leaf
 *                   (valid only while @path holds the leaf)
 * @found_off:       optional; on success, set to the found item's key offset
 *
 * Returns 0 on success, -ENOENT if no matching item exists, or a negative
 * errno on search failure.
 */
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
			  u64 start_off, struct btrfs_path *path,
			  struct btrfs_inode_extref **ret_extref,
			  u64 *found_off)
{
	int ret, slot;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_inode_extref *extref;
	const struct extent_buffer *leaf;
	unsigned long ptr;

	key.objectid = inode_objectid;
	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = start_off;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF_KEY item. In the case
			 * that we're pointing to the last slot in a
			 * leaf, we must move one leaf over.
			 */
			ret = btrfs_next_leaf(root, path);
			if (ret) {
				/* ret > 0 means no more leaves: nothing found. */
				if (ret >= 1)
					ret = -ENOENT;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have different
		 * objectid or type then there are no more to be found
		 * in the tree and we can exit.
		 */
		ret = -ENOENT;
		if (found_key.objectid != inode_objectid)
			break;
		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
			break;

		/* Found one: hand back a pointer into the leaf. */
		ret = 0;
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		extref = (struct btrfs_inode_extref *)ptr;
		*ret_extref = extref;
		if (found_off)
			*found_off = found_key.offset;
		break;
	}

	return ret;
}
    2116             : 
/*
 * This iterates to turn a name (from iref/extref) into a full filesystem path.
 * Elements of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. The path is only given within the current file system.
 * Therefore, it never starts with a '/'. The caller is responsible to provide
 * "size" bytes in "dest". The dest buffer will be filled backwards. Finally,
 * the start point of the resulting string is returned. This pointer is within
 * dest, normally.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check this!
 */
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			u32 name_len, unsigned long name_off,
			struct extent_buffer *eb_in, u64 parent,
			char *dest, u32 size)
{
	int slot;
	u64 next_inum;
	int ret;
	/*
	 * Signed so it can go negative: a negative value measures how much
	 * extra space would have been needed (see function comment above).
	 */
	s64 bytes_left = ((s64)size) - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	struct btrfs_inode_ref *iref;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	while (1) {
		/* Copy the current name component, backwards into dest. */
		bytes_left -= name_len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   name_off, name_len);
		/*
		 * Drop our reference (and lock, unless locking is skipped) on
		 * any buffer we took over from the path in a prior iteration.
		 * The caller's eb_in is never unlocked/freed here.
		 */
		if (eb != eb_in) {
			if (!path->skip_locking)
				btrfs_tree_read_unlock(eb);
			free_extent_buffer(eb);
		}
		/* Walk up: find the INODE_REF of the current parent inode. */
		ret = btrfs_find_item(fs_root, path, parent, 0,
				BTRFS_INODE_REF_KEY, &found_key);
		if (ret > 0)
			ret = -ENOENT;
		if (ret)
			break;

		/* The INODE_REF key offset is the parent's parent inode. */
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			path->nodes[0] = NULL;
			path->locks[0] = 0;
		}
		btrfs_release_path(path);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		name_len = btrfs_inode_ref_name_len(eb, iref);
		name_off = (unsigned long)(iref + 1);

		parent = next_inum;
		/* Reserve one byte for the '/' separator. */
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}
    2196             : 
/*
 * This makes the path point to (logical EXTENT_ITEM *).
 * Returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 *
 * @fs_info:   filesystem info
 * @logical:   logical address to look up; need not be the start of the extent
 * @path:      caller-provided path; on success points at the extent item
 * @found_key: on success, the key of the extent item containing @logical
 * @flags_ret: required; set to one of the two flag values above
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key,
			u64 *flags_ret)
{
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
	int ret;
	u64 flags;
	u64 size = 0;
	u32 item_size;
	const struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	/*
	 * Search with the maximum offset, then step back one extent item:
	 * that lands on the last extent item starting at or before @logical.
	 */
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}
	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	/*
	 * For METADATA_ITEM the key offset is the tree level, not a length,
	 * so the extent size is the fixed nodesize; for EXTENT_ITEM the key
	 * offset is the extent's byte length.
	 */
	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
		size = fs_info->nodesize;
	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
		size = found_key->offset;

	/* Verify @logical actually falls inside [objectid, objectid + size). */
	if (found_key->objectid > logical ||
	    found_key->objectid + size <= logical) {
		btrfs_debug(fs_info,
			"logical %llu is not within any extent", logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	btrfs_debug(fs_info,
		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
		 logical, logical - found_key->objectid, found_key->objectid,
		 found_key->offset, flags, item_size);

	WARN_ON(!flags_ret);
	if (flags_ret) {
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
		else if (flags & BTRFS_EXTENT_FLAG_DATA)
			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
		else
			BUG();
		return 0;
	}

	return -EIO;
}
    2270             : 
/*
 * Helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. It is used to track state.
 * If more refs exist, 0 is returned and the next call to
 * get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. After the last ref was processed, 1 is returned.
 * Returns <0 on error (-ENOENT if the item has no inline refs at all,
 * -EUCLEAN on a corrupted/invalid ref type).
 */
static int get_extent_inline_ref(unsigned long *ptr,
				 const struct extent_buffer *eb,
				 const struct btrfs_key *key,
				 const struct btrfs_extent_item *ei,
				 u32 item_size,
				 struct btrfs_extent_inline_ref **out_eiref,
				 int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			if (key->type == BTRFS_METADATA_ITEM_KEY) {
				/* a skinny metadata extent */
				*out_eiref =
				     (struct btrfs_extent_inline_ref *)(ei + 1);
			} else {
				/*
				 * Non-skinny tree block: a tree_block_info
				 * struct sits between the extent item and the
				 * first inline ref; skip over it.
				 */
				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
				info = (struct btrfs_tree_block_info *)(ei + 1);
				*out_eiref =
				   (struct btrfs_extent_inline_ref *)(info + 1);
			}
		} else {
			/* Data extent: inline refs follow the item directly. */
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		/* Item contains no inline refs at all. */
		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
						     BTRFS_REF_TYPE_ANY);
	if (*out_type == BTRFS_REF_TYPE_INVALID)
		return -EUCLEAN;

	/* Advance the cursor past this ref for the next call. */
	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}
    2327             : 
/*
 * Reads the tree block backref for an extent. Tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see get_extent_inline_ref comment).
 * Returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_key *key, struct btrfs_extent_item *ei,
			    u32 item_size, u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_extent_inline_ref *eiref;

	/* (unsigned long)-1 marks "iteration finished" from a prior call. */
	if (*ptr == (unsigned long)-1)
		return 1;

	/* Scan inline refs until we hit a tree-block backref type. */
	while (1) {
		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		/*
		 * Break (not return) on a match even when ret == 1: the last
		 * inline ref may itself be the tree-block ref we want.
		 */
		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);

	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
		/* Non-skinny: level lives in the tree_block_info after ei. */
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)(ei + 1);
		*out_level = btrfs_tree_block_level(eb, info);
	} else {
		/* Skinny metadata: the key offset is the tree level. */
		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
		*out_level = (u8)key->offset;
	}

	/* Remember that the last ref was consumed so the next call returns 1. */
	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}
    2378             : 
    2379           0 : static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
    2380             :                              struct extent_inode_elem *inode_list,
    2381             :                              u64 root, u64 extent_item_objectid,
    2382             :                              iterate_extent_inodes_t *iterate, void *ctx)
    2383             : {
    2384           0 :         struct extent_inode_elem *eie;
    2385           0 :         int ret = 0;
    2386             : 
    2387           0 :         for (eie = inode_list; eie; eie = eie->next) {
    2388           0 :                 btrfs_debug(fs_info,
    2389             :                             "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
    2390             :                             extent_item_objectid, eie->inum,
    2391             :                             eie->offset, root);
    2392           0 :                 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
    2393           0 :                 if (ret) {
    2394             :                         btrfs_debug(fs_info,
    2395             :                                     "stopping iteration for %llu due to ret=%d",
    2396             :                                     extent_item_objectid, ret);
    2397             :                         break;
    2398             :                 }
    2399             :         }
    2400             : 
    2401           0 :         return ret;
    2402             : }
    2403             : 
/*
 * Calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * When the iterator function returns a non-zero value, iteration stops.
 *
 * ctx->trans and ctx->roots must be NULL on entry; both are managed here and
 * reset to NULL before returning. When @search_commit_root is false, a
 * transaction is attached (if one is running) so delayed refs are considered;
 * otherwise the commit roots are searched under commit_root_sem.
 */
int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
			  bool search_commit_root,
			  iterate_extent_inodes_t *iterate, void *user_ctx)
{
	int ret;
	struct ulist *refs;
	struct ulist_node *ref_node;
	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
	struct ulist_iterator ref_uiter;

	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
		    ctx->bytenr);

	ASSERT(ctx->trans == NULL);
	ASSERT(ctx->roots == NULL);

	if (!search_commit_root) {
		struct btrfs_trans_handle *trans;

		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
		if (IS_ERR(trans)) {
			/*
			 * -ENOENT (no running transaction) and -EROFS are
			 * tolerated: fall back to searching commit roots.
			 */
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
		ctx->trans = trans;
	}

	if (ctx->trans) {
		/* Pin a tree mod log sequence for a consistent view. */
		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
		ctx->time_seq = seq_elem.seq;
	} else {
		down_read(&ctx->fs_info->commit_root_sem);
	}

	/* First resolve the extent to the set of leaves that reference it. */
	ret = btrfs_find_all_leafs(ctx);
	if (ret)
		goto out;
	/* Take ownership of the leaf list; ctx->refs is reused below. */
	refs = ctx->refs;
	ctx->refs = NULL;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		const u64 leaf_bytenr = ref_node->val;
		struct ulist_node *root_node;
		struct ulist_iterator root_uiter;
		struct extent_inode_elem *inode_list;

		/* aux carries the inode list built by the leaf resolution. */
		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;

		if (ctx->cache_lookup) {
			const u64 *root_ids;
			int root_count;
			bool cached;

			/*
			 * Fast path: the caller may cache leaf -> roots
			 * mappings, avoiding btrfs_find_all_roots_safe().
			 */
			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
						   &root_ids, &root_count);
			if (cached) {
				for (int i = 0; i < root_count; i++) {
					ret = iterate_leaf_refs(ctx->fs_info,
								inode_list,
								root_ids[i],
								leaf_bytenr,
								iterate,
								user_ctx);
					if (ret)
						break;
				}
				/* Outer loop condition (!ret) handles errors. */
				continue;
			}
		}

		/* Lazily allocate the roots ulist on first uncached leaf. */
		if (!ctx->roots) {
			ctx->roots = ulist_alloc(GFP_NOFS);
			if (!ctx->roots) {
				ret = -ENOMEM;
				break;
			}
		}

		/* Now resolve this leaf to every root that references it. */
		ctx->bytenr = leaf_bytenr;
		ret = btrfs_find_all_roots_safe(ctx);
		if (ret)
			break;

		if (ctx->cache_store)
			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);

		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
			btrfs_debug(ctx->fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
						root_node->val, ctx->bytenr,
						iterate, user_ctx);
		}
		/* Reuse the ulist for the next leaf instead of reallocating. */
		ulist_reinit(ctx->roots);
	}

	/* Frees the ulist and the attached extent_inode_elem lists. */
	free_leaf_list(refs);
out:
	if (ctx->trans) {
		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
		btrfs_end_transaction(ctx->trans);
		ctx->trans = NULL;
	} else {
		up_read(&ctx->fs_info->commit_root_sem);
	}

	ulist_free(ctx->roots);
	ctx->roots = NULL;

	/* The callback's "stop" sentinel is not an error for the caller. */
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
		ret = 0;

	return ret;
}
    2529             : 
    2530           0 : static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
    2531             : {
    2532           0 :         struct btrfs_data_container *inodes = ctx;
    2533           0 :         const size_t c = 3 * sizeof(u64);
    2534             : 
    2535           0 :         if (inodes->bytes_left >= c) {
    2536           0 :                 inodes->bytes_left -= c;
    2537           0 :                 inodes->val[inodes->elem_cnt] = inum;
    2538           0 :                 inodes->val[inodes->elem_cnt + 1] = offset;
    2539           0 :                 inodes->val[inodes->elem_cnt + 2] = root;
    2540           0 :                 inodes->elem_cnt += 3;
    2541             :         } else {
    2542           0 :                 inodes->bytes_missing += c - inodes->bytes_left;
    2543           0 :                 inodes->bytes_left = 0;
    2544           0 :                 inodes->elem_missed += 3;
    2545             :         }
    2546             : 
    2547           0 :         return 0;
    2548             : }
    2549             : 
    2550           0 : int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
    2551             :                                 struct btrfs_path *path,
    2552             :                                 void *ctx, bool ignore_offset)
    2553             : {
    2554           0 :         struct btrfs_backref_walk_ctx walk_ctx = { 0 };
    2555           0 :         int ret;
    2556           0 :         u64 flags = 0;
    2557           0 :         struct btrfs_key found_key;
    2558           0 :         int search_commit_root = path->search_commit_root;
    2559             : 
    2560           0 :         ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
    2561           0 :         btrfs_release_path(path);
    2562           0 :         if (ret < 0)
    2563             :                 return ret;
    2564           0 :         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
    2565             :                 return -EINVAL;
    2566             : 
    2567           0 :         walk_ctx.bytenr = found_key.objectid;
    2568           0 :         if (ignore_offset)
    2569           0 :                 walk_ctx.ignore_extent_item_pos = true;
    2570             :         else
    2571           0 :                 walk_ctx.extent_item_pos = logical - found_key.objectid;
    2572           0 :         walk_ctx.fs_info = fs_info;
    2573             : 
    2574           0 :         return iterate_extent_inodes(&walk_ctx, search_commit_root,
    2575             :                                      build_ino_list, ctx);
    2576             : }
    2577             : 
    2578             : static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
    2579             :                          struct extent_buffer *eb, struct inode_fs_paths *ipath);
    2580             : 
    2581           0 : static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
    2582             : {
    2583           0 :         int ret = 0;
    2584           0 :         int slot;
    2585           0 :         u32 cur;
    2586           0 :         u32 len;
    2587           0 :         u32 name_len;
    2588           0 :         u64 parent = 0;
    2589           0 :         int found = 0;
    2590           0 :         struct btrfs_root *fs_root = ipath->fs_root;
    2591           0 :         struct btrfs_path *path = ipath->btrfs_path;
    2592           0 :         struct extent_buffer *eb;
    2593           0 :         struct btrfs_inode_ref *iref;
    2594           0 :         struct btrfs_key found_key;
    2595             : 
    2596           0 :         while (!ret) {
    2597           0 :                 ret = btrfs_find_item(fs_root, path, inum,
    2598             :                                 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
    2599             :                                 &found_key);
    2600             : 
    2601           0 :                 if (ret < 0)
    2602             :                         break;
    2603           0 :                 if (ret) {
    2604           0 :                         ret = found ? 0 : -ENOENT;
    2605             :                         break;
    2606             :                 }
    2607           0 :                 ++found;
    2608             : 
    2609           0 :                 parent = found_key.offset;
    2610           0 :                 slot = path->slots[0];
    2611           0 :                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
    2612           0 :                 if (!eb) {
    2613             :                         ret = -ENOMEM;
    2614             :                         break;
    2615             :                 }
    2616           0 :                 btrfs_release_path(path);
    2617             : 
    2618           0 :                 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
    2619             : 
    2620           0 :                 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
    2621           0 :                         name_len = btrfs_inode_ref_name_len(eb, iref);
    2622             :                         /* path must be released before calling iterate()! */
    2623           0 :                         btrfs_debug(fs_root->fs_info,
    2624             :                                 "following ref at offset %u for inode %llu in tree %llu",
    2625             :                                 cur, found_key.objectid,
    2626             :                                 fs_root->root_key.objectid);
    2627           0 :                         ret = inode_to_path(parent, name_len,
    2628           0 :                                       (unsigned long)(iref + 1), eb, ipath);
    2629           0 :                         if (ret)
    2630             :                                 break;
    2631           0 :                         len = sizeof(*iref) + name_len;
    2632           0 :                         iref = (struct btrfs_inode_ref *)((char *)iref + len);
    2633             :                 }
    2634           0 :                 free_extent_buffer(eb);
    2635             :         }
    2636             : 
    2637           0 :         btrfs_release_path(path);
    2638             : 
    2639           0 :         return ret;
    2640             : }
    2641             : 
    2642           0 : static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
    2643             : {
    2644           0 :         int ret;
    2645           0 :         int slot;
    2646           0 :         u64 offset = 0;
    2647           0 :         u64 parent;
    2648           0 :         int found = 0;
    2649           0 :         struct btrfs_root *fs_root = ipath->fs_root;
    2650           0 :         struct btrfs_path *path = ipath->btrfs_path;
    2651           0 :         struct extent_buffer *eb;
    2652           0 :         struct btrfs_inode_extref *extref;
    2653           0 :         u32 item_size;
    2654           0 :         u32 cur_offset;
    2655           0 :         unsigned long ptr;
    2656             : 
    2657           0 :         while (1) {
    2658           0 :                 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
    2659             :                                             &offset);
    2660           0 :                 if (ret < 0)
    2661             :                         break;
    2662           0 :                 if (ret) {
    2663           0 :                         ret = found ? 0 : -ENOENT;
    2664             :                         break;
    2665             :                 }
    2666           0 :                 ++found;
    2667             : 
    2668           0 :                 slot = path->slots[0];
    2669           0 :                 eb = btrfs_clone_extent_buffer(path->nodes[0]);
    2670           0 :                 if (!eb) {
    2671             :                         ret = -ENOMEM;
    2672             :                         break;
    2673             :                 }
    2674           0 :                 btrfs_release_path(path);
    2675             : 
    2676           0 :                 item_size = btrfs_item_size(eb, slot);
    2677           0 :                 ptr = btrfs_item_ptr_offset(eb, slot);
    2678           0 :                 cur_offset = 0;
    2679             : 
    2680           0 :                 while (cur_offset < item_size) {
    2681           0 :                         u32 name_len;
    2682             : 
    2683           0 :                         extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
    2684           0 :                         parent = btrfs_inode_extref_parent(eb, extref);
    2685           0 :                         name_len = btrfs_inode_extref_name_len(eb, extref);
    2686           0 :                         ret = inode_to_path(parent, name_len,
    2687           0 :                                       (unsigned long)&extref->name, eb, ipath);
    2688           0 :                         if (ret)
    2689             :                                 break;
    2690             : 
    2691           0 :                         cur_offset += btrfs_inode_extref_name_len(eb, extref);
    2692           0 :                         cur_offset += sizeof(*extref);
    2693             :                 }
    2694           0 :                 free_extent_buffer(eb);
    2695             : 
    2696           0 :                 offset++;
    2697             :         }
    2698             : 
    2699           0 :         btrfs_release_path(path);
    2700             : 
    2701           0 :         return ret;
    2702             : }
    2703             : 
/*
 * Convert one inode ref (name @name_off/@name_len in @eb, parent dir @inum)
 * into a full filesystem path and record it in ipath->fspath.
 *
 * btrfs_ref_to_path() builds the path string right-to-left into the tail of
 * the fspath buffer; on success a pointer to the string is stored in
 * ipath->fspath->val[] and the space accounting is updated.  When the buffer
 * is too small, elem_missed and bytes_missing are bumped instead.
 *
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
{
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	/* Reserve room for one more pointer slot in the val[] array. */
	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	/* Lowest address the right-to-left path string may grow down to. */
	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		/* The path fit: record it and shrink the remaining space. */
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		/* Out of space: count the miss and how much more was needed. */
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}
    2738             : 
    2739             : /*
    2740             :  * this dumps all file system paths to the inode into the ipath struct, provided
    2741             :  * is has been created large enough. each path is zero-terminated and accessed
    2742             :  * from ipath->fspath->val[i].
    2743             :  * when it returns, there are ipath->fspath->elem_cnt number of paths available
    2744             :  * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
    2745             :  * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
    2746             :  * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
    2747             :  * have been needed to return all paths.
    2748             :  */
    2749           0 : int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
    2750             : {
    2751           0 :         int ret;
    2752           0 :         int found_refs = 0;
    2753             : 
    2754           0 :         ret = iterate_inode_refs(inum, ipath);
    2755           0 :         if (!ret)
    2756             :                 ++found_refs;
    2757           0 :         else if (ret != -ENOENT)
    2758             :                 return ret;
    2759             : 
    2760           0 :         ret = iterate_inode_extrefs(inum, ipath);
    2761           0 :         if (ret == -ENOENT && found_refs)
    2762           0 :                 return 0;
    2763             : 
    2764             :         return ret;
    2765             : }
    2766             : 
    2767           0 : struct btrfs_data_container *init_data_container(u32 total_bytes)
    2768             : {
    2769           0 :         struct btrfs_data_container *data;
    2770           0 :         size_t alloc_bytes;
    2771             : 
    2772           0 :         alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
    2773           0 :         data = kvmalloc(alloc_bytes, GFP_KERNEL);
    2774           0 :         if (!data)
    2775             :                 return ERR_PTR(-ENOMEM);
    2776             : 
    2777           0 :         if (total_bytes >= sizeof(*data)) {
    2778           0 :                 data->bytes_left = total_bytes - sizeof(*data);
    2779           0 :                 data->bytes_missing = 0;
    2780             :         } else {
    2781           0 :                 data->bytes_missing = sizeof(*data) - total_bytes;
    2782           0 :                 data->bytes_left = 0;
    2783             :         }
    2784             : 
    2785           0 :         data->elem_cnt = 0;
    2786           0 :         data->elem_missed = 0;
    2787             : 
    2788           0 :         return data;
    2789             : }
    2790             : 
    2791             : /*
    2792             :  * allocates space to return multiple file system paths for an inode.
    2793             :  * total_bytes to allocate are passed, note that space usable for actual path
    2794             :  * information will be total_bytes - sizeof(struct inode_fs_paths).
    2795             :  * the returned pointer must be freed with free_ipath() in the end.
    2796             :  */
    2797           0 : struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
    2798             :                                         struct btrfs_path *path)
    2799             : {
    2800           0 :         struct inode_fs_paths *ifp;
    2801           0 :         struct btrfs_data_container *fspath;
    2802             : 
    2803           0 :         fspath = init_data_container(total_bytes);
    2804           0 :         if (IS_ERR(fspath))
    2805             :                 return ERR_CAST(fspath);
    2806             : 
    2807           0 :         ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
    2808           0 :         if (!ifp) {
    2809           0 :                 kvfree(fspath);
    2810           0 :                 return ERR_PTR(-ENOMEM);
    2811             :         }
    2812             : 
    2813           0 :         ifp->btrfs_path = path;
    2814           0 :         ifp->fspath = fspath;
    2815           0 :         ifp->fs_root = fs_root;
    2816             : 
    2817           0 :         return ifp;
    2818             : }
    2819             : 
    2820           0 : void free_ipath(struct inode_fs_paths *ipath)
    2821             : {
    2822           0 :         if (!ipath)
    2823             :                 return;
    2824           0 :         kvfree(ipath->fspath);
    2825           0 :         kfree(ipath);
    2826             : }
    2827             : 
    2828           0 : struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
    2829             : {
    2830           0 :         struct btrfs_backref_iter *ret;
    2831             : 
    2832           0 :         ret = kzalloc(sizeof(*ret), GFP_NOFS);
    2833           0 :         if (!ret)
    2834             :                 return NULL;
    2835             : 
    2836           0 :         ret->path = btrfs_alloc_path();
    2837           0 :         if (!ret->path) {
    2838           0 :                 kfree(ret);
    2839           0 :                 return NULL;
    2840             :         }
    2841             : 
    2842             :         /* Current backref iterator only supports iteration in commit root */
    2843           0 :         ret->path->search_commit_root = 1;
    2844           0 :         ret->path->skip_locking = 1;
    2845           0 :         ret->fs_info = fs_info;
    2846             : 
    2847           0 :         return ret;
    2848             : }
    2849             : 
/*
 * Position @iter at the first backref of the tree block at @bytenr.
 *
 * Searches the extent tree for the EXTENT_ITEM/METADATA_ITEM of @bytenr,
 * then points iter->cur_ptr at the first inline backref or, when no inline
 * backrefs exist, at the first keyed SHARED_BLOCK_REF/TREE_BLOCK_REF item.
 *
 * Only tree block (metadata) extents are supported; data extents yield
 * -ENOTSUPP.  Returns 0 on success, -ENOENT when the extent or any backref
 * for it is missing, -EUCLEAN on extent tree corruption, or another
 * negative errno from the tree search.  The iterator's path is released on
 * any error.
 */
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	/*
	 * Search with offset (u64)-1 so the slot just before the insertion
	 * point is the last item keyed at @bytenr, then step back one slot.
	 */
	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/* An exact match on offset (u64)-1 can never be valid. */
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		/* Nothing before the insertion point to step back to. */
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only support iteration on tree backref yet.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, that we can only use
	 * extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	/* First inline backref starts right after the extent item header. */
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				path->slots[0]);
		/* The next item must still be a tree backref of @bytenr. */
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}
    2937             : 
/*
 * Go to the next backref item of current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's inline ref or not by iter->cur_key.
 *
 * Return 0 if we get next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if there is something wrong happened.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_root *extent_root;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		/* Advance past the current inline entry. */
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
	ret = btrfs_next_item(extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	/* A key of another bytenr or type means the backrefs are exhausted. */
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
						path->slots[0]);
	return 0;
}
    2999             : 
    3000           0 : void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
    3001             :                               struct btrfs_backref_cache *cache, int is_reloc)
    3002             : {
    3003           0 :         int i;
    3004             : 
    3005           0 :         cache->rb_root = RB_ROOT;
    3006           0 :         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
    3007           0 :                 INIT_LIST_HEAD(&cache->pending[i]);
    3008           0 :         INIT_LIST_HEAD(&cache->changed);
    3009           0 :         INIT_LIST_HEAD(&cache->detached);
    3010           0 :         INIT_LIST_HEAD(&cache->leaves);
    3011           0 :         INIT_LIST_HEAD(&cache->pending_edge);
    3012           0 :         INIT_LIST_HEAD(&cache->useless_node);
    3013           0 :         cache->fs_info = fs_info;
    3014           0 :         cache->is_reloc = is_reloc;
    3015           0 : }
    3016             : 
    3017           0 : struct btrfs_backref_node *btrfs_backref_alloc_node(
    3018             :                 struct btrfs_backref_cache *cache, u64 bytenr, int level)
    3019             : {
    3020           0 :         struct btrfs_backref_node *node;
    3021             : 
    3022           0 :         ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
    3023           0 :         node = kzalloc(sizeof(*node), GFP_NOFS);
    3024           0 :         if (!node)
    3025             :                 return node;
    3026             : 
    3027           0 :         INIT_LIST_HEAD(&node->list);
    3028           0 :         INIT_LIST_HEAD(&node->upper);
    3029           0 :         INIT_LIST_HEAD(&node->lower);
    3030           0 :         RB_CLEAR_NODE(&node->rb_node);
    3031           0 :         cache->nr_nodes++;
    3032           0 :         node->level = level;
    3033           0 :         node->bytenr = bytenr;
    3034             : 
    3035           0 :         return node;
    3036             : }
    3037             : 
    3038           0 : struct btrfs_backref_edge *btrfs_backref_alloc_edge(
    3039             :                 struct btrfs_backref_cache *cache)
    3040             : {
    3041           0 :         struct btrfs_backref_edge *edge;
    3042             : 
    3043           0 :         edge = kzalloc(sizeof(*edge), GFP_NOFS);
    3044           0 :         if (edge)
    3045           0 :                 cache->nr_edges++;
    3046           0 :         return edge;
    3047             : }
    3048             : 
/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	/* Bottom-up cleanup requires a lowest or a detached node. */
	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		/* Unlink and free each edge pointing up from this node. */
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Add the node to leaf node list if no other child block
		 * cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}
    3086             : 
    3087             : /*
    3088             :  * Release all nodes/edges from current cache
    3089             :  */
    3090           0 : void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
    3091             : {
    3092           0 :         struct btrfs_backref_node *node;
    3093           0 :         int i;
    3094             : 
    3095           0 :         while (!list_empty(&cache->detached)) {
    3096           0 :                 node = list_entry(cache->detached.next,
    3097             :                                   struct btrfs_backref_node, list);
    3098           0 :                 btrfs_backref_cleanup_node(cache, node);
    3099             :         }
    3100             : 
    3101           0 :         while (!list_empty(&cache->leaves)) {
    3102           0 :                 node = list_entry(cache->leaves.next,
    3103             :                                   struct btrfs_backref_node, lower);
    3104           0 :                 btrfs_backref_cleanup_node(cache, node);
    3105             :         }
    3106             : 
    3107           0 :         cache->last_trans = 0;
    3108             : 
    3109           0 :         for (i = 0; i < BTRFS_MAX_LEVEL; i++)
    3110           0 :                 ASSERT(list_empty(&cache->pending[i]));
    3111           0 :         ASSERT(list_empty(&cache->pending_edge));
    3112           0 :         ASSERT(list_empty(&cache->useless_node));
    3113           0 :         ASSERT(list_empty(&cache->changed));
    3114           0 :         ASSERT(list_empty(&cache->detached));
    3115           0 :         ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
    3116           0 :         ASSERT(!cache->nr_nodes);
    3117           0 :         ASSERT(!cache->nr_edges);
    3118           0 : }
    3119             : 
    3120             : /*
    3121             :  * Handle direct tree backref
    3122             :  *
    3123             :  * Direct tree backref means, the backref item shows its parent bytenr
    3124             :  * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
    3125             :  *
    3126             :  * @ref_key:    The converted backref key.
    3127             :  *              For keyed backref, it's the item key.
    3128             :  *              For inlined backref, objectid is the bytenr,
    3129             :  *              type is btrfs_inline_ref_type, offset is
    3130             :  *              btrfs_inline_ref_offset.
    3131             :  */
    3132           0 : static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
    3133             :                                       struct btrfs_key *ref_key,
    3134             :                                       struct btrfs_backref_node *cur)
    3135             : {
    3136           0 :         struct btrfs_backref_edge *edge;
    3137           0 :         struct btrfs_backref_node *upper;
    3138           0 :         struct rb_node *rb_node;
    3139             : 
    3140           0 :         ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
    3141             : 
    3142             :         /* Only reloc root uses backref pointing to itself */
    3143           0 :         if (ref_key->objectid == ref_key->offset) {
    3144           0 :                 struct btrfs_root *root;
    3145             : 
    3146           0 :                 cur->is_reloc_root = 1;
    3147             :                 /* Only reloc backref cache cares about a specific root */
    3148           0 :                 if (cache->is_reloc) {
    3149           0 :                         root = find_reloc_root(cache->fs_info, cur->bytenr);
    3150           0 :                         if (!root)
    3151             :                                 return -ENOENT;
    3152           0 :                         cur->root = root;
    3153             :                 } else {
    3154             :                         /*
    3155             :                          * For generic purpose backref cache, reloc root node
    3156             :                          * is useless.
    3157             :                          */
    3158           0 :                         list_add(&cur->list, &cache->useless_node);
    3159             :                 }
    3160           0 :                 return 0;
    3161             :         }
    3162             : 
    3163           0 :         edge = btrfs_backref_alloc_edge(cache);
    3164           0 :         if (!edge)
    3165             :                 return -ENOMEM;
    3166             : 
    3167           0 :         rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
    3168           0 :         if (!rb_node) {
    3169             :                 /* Parent node not yet cached */
    3170           0 :                 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
    3171           0 :                                            cur->level + 1);
    3172           0 :                 if (!upper) {
    3173           0 :                         btrfs_backref_free_edge(cache, edge);
    3174           0 :                         return -ENOMEM;
    3175             :                 }
    3176             : 
    3177             :                 /*
    3178             :                  *  Backrefs for the upper level block isn't cached, add the
    3179             :                  *  block to pending list
    3180             :                  */
    3181           0 :                 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
    3182             :         } else {
    3183             :                 /* Parent node already cached */
    3184           0 :                 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
    3185           0 :                 ASSERT(upper->checked);
    3186           0 :                 INIT_LIST_HEAD(&edge->list[UPPER]);
    3187             :         }
    3188           0 :         btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
    3189             :         return 0;
    3190             : }
    3191             : 
    3192             : /*
    3193             :  * Handle indirect tree backref
    3194             :  *
    3195             :  * Indirect tree backref means, we only know which tree the node belongs to.
    3196             :  * We still need to do a tree search to find out the parents. This is for
    3197             :  * TREE_BLOCK_REF backref (keyed or inlined).
    3198             :  *
    3199             :  * @ref_key:    The same as @ref_key in  handle_direct_tree_backref()
    3200             :  * @tree_key:   The first key of this tree block.
    3201             :  * @path:       A clean (released) path, to avoid allocating path every time
    3202             :  *              the function get called.
    3203             :  */
    3204           0 : static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
    3205             :                                         struct btrfs_path *path,
    3206             :                                         struct btrfs_key *ref_key,
    3207             :                                         struct btrfs_key *tree_key,
    3208             :                                         struct btrfs_backref_node *cur)
    3209             : {
    3210           0 :         struct btrfs_fs_info *fs_info = cache->fs_info;
    3211           0 :         struct btrfs_backref_node *upper;
    3212           0 :         struct btrfs_backref_node *lower;
    3213           0 :         struct btrfs_backref_edge *edge;
    3214           0 :         struct extent_buffer *eb;
    3215           0 :         struct btrfs_root *root;
    3216           0 :         struct rb_node *rb_node;
    3217           0 :         int level;
    3218           0 :         bool need_check = true;
    3219           0 :         int ret;
    3220             : 
    3221           0 :         root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
    3222           0 :         if (IS_ERR(root))
    3223           0 :                 return PTR_ERR(root);
    3224           0 :         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
    3225           0 :                 cur->cowonly = 1;
    3226             : 
    3227           0 :         if (btrfs_root_level(&root->root_item) == cur->level) {
    3228             :                 /* Tree root */
    3229           0 :                 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
    3230             :                 /*
    3231             :                  * For reloc backref cache, we may ignore reloc root.  But for
    3232             :                  * general purpose backref cache, we can't rely on
    3233             :                  * btrfs_should_ignore_reloc_root() as it may conflict with
    3234             :                  * current running relocation and lead to missing root.
    3235             :                  *
    3236             :                  * For general purpose backref cache, reloc root detection is
    3237             :                  * completely relying on direct backref (key->offset is parent
    3238             :                  * bytenr), thus only do such check for reloc cache.
    3239             :                  */
    3240           0 :                 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
    3241           0 :                         btrfs_put_root(root);
    3242           0 :                         list_add(&cur->list, &cache->useless_node);
    3243             :                 } else {
    3244           0 :                         cur->root = root;
    3245             :                 }
    3246           0 :                 return 0;
    3247             :         }
    3248             : 
    3249           0 :         level = cur->level + 1;
    3250             : 
    3251             :         /* Search the tree to find parent blocks referring to the block */
    3252           0 :         path->search_commit_root = 1;
    3253           0 :         path->skip_locking = 1;
    3254           0 :         path->lowest_level = level;
    3255           0 :         ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
    3256           0 :         path->lowest_level = 0;
    3257           0 :         if (ret < 0) {
    3258           0 :                 btrfs_put_root(root);
    3259           0 :                 return ret;
    3260             :         }
    3261           0 :         if (ret > 0 && path->slots[level] > 0)
    3262           0 :                 path->slots[level]--;
    3263             : 
    3264           0 :         eb = path->nodes[level];
    3265           0 :         if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
    3266           0 :                 btrfs_err(fs_info,
    3267             : "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
    3268             :                           cur->bytenr, level - 1, root->root_key.objectid,
    3269             :                           tree_key->objectid, tree_key->type, tree_key->offset);
    3270           0 :                 btrfs_put_root(root);
    3271           0 :                 ret = -ENOENT;
    3272           0 :                 goto out;
    3273             :         }
    3274             :         lower = cur;
    3275             : 
    3276             :         /* Add all nodes and edges in the path */
    3277           0 :         for (; level < BTRFS_MAX_LEVEL; level++) {
    3278           0 :                 if (!path->nodes[level]) {
    3279           0 :                         ASSERT(btrfs_root_bytenr(&root->root_item) ==
    3280             :                                lower->bytenr);
    3281             :                         /* Same as previous should_ignore_reloc_root() call */
    3282           0 :                         if (btrfs_should_ignore_reloc_root(root) &&
    3283           0 :                             cache->is_reloc) {
    3284           0 :                                 btrfs_put_root(root);
    3285           0 :                                 list_add(&lower->list, &cache->useless_node);
    3286             :                         } else {
    3287           0 :                                 lower->root = root;
    3288             :                         }
    3289             :                         break;
    3290             :                 }
    3291             : 
    3292           0 :                 edge = btrfs_backref_alloc_edge(cache);
    3293           0 :                 if (!edge) {
    3294           0 :                         btrfs_put_root(root);
    3295           0 :                         ret = -ENOMEM;
    3296           0 :                         goto out;
    3297             :                 }
    3298             : 
    3299           0 :                 eb = path->nodes[level];
    3300           0 :                 rb_node = rb_simple_search(&cache->rb_root, eb->start);
    3301           0 :                 if (!rb_node) {
    3302           0 :                         upper = btrfs_backref_alloc_node(cache, eb->start,
    3303           0 :                                                          lower->level + 1);
    3304           0 :                         if (!upper) {
    3305           0 :                                 btrfs_put_root(root);
    3306           0 :                                 btrfs_backref_free_edge(cache, edge);
    3307           0 :                                 ret = -ENOMEM;
    3308           0 :                                 goto out;
    3309             :                         }
    3310           0 :                         upper->owner = btrfs_header_owner(eb);
    3311           0 :                         if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
    3312           0 :                                 upper->cowonly = 1;
    3313             : 
    3314             :                         /*
    3315             :                          * If we know the block isn't shared we can avoid
    3316             :                          * checking its backrefs.
    3317             :                          */
    3318           0 :                         if (btrfs_block_can_be_shared(root, eb))
    3319           0 :                                 upper->checked = 0;
    3320             :                         else
    3321           0 :                                 upper->checked = 1;
    3322             : 
    3323             :                         /*
    3324             :                          * Add the block to pending list if we need to check its
    3325             :                          * backrefs, we only do this once while walking up a
    3326             :                          * tree as we will catch anything else later on.
    3327             :                          */
    3328           0 :                         if (!upper->checked && need_check) {
    3329           0 :                                 need_check = false;
    3330           0 :                                 list_add_tail(&edge->list[UPPER],
    3331             :                                               &cache->pending_edge);
    3332             :                         } else {
    3333           0 :                                 if (upper->checked)
    3334           0 :                                         need_check = true;
    3335           0 :                                 INIT_LIST_HEAD(&edge->list[UPPER]);
    3336             :                         }
    3337             :                 } else {
    3338           0 :                         upper = rb_entry(rb_node, struct btrfs_backref_node,
    3339             :                                          rb_node);
    3340           0 :                         ASSERT(upper->checked);
    3341           0 :                         INIT_LIST_HEAD(&edge->list[UPPER]);
    3342           0 :                         if (!upper->owner)
    3343           0 :                                 upper->owner = btrfs_header_owner(eb);
    3344             :                 }
    3345           0 :                 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
    3346             : 
    3347           0 :                 if (rb_node) {
    3348           0 :                         btrfs_put_root(root);
    3349           0 :                         break;
    3350             :                 }
    3351           0 :                 lower = upper;
    3352           0 :                 upper = NULL;
    3353             :         }
    3354           0 : out:
    3355           0 :         btrfs_release_path(path);
    3356           0 :         return ret;
    3357             : }
    3358             : 
/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
 *       links aren't yet bi-directional. Needs to finish such links.
 *       Use btrfs_backref_finish_upper_links() to finish such linkage.
 *
 * @path:       Released path for indirect tree backref lookup
 * @iter:       Released backref iter for extent tree search
 * @node_key:   The first key of the tree block
 *
 * Return 0 on success or a negative errno (-EUCLEAN on a corrupted or
 * invalid backref item) on failure.  The iterator is released on all paths.
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	/* Position the iterator at the first backref item of cur->bytenr. */
	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need check
		 * its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	/*
	 * Walk every backref item of this extent; each iteration converts the
	 * current (keyed or inline) ref into @key and dispatches on its type.
	 */
	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			/* Clear so the WARN_ON below confirms it was consumed. */
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			/* Obsolete on-disk format, treat the fs as broken. */
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			/* Not a tree backref type, ignore it. */
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	/* A leftover @exist means a previously-added backref item vanished. */
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}
    3492             : 
/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 *
 * Walks breadth-first from @start's upper edges, inserting every newly built
 * non-COW-only node into the cache rb tree and turning each pending edge into
 * a bi-directional link (adding it to the upper node's lower list).
 *
 * Return 0 on success, -EUCLEAN if an unchecked node, a COW-only mismatch or
 * a duplicate bytenr in the rb tree is found (the latter also panics via
 * btrfs_backref_panic()).
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		/* @start is the bottom of the tree being linked, so a leaf. */
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;

		edge = list_first_entry(&pending_edge,
				struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			/* The cached node gains a child, it is no longer lowest. */
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		/* Make the edge bi-directional: link it into upper's lower list. */
		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}
    3596             : 
    3597           0 : void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
    3598             :                                  struct btrfs_backref_node *node)
    3599             : {
    3600           0 :         struct btrfs_backref_node *lower;
    3601           0 :         struct btrfs_backref_node *upper;
    3602           0 :         struct btrfs_backref_edge *edge;
    3603             : 
    3604           0 :         while (!list_empty(&cache->useless_node)) {
    3605           0 :                 lower = list_first_entry(&cache->useless_node,
    3606             :                                    struct btrfs_backref_node, list);
    3607           0 :                 list_del_init(&lower->list);
    3608             :         }
    3609           0 :         while (!list_empty(&cache->pending_edge)) {
    3610           0 :                 edge = list_first_entry(&cache->pending_edge,
    3611             :                                 struct btrfs_backref_edge, list[UPPER]);
    3612           0 :                 list_del(&edge->list[UPPER]);
    3613           0 :                 list_del(&edge->list[LOWER]);
    3614           0 :                 lower = edge->node[LOWER];
    3615           0 :                 upper = edge->node[UPPER];
    3616           0 :                 btrfs_backref_free_edge(cache, edge);
    3617             : 
    3618             :                 /*
    3619             :                  * Lower is no longer linked to any upper backref nodes and
    3620             :                  * isn't in the cache, we can free it ourselves.
    3621             :                  */
    3622           0 :                 if (list_empty(&lower->upper) &&
    3623           0 :                     RB_EMPTY_NODE(&lower->rb_node))
    3624           0 :                         list_add(&lower->list, &cache->useless_node);
    3625             : 
    3626           0 :                 if (!RB_EMPTY_NODE(&upper->rb_node))
    3627           0 :                         continue;
    3628             : 
    3629             :                 /* Add this guy's upper edges to the list to process */
    3630           0 :                 list_for_each_entry(edge, &upper->upper, list[LOWER])
    3631           0 :                         list_add_tail(&edge->list[UPPER],
    3632             :                                       &cache->pending_edge);
    3633           0 :                 if (list_empty(&upper->upper))
    3634           0 :                         list_add(&upper->list, &cache->useless_node);
    3635             :         }
    3636             : 
    3637           0 :         while (!list_empty(&cache->useless_node)) {
    3638           0 :                 lower = list_first_entry(&cache->useless_node,
    3639             :                                    struct btrfs_backref_node, list);
    3640           0 :                 list_del_init(&lower->list);
    3641           0 :                 if (lower == node)
    3642           0 :                         node = NULL;
    3643           0 :                 btrfs_backref_drop_node(cache, lower);
    3644             :         }
    3645             : 
    3646           0 :         btrfs_backref_cleanup_node(cache, node);
    3647           0 :         ASSERT(list_empty(&cache->useless_node) &&
    3648             :                list_empty(&cache->pending_edge));
    3649           0 : }

Generated by: LCOV version 1.14