LCOV - code coverage report
Current view: top level - fs - mbcache.c (source / functions)
Test: fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date: 2023-07-31 20:08:34

                 Hit    Total   Coverage
  Lines:         156      167     93.4 %
  Functions:      16       17     94.1 %

          Line data    Source code
       1             : // SPDX-License-Identifier: GPL-2.0-only
       2             : #include <linux/spinlock.h>
       3             : #include <linux/slab.h>
       4             : #include <linux/list.h>
       5             : #include <linux/list_bl.h>
       6             : #include <linux/module.h>
       7             : #include <linux/sched.h>
       8             : #include <linux/workqueue.h>
       9             : #include <linux/mbcache.h>
      10             : 
      11             : /*
       12             :  * Mbcache is a simple key-value store. Keys need not be unique; however,
       13             :  * key-value pairs are expected to be unique (we use this fact in
      14             :  * mb_cache_entry_delete_or_get()).
      15             :  *
      16             :  * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
      17             :  * Ext4 also uses it for deduplication of xattr values stored in inodes.
       18             :  * They use a hash of the data as a key and provide a value that may
       19             :  * represent a block or inode number. That's why keys need not be unique
       20             :  * (hashes of different data may be the same). However, the user-provided
       21             :  * value always uniquely identifies a cache entry.
      22             :  *
      23             :  * We provide functions for creation and removal of entries, search by key,
       24             :  * and a special "delete entry with given key-value pair" operation. A
       25             :  * fixed-size hash table is used for fast key lookups.
      26             :  */
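
To make the key/value model concrete, here is a minimal editorial sketch (not
part of mbcache.c) of the insertion side of the ext4-style usage: the key is a
hash of an xattr block's contents and the value is that block's number. The
names cache, hash and bh are assumed to be supplied by the caller.

        /* Editorial sketch, assuming 'cache', 'hash' and 'bh' exist.
         * Remember that the block at bh->b_blocknr holds data hashing
         * to 'hash'. -EBUSY only means this (key, value) pair is
         * already cached, which callers typically treat as success. */
        error = mb_cache_entry_create(cache, GFP_NOFS, hash,
                                      bh->b_blocknr,
                                      true /* reusable by other inodes */);
        if (error && error != -EBUSY)
                goto failed;
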
      27             : 
      28             : struct mb_cache {
      29             :         /* Hash table of entries */
      30             :         struct hlist_bl_head    *c_hash;
      31             :         /* log2 of hash table size */
      32             :         int                     c_bucket_bits;
      33             :         /* Maximum entries in cache to avoid degrading hash too much */
      34             :         unsigned long           c_max_entries;
      35             :         /* Protects c_list, c_entry_count */
      36             :         spinlock_t              c_list_lock;
      37             :         struct list_head        c_list;
      38             :         /* Number of entries in cache */
      39             :         unsigned long           c_entry_count;
      40             :         struct shrinker         c_shrink;
      41             :         /* Work for shrinking when the cache has too many entries */
      42             :         struct work_struct      c_shrink_work;
      43             : };
      44             : 
      45             : static struct kmem_cache *mb_entry_cache;
      46             : 
      47             : static unsigned long mb_cache_shrink(struct mb_cache *cache,
      48             :                                      unsigned long nr_to_scan);
      49             : 
      50             : static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
      51             :                                                         u32 key)
      52             : {
      53     1566339 :         return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
      54             : }
      55             : 
      56             : /*
       57             :  * Number of entries to reclaim synchronously when there are too many
       58             :  * entries in the cache
      59             :  */
      60             : #define SYNC_SHRINK_BATCH 64
      61             : 
      62             : /*
      63             :  * mb_cache_entry_create - create entry in cache
       64             :  * @cache: cache where the entry should be created
       65             :  * @mask: gfp mask with which the entry should be allocated
       66             :  * @key: key of the entry
       67             :  * @value: value of the entry
       68             :  * @reusable: is the entry reusable by others?
       69             :  *
       70             :  * Creates an entry in @cache with key @key and value @value. The function
       71             :  * returns -EBUSY if an entry with the same key and value already exists in
       72             :  * the cache. Otherwise 0 is returned.
      73             :  */
      74      270740 : int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
      75             :                           u64 value, bool reusable)
      76             : {
      77      270740 :         struct mb_cache_entry *entry, *dup;
      78      270740 :         struct hlist_bl_node *dup_node;
      79      270740 :         struct hlist_bl_head *head;
      80             : 
      81             :         /* Schedule background reclaim if there are too many entries */
      82      270740 :         if (cache->c_entry_count >= cache->c_max_entries)
      83           4 :                 schedule_work(&cache->c_shrink_work);
      84             :         /* Do some sync reclaim if background reclaim cannot keep up */
      85      270740 :         if (cache->c_entry_count >= 2*cache->c_max_entries)
      86           0 :                 mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
      87             : 
      88      270740 :         entry = kmem_cache_alloc(mb_entry_cache, mask);
      89      270814 :         if (!entry)
      90             :                 return -ENOMEM;
      91             : 
      92      270814 :         INIT_LIST_HEAD(&entry->e_list);
      93             :         /*
      94             :          * We create entry with two references. One reference is kept by the
      95             :          * hash table, the other reference is used to protect us from
       96             :  * mb_cache_entry_delete_or_get() until the entry is fully set up. This
      97             :          * avoids nesting of cache->c_list_lock into hash table bit locks which
      98             :          * is problematic for RT.
      99             :          */
     100      270814 :         atomic_set(&entry->e_refcnt, 2);
     101      270814 :         entry->e_key = key;
     102      270814 :         entry->e_value = value;
     103      270814 :         entry->e_flags = 0;
     104      270814 :         if (reusable)
     105      270679 :                 set_bit(MBE_REUSABLE_B, &entry->e_flags);
     106      271075 :         head = mb_cache_entry_head(cache, key);
     107      270911 :         hlist_bl_lock(head);
     108     2043762 :         hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
     109     1874236 :                 if (dup->e_key == key && dup->e_value == value) {
     110      101346 :                         hlist_bl_unlock(head);
     111      101342 :                         kmem_cache_free(mb_entry_cache, entry);
     112      101342 :                         return -EBUSY;
     113             :                 }
     114             :         }
     115      169526 :         hlist_bl_add_head(&entry->e_hash_list, head);
     116      169477 :         hlist_bl_unlock(head);
     117      169480 :         spin_lock(&cache->c_list_lock);
     118      169611 :         list_add_tail(&entry->e_list, &cache->c_list);
     119      169611 :         cache->c_entry_count++;
     120      169611 :         spin_unlock(&cache->c_list_lock);
     121      169610 :         mb_cache_entry_put(cache, entry);
     122             : 
     123      169610 :         return 0;
     124             : }
     125             : EXPORT_SYMBOL(mb_cache_entry_create);
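
Given the two-reference scheme above, an entry's refcount is 2 only while it
is being set up or while exactly one user holds it. A hedged, editorial sketch
of the lifecycle, with the refcounts noted in comments:

        mb_cache_entry_create(cache, GFP_NOFS, key, value, true);
                        /* refcnt: 2 during setup, 1 (hash ref) on return */
        entry = mb_cache_entry_get(cache, key, value);
                        /* refcnt: 2 = hash table ref + our ref */
        mb_cache_entry_put(cache, entry);
                        /* refcnt: 1 again; entry stays in the cache */

This is also why mb_cache_entry_wait_unused() below waits for the refcount to
drop to 2: at that point only the hash table and the waiter itself still hold
references.
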
     126             : 
     127      169608 : void __mb_cache_entry_free(struct mb_cache *cache, struct mb_cache_entry *entry)
     128             : {
     129      169608 :         struct hlist_bl_head *head;
     130             : 
     131      169608 :         head = mb_cache_entry_head(cache, entry->e_key);
     132      169605 :         hlist_bl_lock(head);
     133      339210 :         hlist_bl_del(&entry->e_hash_list);
     134      169565 :         hlist_bl_unlock(head);
     135      169568 :         kmem_cache_free(mb_entry_cache, entry);
     136      169562 : }
     137             : EXPORT_SYMBOL(__mb_cache_entry_free);
     138             : 
     139             : /*
     140             :  * mb_cache_entry_wait_unused - wait to be the last user of the entry
     141             :  *
      142             :  * @entry: entry to work on
      143             :  *
      144             :  * Wait until we are the last user of @entry (i.e. its refcount drops to 2).
     145             :  */
     146           9 : void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
     147             : {
     148          18 :         wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 2);
     149           9 : }
     150             : EXPORT_SYMBOL(mb_cache_entry_wait_unused);
     151             : 
     152      179753 : static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
     153             :                                            struct mb_cache_entry *entry,
     154             :                                            u32 key)
     155             : {
     156      179753 :         struct mb_cache_entry *old_entry = entry;
     157      179753 :         struct hlist_bl_node *node;
     158      179753 :         struct hlist_bl_head *head;
     159             : 
     160      179753 :         head = mb_cache_entry_head(cache, key);
     161      179730 :         hlist_bl_lock(head);
     162      179853 :         if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
     163           2 :                 node = entry->e_hash_list.next;
     164             :         else
     165      179851 :                 node = hlist_bl_first(head);
     166     1539597 :         while (node) {
     167     1363651 :                 entry = hlist_bl_entry(node, struct mb_cache_entry,
     168             :                                        e_hash_list);
     169     1367558 :                 if (entry->e_key == key &&
     170        7814 :                     test_bit(MBE_REUSABLE_B, &entry->e_flags) &&
     171             :                     atomic_inc_not_zero(&entry->e_refcnt))
     172        3907 :                         goto out;
     173     1359744 :                 node = node->next;
     174             :         }
     175             :         entry = NULL;
     176      179853 : out:
     177      179853 :         hlist_bl_unlock(head);
     178      179926 :         if (old_entry)
     179           2 :                 mb_cache_entry_put(cache, old_entry);
     180             : 
     181      179926 :         return entry;
     182             : }
     183             : 
     184             : /*
     185             :  * mb_cache_entry_find_first - find the first reusable entry with the given key
     186             :  * @cache: cache where we should search
     187             :  * @key: key to look for
     188             :  *
      189             :  * Search @cache for a reusable entry with key @key. Grabs a reference to
      190             :  * the first reusable entry found and returns it.
     191             :  */
     192      179739 : struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
     193             :                                                  u32 key)
     194             : {
     195      179739 :         return __entry_find(cache, NULL, key);
     196             : }
     197             : EXPORT_SYMBOL(mb_cache_entry_find_first);
     198             : 
     199             : /*
     200             :  * mb_cache_entry_find_next - find next reusable entry with the same key
     201             :  * @cache: cache where we should search
     202             :  * @entry: entry to start search from
     203             :  *
      204             :  * Finds the next reusable entry in the hash chain with the same key as
      205             :  * @entry. If @entry is unhashed (which can happen when a deletion races
      206             :  * with the search), finds the first reusable entry in the chain. Drops the
      207             :  * reference to @entry and returns with a reference to the found entry.
     208             :  */
     209           2 : struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
     210             :                                                 struct mb_cache_entry *entry)
     211             : {
     212           2 :         return __entry_find(cache, entry, entry->e_key);
     213             : }
     214             : EXPORT_SYMBOL(mb_cache_entry_find_next);
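
The find_first()/find_next() pair is meant for walking all reusable entries
with a given key. Because find_next() drops the reference to the entry it was
passed, the caller only has to put the entry it stops at. A sketch of the
usual loop; try_to_reuse_block() is a hypothetical caller-side helper:

        struct mb_cache_entry *entry;

        for (entry = mb_cache_entry_find_first(cache, hash); entry;
             entry = mb_cache_entry_find_next(cache, entry)) {
                if (try_to_reuse_block(entry->e_value)) {
                        mb_cache_entry_touch(cache, entry);
                        mb_cache_entry_put(cache, entry);
                        break;
                }
        }
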
     215             : 
     216             : /*
     217             :  * mb_cache_entry_get - get a cache entry by value (and key)
      218             :  * @cache: cache we work with
      219             :  * @key: key
      220             :  * @value: value
     221             :  */
     222      162825 : struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
     223             :                                           u64 value)
     224             : {
     225      162825 :         struct hlist_bl_node *node;
     226      162825 :         struct hlist_bl_head *head;
     227      162825 :         struct mb_cache_entry *entry;
     228             : 
     229      162825 :         head = mb_cache_entry_head(cache, key);
     230      162832 :         hlist_bl_lock(head);
     231      892377 :         hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
     232     1017196 :                 if (entry->e_key == key && entry->e_value == value &&
     233             :                     atomic_inc_not_zero(&entry->e_refcnt))
     234      143888 :                         goto out;
     235             :         }
     236             :         entry = NULL;
     237      162968 : out:
     238      162968 :         hlist_bl_unlock(head);
     239      162894 :         return entry;
     240             : }
     241             : EXPORT_SYMBOL(mb_cache_entry_get);
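
Unlike the find_*() helpers, mb_cache_entry_get() also matches the value and
does not check the reusable bit, so it can pin one specific entry. A hedged
sketch of a simple presence check, with caller-supplied hash and blocknr:

        entry = mb_cache_entry_get(cache, hash, blocknr);
        if (entry) {
                /* (hash, blocknr) is cached; drop our reference again */
                mb_cache_entry_put(cache, entry);
        }
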
     242             : 
     243             : /* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
      244             :  * @cache: cache we work with
      245             :  * @key: key
      246             :  * @value: value
      247             :  *
      248             :  * Remove the entry with key @key and value @value from @cache. The removal
      249             :  * happens only if the entry is unused. The function returns NULL if the
      250             :  * entry was successfully removed or if there is no such entry in the cache.
      251             :  * Otherwise it grabs a reference to the entry that we failed to delete
      252             :  * because it still has users, and returns that entry.
     253             :  */
     254      162860 : struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
     255             :                                                     u32 key, u64 value)
     256             : {
     257      162860 :         struct mb_cache_entry *entry;
     258             : 
     259      162860 :         entry = mb_cache_entry_get(cache, key, value);
     260      162905 :         if (!entry)
     261             :                 return NULL;
     262             : 
     263             :         /*
     264             :          * Drop the ref we got from mb_cache_entry_get() and the initial hash
     265             :          * ref if we are the last user
     266             :          */
     267      143863 :         if (atomic_cmpxchg(&entry->e_refcnt, 2, 0) != 2)
     268             :                 return entry;
     269             : 
     270      143793 :         spin_lock(&cache->c_list_lock);
     271      143912 :         if (!list_empty(&entry->e_list))
     272      143912 :                 list_del_init(&entry->e_list);
     273      143912 :         cache->c_entry_count--;
     274      143912 :         spin_unlock(&cache->c_list_lock);
     275      143911 :         __mb_cache_entry_free(cache, entry);
     276      143911 :         return NULL;
     277             : }
     278             : EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
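
A caller that must make sure an entry is really gone (for example before
freeing the block or inode its value refers to) typically combines this with
mb_cache_entry_wait_unused() and retries, roughly like this editorial sketch:

        struct mb_cache_entry *entry;

        while ((entry = mb_cache_entry_delete_or_get(cache, key, value))) {
                /* Entry still has users: wait for them, drop our
                 * reference and try the delete again. */
                mb_cache_entry_wait_unused(entry);
                mb_cache_entry_put(cache, entry);
        }
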
     279             : 
     280             : /* mb_cache_entry_touch - cache entry got used
      281             :  * @cache: cache the entry belongs to
      282             :  * @entry: entry that got used
      283             :  *
      284             :  * Marks the entry as used to give it a higher chance of surviving in the cache.
     285             :  */
     286        3905 : void mb_cache_entry_touch(struct mb_cache *cache,
     287             :                           struct mb_cache_entry *entry)
     288             : {
     289        3905 :         set_bit(MBE_REFERENCED_B, &entry->e_flags);
     290        3905 : }
     291             : EXPORT_SYMBOL(mb_cache_entry_touch);
     292             : 
     293         522 : static unsigned long mb_cache_count(struct shrinker *shrink,
     294             :                                     struct shrink_control *sc)
     295             : {
     296         522 :         struct mb_cache *cache = container_of(shrink, struct mb_cache,
     297             :                                               c_shrink);
     298             : 
     299         522 :         return cache->c_entry_count;
     300             : }
     301             : 
     302             : /* Shrink number of entries in cache */
     303           9 : static unsigned long mb_cache_shrink(struct mb_cache *cache,
     304             :                                      unsigned long nr_to_scan)
     305             : {
     306           9 :         struct mb_cache_entry *entry;
     307           9 :         unsigned long shrunk = 0;
     308             : 
     309           9 :         spin_lock(&cache->c_list_lock);
     310        4484 :         while (nr_to_scan-- && !list_empty(&cache->c_list)) {
     311        4475 :                 entry = list_first_entry(&cache->c_list,
     312             :                                          struct mb_cache_entry, e_list);
     313             :                 /* Drop initial hash reference if there is no user */
     314        8946 :                 if (test_bit(MBE_REFERENCED_B, &entry->e_flags) ||
     315             :                     atomic_cmpxchg(&entry->e_refcnt, 1, 0) != 1) {
     316           4 :                         clear_bit(MBE_REFERENCED_B, &entry->e_flags);
     317           4 :                         list_move_tail(&entry->e_list, &cache->c_list);
     318           4 :                         continue;
     319             :                 }
     320        4471 :                 list_del_init(&entry->e_list);
     321        4471 :                 cache->c_entry_count--;
     322        4471 :                 spin_unlock(&cache->c_list_lock);
     323        4471 :                 __mb_cache_entry_free(cache, entry);
     324        4471 :                 shrunk++;
     325        4471 :                 cond_resched();
     326        4471 :                 spin_lock(&cache->c_list_lock);
     327             :         }
     328           9 :         spin_unlock(&cache->c_list_lock);
     329             : 
     330           9 :         return shrunk;
     331             : }
     332             : 
     333           5 : static unsigned long mb_cache_scan(struct shrinker *shrink,
     334             :                                    struct shrink_control *sc)
     335             : {
     336           5 :         struct mb_cache *cache = container_of(shrink, struct mb_cache,
     337             :                                               c_shrink);
     338           5 :         return mb_cache_shrink(cache, sc->nr_to_scan);
     339             : }
     340             : 
     341             : /* We shrink 1/X of the cache when we have too many entries in it */
     342             : #define SHRINK_DIVISOR 16
     343             : 
     344           4 : static void mb_cache_shrink_worker(struct work_struct *work)
     345             : {
     346           4 :         struct mb_cache *cache = container_of(work, struct mb_cache,
     347             :                                               c_shrink_work);
     348           4 :         mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
     349           4 : }
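
With the sizing used in the editorial example after mb_cache_create() below
(c_max_entries = 16384), each background run would scan
16384 / SHRINK_DIVISOR = 1024 entries; mb_cache_entry_create() additionally
falls back to synchronous reclaim in batches of SYNC_SHRINK_BATCH = 64 once
the entry count reaches twice the maximum.
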
     350             : 
     351             : /*
     352             :  * mb_cache_create - create cache
     353             :  * @bucket_bits: log2 of the hash table size
     354             :  *
      355             :  * Create a cache whose hash table has 2^bucket_bits buckets.
     356             :  */
     357        2546 : struct mb_cache *mb_cache_create(int bucket_bits)
     358             : {
     359        2546 :         struct mb_cache *cache;
     360        2546 :         unsigned long bucket_count = 1UL << bucket_bits;
     361        2546 :         unsigned long i;
     362             : 
     363        2546 :         cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
     364        2546 :         if (!cache)
     365           0 :                 goto err_out;
     366        2546 :         cache->c_bucket_bits = bucket_bits;
     367        2546 :         cache->c_max_entries = bucket_count << 4;
     368        2546 :         INIT_LIST_HEAD(&cache->c_list);
     369        2546 :         spin_lock_init(&cache->c_list_lock);
     370        2546 :         cache->c_hash = kmalloc_array(bucket_count,
     371             :                                       sizeof(struct hlist_bl_head),
     372             :                                       GFP_KERNEL);
     373        2546 :         if (!cache->c_hash) {
     374           0 :                 kfree(cache);
     375           0 :                 goto err_out;
     376             :         }
     377     2609650 :         for (i = 0; i < bucket_count; i++)
     378     2607104 :                 INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
     379             : 
     380        2546 :         cache->c_shrink.count_objects = mb_cache_count;
     381        2546 :         cache->c_shrink.scan_objects = mb_cache_scan;
     382        2546 :         cache->c_shrink.seeks = DEFAULT_SEEKS;
     383        2546 :         if (register_shrinker(&cache->c_shrink, "mbcache-shrinker")) {
     384           0 :                 kfree(cache->c_hash);
     385           0 :                 kfree(cache);
     386           0 :                 goto err_out;
     387             :         }
     388             : 
     389        2546 :         INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
     390             : 
     391        2546 :         return cache;
     392             : 
     393             : err_out:
     394             :         return NULL;
     395             : }
     396             : EXPORT_SYMBOL(mb_cache_create);
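
For sizing intuition: with bucket_bits = 10 (an editorial example, not a value
mandated by this file), the cache gets 2^10 = 1024 hash buckets and
c_max_entries = 1024 << 4 = 16384, so background reclaim starts once
mb_cache_entry_create() sees that many entries. A minimal lifecycle sketch:

        struct mb_cache *cache;

        cache = mb_cache_create(10);    /* 2^10 = 1024 buckets */
        if (!cache)
                return -ENOMEM;
        /* ... create, find and delete entries ... */
        mb_cache_destroy(cache);
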
     397             : 
     398             : /*
     399             :  * mb_cache_destroy - destroy cache
     400             :  * @cache: the cache to destroy
     401             :  *
      402             :  * Frees all entries in the cache and the cache itself. The caller must make
      403             :  * sure nobody (except the shrinker) can reach @cache when calling this.
     404             :  */
     405        2546 : void mb_cache_destroy(struct mb_cache *cache)
     406             : {
     407        2546 :         struct mb_cache_entry *entry, *next;
     408             : 
     409        2546 :         unregister_shrinker(&cache->c_shrink);
     410             : 
     411             :         /*
     412             :          * We don't bother with any locking. Cache must not be used at this
     413             :          * point.
     414             :          */
     415       23774 :         list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
     416       21228 :                 list_del(&entry->e_list);
     417       21228 :                 WARN_ON(atomic_read(&entry->e_refcnt) != 1);
     418       21228 :                 mb_cache_entry_put(cache, entry);
     419             :         }
     420        2546 :         kfree(cache->c_hash);
     421        2546 :         kfree(cache);
     422        2546 : }
     423             : EXPORT_SYMBOL(mb_cache_destroy);
     424             : 
     425          12 : static int __init mbcache_init(void)
     426             : {
     427          12 :         mb_entry_cache = kmem_cache_create("mbcache",
     428             :                                 sizeof(struct mb_cache_entry), 0,
     429             :                                 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
     430          12 :         if (!mb_entry_cache)
     431           0 :                 return -ENOMEM;
     432             :         return 0;
     433             : }
     434             : 
     435           0 : static void __exit mbcache_exit(void)
     436             : {
     437           0 :         kmem_cache_destroy(mb_entry_cache);
     438           0 : }
     439             : 
     440             : module_init(mbcache_init)
     441             : module_exit(mbcache_exit)
     442             : 
     443             : MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
     444             : MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
     445             : MODULE_LICENSE("GPL");

Generated by: LCOV version 1.14