LCOV - code coverage report
Current view: top level - include/linux - buffer_head.h (source / functions)
Test:         fstests of 6.5.0-rc4-xfsx @ Mon Jul 31 20:08:34 PDT 2023
Date:         2023-07-31 20:08:34

                     Hit    Total  Coverage
        Lines:        72       77    93.5 %
        Functions:     7        7   100.0 %

          Line data    Source code
       1             : /* SPDX-License-Identifier: GPL-2.0 */
       2             : /*
       3             :  * include/linux/buffer_head.h
       4             :  *
       5             :  * Everything to do with buffer_heads.
       6             :  */
       7             : 
       8             : #ifndef _LINUX_BUFFER_HEAD_H
       9             : #define _LINUX_BUFFER_HEAD_H
      10             : 
      11             : #include <linux/types.h>
      12             : #include <linux/blk_types.h>
      13             : #include <linux/fs.h>
      14             : #include <linux/linkage.h>
      15             : #include <linux/pagemap.h>
      16             : #include <linux/wait.h>
      17             : #include <linux/atomic.h>
      18             : 
      19             : #ifdef CONFIG_BLOCK
      20             : 
      21             : enum bh_state_bits {
      22             :         BH_Uptodate,    /* Contains valid data */
      23             :         BH_Dirty,       /* Is dirty */
      24             :         BH_Lock,        /* Is locked */
      25             :         BH_Req,         /* Has been submitted for I/O */
      26             : 
      27             :         BH_Mapped,      /* Has a disk mapping */
      28             :         BH_New,         /* Disk mapping was newly created by get_block */
      29             :         BH_Async_Read,  /* Is under end_buffer_async_read I/O */
      30             :         BH_Async_Write, /* Is under end_buffer_async_write I/O */
      31             :         BH_Delay,       /* Buffer is not yet allocated on disk */
      32             :         BH_Boundary,    /* Block is followed by a discontiguity */
      33             :         BH_Write_EIO,   /* I/O error on write */
      34             :         BH_Unwritten,   /* Buffer is allocated on disk but not written */
       35             :         BH_Quiet,       /* Buffer error printks to be quiet */
      36             :         BH_Meta,        /* Buffer contains metadata */
      37             :         BH_Prio,        /* Buffer should be submitted with REQ_PRIO */
      38             :         BH_Defer_Completion, /* Defer AIO completion to workqueue */
      39             : 
      40             :         BH_PrivateStart,/* not a state bit, but the first bit available
      41             :                          * for private allocation by other entities
      42             :                          */
      43             : };
      44             : 
      45             : #define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)
      46             : 
      47             : struct page;
      48             : struct buffer_head;
      49             : struct address_space;
      50             : typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
      51             : 
      52             : /*
      53             :  * Historically, a buffer_head was used to map a single block
      54             :  * within a page, and of course as the unit of I/O through the
      55             :  * filesystem and block layers.  Nowadays the basic I/O unit
      56             :  * is the bio, and buffer_heads are used for extracting block
      57             :  * mappings (via a get_block_t call), for tracking state within
      58             :  * a page (via a page_mapping) and for wrapping bio submission
      59             :  * for backward compatibility reasons (e.g. submit_bh).
      60             :  */
      61             : struct buffer_head {
      62             :         unsigned long b_state;          /* buffer state bitmap (see above) */
      63             :         struct buffer_head *b_this_page;/* circular list of page's buffers */
      64             :         union {
      65             :                 struct page *b_page;    /* the page this bh is mapped to */
      66             :                 struct folio *b_folio;  /* the folio this bh is mapped to */
      67             :         };
      68             : 
      69             :         sector_t b_blocknr;             /* start block number */
      70             :         size_t b_size;                  /* size of mapping */
      71             :         char *b_data;                   /* pointer to data within the page */
      72             : 
      73             :         struct block_device *b_bdev;
      74             :         bh_end_io_t *b_end_io;          /* I/O completion */
      75             :         void *b_private;                /* reserved for b_end_io */
      76             :         struct list_head b_assoc_buffers; /* associated with another mapping */
      77             :         struct address_space *b_assoc_map;      /* mapping this buffer is
      78             :                                                    associated with */
      79             :         atomic_t b_count;               /* users using this buffer_head */
      80             :         spinlock_t b_uptodate_lock;     /* Used by the first bh in a page, to
      81             :                                          * serialise IO completion of other
      82             :                                          * buffers in the page */
      83             : };
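
As the comment above notes, the main remaining job of a buffer_head is carrying a
block mapping out of a filesystem's get_block_t callback. A minimal sketch of such
a callback, assuming a hypothetical 1:1 logical-to-physical mapping (the function
name and mapping policy are illustrative, not from this header):

	/* Hypothetical get_block_t: pretend logical block == physical block. */
	static int example_get_block(struct inode *inode, sector_t iblock,
				     struct buffer_head *bh, int create)
	{
		/* map_bh() (defined later in this header) sets BH_Mapped and
		 * fills b_bdev, b_blocknr and b_size from the superblock. */
		map_bh(bh, inode->i_sb, iblock);
		return 0;
	}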
      84             : 
      85             : /*
      86             :  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
      87             :  * and buffer_foo() functions.
       88             :  * To avoid re-setting buffer flags that are already set, because that
       89             :  * causes a costly cache line transition, check the flag first.
      90             :  */
      91             : #define BUFFER_FNS(bit, name)                                           \
      92             : static __always_inline void set_buffer_##name(struct buffer_head *bh)   \
      93             : {                                                                       \
      94             :         if (!test_bit(BH_##bit, &(bh)->b_state))                 \
      95             :                 set_bit(BH_##bit, &(bh)->b_state);                       \
      96             : }                                                                       \
      97             : static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
      98             : {                                                                       \
      99             :         clear_bit(BH_##bit, &(bh)->b_state);                             \
     100             : }                                                                       \
     101             : static __always_inline int buffer_##name(const struct buffer_head *bh)  \
     102             : {                                                                       \
     103             :         return test_bit(BH_##bit, &(bh)->b_state);                       \
     104             : }
     105             : 
     106             : /*
     107             :  * test_set_buffer_foo() and test_clear_buffer_foo()
     108             :  */
     109             : #define TAS_BUFFER_FNS(bit, name)                                       \
     110             : static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
     111             : {                                                                       \
     112             :         return test_and_set_bit(BH_##bit, &(bh)->b_state);               \
     113             : }                                                                       \
     114             : static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
     115             : {                                                                       \
     116             :         return test_and_clear_bit(BH_##bit, &(bh)->b_state);             \
     117             : }                                                                       \
     118             : 
     119             : /*
     120             :  * Emit the buffer bitops functions.   Note that there are also functions
     121             :  * of the form "mark_buffer_foo()".  These are higher-level functions which
     122             :  * do something in addition to setting a b_state bit.
     123             :  */
     124   622826974 : BUFFER_FNS(Dirty, dirty)
     125    87877101 : TAS_BUFFER_FNS(Dirty, dirty)
     126    73898935 : BUFFER_FNS(Lock, locked)
     127      448187 : BUFFER_FNS(Req, req)
     128    22502708 : TAS_BUFFER_FNS(Req, req)
     129   531629201 : BUFFER_FNS(Mapped, mapped)
     130   469193907 : BUFFER_FNS(New, new)
     131      218869 : BUFFER_FNS(Async_Read, async_read)
     132   148468138 : BUFFER_FNS(Async_Write, async_write)
     133   217997891 : BUFFER_FNS(Delay, delay)
     134   141922743 : BUFFER_FNS(Boundary, boundary)
     135   109361934 : BUFFER_FNS(Write_EIO, write_io_error)
     136   116522879 : BUFFER_FNS(Unwritten, unwritten)
     137   153584802 : BUFFER_FNS(Meta, meta)
     138   153584857 : BUFFER_FNS(Prio, prio)
     139           0 : BUFFER_FNS(Defer_Completion, defer_completion)
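
For reference, each BUFFER_FNS(bit, name) line above stamps out three inline
helpers; BUFFER_FNS(Dirty, dirty), for example, expands to:

	static __always_inline void set_buffer_dirty(struct buffer_head *bh)
	{
		if (!test_bit(BH_Dirty, &bh->b_state))
			set_bit(BH_Dirty, &bh->b_state);
	}
	static __always_inline void clear_buffer_dirty(struct buffer_head *bh)
	{
		clear_bit(BH_Dirty, &bh->b_state);
	}
	static __always_inline int buffer_dirty(const struct buffer_head *bh)
	{
		return test_bit(BH_Dirty, &bh->b_state);
	}

and TAS_BUFFER_FNS(Dirty, dirty) likewise yields test_set_buffer_dirty() and
test_clear_buffer_dirty().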
     140             : 
     141             : static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
     142             : {
     143             :         /*
     144             :          * If somebody else already set this uptodate, they will
     145             :          * have done the memory barrier, and a reader will thus
     146             :          * see *some* valid buffer state.
     147             :          *
     148             :          * Any other serialization (with IO errors or whatever that
     149             :          * might clear the bit) has to come from other state (eg BH_Lock).
     150             :          */
     151   668293358 :         if (test_bit(BH_Uptodate, &bh->b_state))
     152             :                 return;
     153             : 
     154             :         /*
     155             :          * make it consistent with folio_mark_uptodate
      156             :          * pairs with test_bit_acquire in buffer_uptodate
     157             :          */
     158    55861487 :         smp_mb__before_atomic();
     159    55861487 :         set_bit(BH_Uptodate, &bh->b_state);
     160             : }
     161             : 
     162             : static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
     163             : {
     164      275988 :         clear_bit(BH_Uptodate, &bh->b_state);
     165          23 : }
     166             : 
     167             : static __always_inline int buffer_uptodate(const struct buffer_head *bh)
     168             : {
     169             :         /*
     170             :          * make it consistent with folio_test_uptodate
     171             :          * pairs with smp_mb__before_atomic in set_buffer_uptodate
     172             :          */
     173   953059376 :         return test_bit_acquire(BH_Uptodate, &bh->b_state);
     174             : }
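
This pairing is what lets one CPU publish a filled buffer to another without
further locking; an illustrative (hypothetical) interleaving:

	/*
	 *   CPU 0 (I/O completion)          CPU 1 (reader)
	 *   ----------------------          --------------
	 *   memcpy(bh->b_data, ...);        if (buffer_uptodate(bh))
	 *   set_buffer_uptodate(bh);                read bh->b_data;
	 *
	 * The barrier in set_buffer_uptodate() orders the data writes
	 * before the flag set; the acquire in buffer_uptodate() orders
	 * the flag test before the data reads, so a reader that sees
	 * the bit also sees the data.
	 */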
     175             : 
     176             : #define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
     177             : 
     178             : /* If we *know* page->private refers to buffer_heads */
     179             : #define page_buffers(page)                                      \
     180             :         ({                                                      \
     181             :                 BUG_ON(!PagePrivate(page));                     \
     182             :                 ((struct buffer_head *)page_private(page));     \
     183             :         })
     184             : #define page_has_buffers(page)  PagePrivate(page)
     185             : #define folio_buffers(folio)            folio_get_private(folio)
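
Since b_this_page links a page's buffers into a circular list, the canonical
walk over them looks like this (a sketch; the helper name is hypothetical):

	static void example_for_each_bh(struct page *page)
	{
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		do {
			/* examine one block's worth of state here,
			 * e.g. buffer_dirty(bh) or buffer_mapped(bh) */
			bh = bh->b_this_page;
		} while (bh != head);
	}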
     186             : 
     187             : void buffer_check_dirty_writeback(struct folio *folio,
     188             :                                      bool *dirty, bool *writeback);
     189             : 
     190             : /*
     191             :  * Declarations
     192             :  */
     193             : 
     194             : void mark_buffer_dirty(struct buffer_head *bh);
     195             : void mark_buffer_write_io_error(struct buffer_head *bh);
     196             : void touch_buffer(struct buffer_head *bh);
     197             : void set_bh_page(struct buffer_head *bh,
     198             :                 struct page *page, unsigned long offset);
     199             : void folio_set_bh(struct buffer_head *bh, struct folio *folio,
     200             :                   unsigned long offset);
     201             : bool try_to_free_buffers(struct folio *);
     202             : struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
     203             :                                         bool retry);
     204             : struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
     205             :                 bool retry);
     206             : void create_empty_buffers(struct page *, unsigned long,
     207             :                         unsigned long b_state);
     208             : void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
     209             :                                 unsigned long b_state);
     210             : void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
     211             : void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
     212             : void end_buffer_async_write(struct buffer_head *bh, int uptodate);
     213             : 
     214             : /* Things to do with buffers at mapping->private_list */
     215             : void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
     216             : int inode_has_buffers(struct inode *);
     217             : void invalidate_inode_buffers(struct inode *);
     218             : int remove_inode_buffers(struct inode *inode);
     219             : int sync_mapping_buffers(struct address_space *mapping);
     220             : int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
     221             :                                   bool datasync);
     222             : int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
     223             :                           bool datasync);
     224             : void clean_bdev_aliases(struct block_device *bdev, sector_t block,
     225             :                         sector_t len);
     226             : static inline void clean_bdev_bh_alias(struct buffer_head *bh)
     227             : {
     228    31815332 :         clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
     229        1115 : }
     230             : 
     231             : void mark_buffer_async_write(struct buffer_head *bh);
     232             : void __wait_on_buffer(struct buffer_head *);
     233             : wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
     234             : struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
     235             :                         unsigned size);
     236             : struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
     237             :                                   unsigned size, gfp_t gfp);
     238             : void __brelse(struct buffer_head *);
     239             : void __bforget(struct buffer_head *);
     240             : void __breadahead(struct block_device *, sector_t block, unsigned int size);
     241             : struct buffer_head *__bread_gfp(struct block_device *,
     242             :                                 sector_t block, unsigned size, gfp_t gfp);
     243             : void invalidate_bh_lrus(void);
     244             : void invalidate_bh_lrus_cpu(void);
     245             : bool has_bh_in_lru(int cpu, void *dummy);
     246             : struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
     247             : void free_buffer_head(struct buffer_head * bh);
     248             : void unlock_buffer(struct buffer_head *bh);
     249             : void __lock_buffer(struct buffer_head *bh);
     250             : int sync_dirty_buffer(struct buffer_head *bh);
     251             : int __sync_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
     252             : void write_dirty_buffer(struct buffer_head *bh, blk_opf_t op_flags);
     253             : void submit_bh(blk_opf_t, struct buffer_head *);
     254             : void write_boundary_block(struct block_device *bdev,
     255             :                         sector_t bblock, unsigned blocksize);
     256             : int bh_uptodate_or_lock(struct buffer_head *bh);
     257             : int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
     258             : void __bh_read_batch(int nr, struct buffer_head *bhs[],
     259             :                      blk_opf_t op_flags, bool force_lock);
     260             : 
     261             : extern int buffer_heads_over_limit;
     262             : 
     263             : /*
     264             :  * Generic address_space_operations implementations for buffer_head-backed
     265             :  * address_spaces.
     266             :  */
     267             : void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
     268             : int block_write_full_page(struct page *page, get_block_t *get_block,
     269             :                                 struct writeback_control *wbc);
     270             : int __block_write_full_folio(struct inode *inode, struct folio *folio,
     271             :                         get_block_t *get_block, struct writeback_control *wbc,
     272             :                         bh_end_io_t *handler);
     273             : int block_read_full_folio(struct folio *, get_block_t *);
     274             : bool block_is_partially_uptodate(struct folio *, size_t from, size_t count);
     275             : int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
     276             :                 struct page **pagep, get_block_t *get_block);
     277             : int __block_write_begin(struct page *page, loff_t pos, unsigned len,
     278             :                 get_block_t *get_block);
     279             : int block_write_end(struct file *, struct address_space *,
     280             :                                 loff_t, unsigned, unsigned,
     281             :                                 struct page *, void *);
     282             : int generic_write_end(struct file *, struct address_space *,
     283             :                                 loff_t, unsigned, unsigned,
     284             :                                 struct page *, void *);
     285             : void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to);
     286             : void clean_page_buffers(struct page *page);
     287             : int cont_write_begin(struct file *, struct address_space *, loff_t,
     288             :                         unsigned, struct page **, void **,
     289             :                         get_block_t *, loff_t *);
     290             : int generic_cont_expand_simple(struct inode *inode, loff_t size);
     291             : int block_commit_write(struct page *page, unsigned from, unsigned to);
     292             : int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
     293             :                                 get_block_t get_block);
     294             : /* Convert errno to return value from ->page_mkwrite() call */
     295             : static inline vm_fault_t block_page_mkwrite_return(int err)
     296             : {
     297     8547348 :         if (err == 0)
     298             :                 return VM_FAULT_LOCKED;
     299       91944 :         if (err == -EFAULT || err == -EAGAIN)
     300             :                 return VM_FAULT_NOPAGE;
     301       78258 :         if (err == -ENOMEM)
     302           0 :                 return VM_FAULT_OOM;
     303             :         /* -ENOSPC, -EDQUOT, -EIO ... */
     304             :         return VM_FAULT_SIGBUS;
     305             : }
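
A ->page_mkwrite() implementation typically funnels its errno through the
helper above; a minimal sketch, reusing the hypothetical example_get_block
from earlier:

	static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
	{
		int err = block_page_mkwrite(vmf->vma, vmf, example_get_block);

		return block_page_mkwrite_return(err);
	}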
     306             : sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
     307             : int block_truncate_page(struct address_space *, loff_t, get_block_t *);
     308             : 
     309             : #ifdef CONFIG_MIGRATION
     310             : extern int buffer_migrate_folio(struct address_space *,
     311             :                 struct folio *dst, struct folio *src, enum migrate_mode);
     312             : extern int buffer_migrate_folio_norefs(struct address_space *,
     313             :                 struct folio *dst, struct folio *src, enum migrate_mode);
     314             : #else
     315             : #define buffer_migrate_folio NULL
     316             : #define buffer_migrate_folio_norefs NULL
     317             : #endif
     318             : 
     319             : void buffer_init(void);
     320             : 
     321             : /*
     322             :  * inline definitions
     323             :  */
     324             : 
     325             : static inline void get_bh(struct buffer_head *bh)
     326             : {
     327   247636664 :         atomic_inc(&bh->b_count);
     328     1881604 : }
     329             : 
     330             : static inline void put_bh(struct buffer_head *bh)
     331             : {
     332   255173174 :         smp_mb__before_atomic();
     333   255173174 :         atomic_dec(&bh->b_count);
     334       65890 : }
     335             : 
     336             : static inline void brelse(struct buffer_head *bh)
     337             : {
     338   192127600 :         if (bh)
     339   156866337 :                 __brelse(bh);
     340      460144 : }
     341             : 
     342             : static inline void bforget(struct buffer_head *bh)
     343             : {
     344           0 :         if (bh)
     345           0 :                 __bforget(bh);
     346             : }
     347             : 
     348             : static inline struct buffer_head *
     349             : sb_bread(struct super_block *sb, sector_t block)
     350             : {
     351             :         return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
     352             : }
     353             : 
     354             : static inline struct buffer_head *
     355             : sb_bread_unmovable(struct super_block *sb, sector_t block)
     356             : {
     357             :         return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
     358             : }
     359             : 
     360             : static inline void
     361             : sb_breadahead(struct super_block *sb, sector_t block)
     362             : {
     363             :         __breadahead(sb->s_bdev, block, sb->s_blocksize);
     364             : }
     365             : 
     366             : static inline struct buffer_head *
     367             : sb_getblk(struct super_block *sb, sector_t block)
     368             : {
     369   111074304 :         return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
     370             : }
     371             : 
     372             : 
     373             : static inline struct buffer_head *
     374             : sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
     375             : {
     376    32972411 :         return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
     377             : }
     378             : 
     379             : static inline struct buffer_head *
     380             : sb_find_get_block(struct super_block *sb, sector_t block)
     381             : {
     382      664233 :         return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
     383             : }
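
A typical metadata read pairs one of the lookup helpers above with brelse();
a minimal sketch (the caller and error handling are illustrative):

	static int example_read_block(struct super_block *sb, sector_t blkno)
	{
		struct buffer_head *bh = sb_bread(sb, blkno); /* read + wait */

		if (!bh)
			return -EIO;	/* block was unreadable */
		/* ... use bh->b_data, one sb->s_blocksize worth of bytes ... */
		brelse(bh);		/* drop the reference sb_bread() took */
		return 0;
	}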
     384             : 
     385             : static inline void
     386    37572954 : map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
     387             : {
     388    37572954 :         set_buffer_mapped(bh);
     389    37653564 :         bh->b_bdev = sb->s_bdev;
     390    37653564 :         bh->b_blocknr = block;
     391    37653564 :         bh->b_size = sb->s_blocksize;
     392    37653564 : }
     393             : 
     394    10735570 : static inline void wait_on_buffer(struct buffer_head *bh)
     395             : {
     396    10735570 :         might_sleep();
     397    21472498 :         if (buffer_locked(bh))
     398     4231182 :                 __wait_on_buffer(bh);
     399    10736243 : }
     400             : 
     401    79670863 : static inline int trylock_buffer(struct buffer_head *bh)
     402             : {
     403    79670863 :         return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
     404             : }
     405             : 
     406    73605727 : static inline void lock_buffer(struct buffer_head *bh)
     407             : {
     408    73605727 :         might_sleep();
     409    73607095 :         if (!trylock_buffer(bh))
     410       80326 :                 __lock_buffer(bh);
     411    73611137 : }
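
Putting the locking and state helpers together, the usual modify-a-block
pattern takes BH_Lock around the update and marks the buffer dirty afterwards
(an illustrative sketch, not a definitive recipe):

	static void example_zero_block(struct buffer_head *bh)
	{
		lock_buffer(bh);		/* may sleep on BH_Lock */
		memset(bh->b_data, 0, bh->b_size);	/* modify the block */
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty(bh);		/* queue for writeback */
	}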
     412             : 
     413             : static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
     414             :                                                    sector_t block,
     415             :                                                    unsigned size)
     416             : {
     417        2526 :         return __getblk_gfp(bdev, block, size, 0);
     418             : }
     419             : 
     420             : static inline struct buffer_head *__getblk(struct block_device *bdev,
     421             :                                            sector_t block,
     422             :                                            unsigned size)
     423             : {
     424      622591 :         return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
     425             : }
     426             : 
     427         498 : static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
     428             : {
     429         996 :         if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
     430          52 :                 if (!buffer_uptodate(bh))
     431          26 :                         __bh_read(bh, op_flags, false);
     432             :                 else
     433           0 :                         unlock_buffer(bh);
     434             :         }
     435         498 : }
     436             : 
     437     3626775 : static inline void bh_read_nowait(struct buffer_head *bh, blk_opf_t op_flags)
     438             : {
     439     3626775 :         if (!bh_uptodate_or_lock(bh))
     440     3622875 :                 __bh_read(bh, op_flags, false);
     441     3626777 : }
     442             : 
      443             : /* Returns 1 if the buffer was already uptodate, 0 on success, -EIO on error. */
     444        2526 : static inline int bh_read(struct buffer_head *bh, blk_opf_t op_flags)
     445             : {
     446        2526 :         if (bh_uptodate_or_lock(bh))
     447             :                 return 1;
     448        2509 :         return __bh_read(bh, op_flags, true);
     449             : }
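
Because of this three-way return convention, callers of bh_read() usually only
need to distinguish negative from non-negative; a sketch:

	static int example_read_bh(struct buffer_head *bh)
	{
		int ret = bh_read(bh, 0);

		if (ret < 0)
			return ret;	/* -EIO: the read failed */
		/* ret is 0 or 1: bh->b_data is valid either way */
		return 0;
	}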
     450             : 
     451             : static inline void bh_read_batch(int nr, struct buffer_head *bhs[])
     452             : {
     453             :         __bh_read_batch(nr, bhs, 0, true);
     454             : }
     455             : 
     456             : static inline void bh_readahead_batch(int nr, struct buffer_head *bhs[],
     457             :                                       blk_opf_t op_flags)
     458             : {
     459        6926 :         __bh_read_batch(nr, bhs, op_flags, false);
     460        1957 : }
     461             : 
     462             : /**
     463             :  *  __bread() - reads a specified block and returns the bh
     464             :  *  @bdev: the block_device to read from
     465             :  *  @block: number of block
     466             :  *  @size: size (in bytes) to read
     467             :  *
      468             :  *  Reads a specified block, and returns the buffer head that contains it.
      469             :  *  The page cache is allocated from the movable area so that it can be migrated.
     470             :  *  It returns NULL if the block was unreadable.
     471             :  */
     472             : static inline struct buffer_head *
     473             : __bread(struct block_device *bdev, sector_t block, unsigned size)
     474             : {
     475          12 :         return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
     476             : }
     477             : 
     478             : bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
     479             : 
     480             : #else /* CONFIG_BLOCK */
     481             : 
     482             : static inline void buffer_init(void) {}
     483             : static inline bool try_to_free_buffers(struct folio *folio) { return true; }
     484             : static inline int inode_has_buffers(struct inode *inode) { return 0; }
     485             : static inline void invalidate_inode_buffers(struct inode *inode) {}
     486             : static inline int remove_inode_buffers(struct inode *inode) { return 1; }
     487             : static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
     488             : static inline void invalidate_bh_lrus_cpu(void) {}
     489             : static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
     490             : #define buffer_heads_over_limit 0
     491             : 
     492             : #endif /* CONFIG_BLOCK */
     493             : #endif /* _LINUX_BUFFER_HEAD_H */

Generated by: LCOV version 1.14