// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;
struct xfile;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */
#ifdef CONFIG_XFS_IN_MEMORY_FILE
# define _XBF_DIRECT_MAP (1u << 23)/* pages directly mapped to storage */
#else
# define _XBF_DIRECT_MAP (0)
#endif

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31)/* do not map the buffer */


typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_DIRECT_MAP,	"DIRECT_MAP" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }

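/*
 * This table is consumed by the buffer tracepoints to pretty-print b_flags.
 * A minimal sketch of the pattern (illustrative; the real users live in
 * xfs_trace.h):
 *
 *	TP_printk("... flags %s ...",
 *		  __print_flags(__entry->flags, "|", XFS_BUF_FLAGS))
 */
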
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

struct xfs_buf_cache {
	spinlock_t		bc_lock;
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	union {
		struct block_device	*bt_bdev;
		struct xfile		*bt_xfile;
	};
	struct dax_device	*bt_daxdev;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	struct xfs_buf_cache	*bt_cache;
	unsigned int		bt_flags;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;

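/*
 * The sector masks are derived directly from the sector sizes; a minimal
 * sketch of the invariant (mirroring what xfs_setsize_buftarg() sets up):
 *
 *	btp->bt_meta_sectormask = btp->bt_meta_sectorsize - 1;
 *
 * so an offset is metadata-sector aligned iff
 * (offset & btp->bt_meta_sectormask) == 0.
 */
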
#ifdef CONFIG_XFS_IN_MEMORY_FILE
/* in-memory buftarg via bt_xfile */
# define XFS_BUFTARG_XFILE	(1U << 0)
/*
 * Buffer pages are direct-mapped to the xfile; caller does not care about
 * transactional updates.
 */
# define XFS_BUFTARG_DIRECT_MAP	(1U << 1)
#else
# define XFS_BUFTARG_XFILE	(0)
# define XFS_BUFTARG_DIRECT_MAP	(0)
#endif

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN	(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

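/*
 * Usage sketch (illustrative): most callers need one contiguous mapping,
 * which the wrappers below build with DEFINE_SINGLE_BUF_MAP, e.g.:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	error = xfs_buf_get_map(target, &map, 1, flags, &bp);
 *
 * Discontiguous (compound) buffers instead pass an array of maps and
 * nmaps > 1 to the _map variants directly.
 */
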
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed.  The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count.  Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered permanently failed and
	 * the iodone handler will take appropriate action.  (See the sketch
	 * after this structure for how these fields interact.)
	 *
	 * For retry timeouts, we record the jiffy of the first failure.  This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors, e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct xfs_buf_cache	*b_cache;
	struct rcu_head		b_rcu;
};
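
/*
 * A minimal sketch of how the retry fields above interact (illustrative
 * only; the real policy lives in the buffer I/O completion code in
 * xfs_buf.c, and max_retries/timeout stand in for the configurable error
 * settings):
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		// new error type: restart the retry accounting
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies, bp->b_first_retry_time + timeout)) {
 *		// permanent failure: let the iodone handler act on it
 *	}
 */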

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

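/*
 * Typical read pattern (a minimal sketch; "ops" stands in for one of the
 * xfs_buf_ops verifier tables appropriate to the metadata being read):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(target, blkno, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	// ... work with bp->b_addr ...
 *	xfs_buf_relse(bp);	// unlock and drop the hold reference
 */
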
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
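
/*
 * Delwri usage sketch (illustrative): callers accumulate buffers on a
 * private list and then submit the whole batch in one go:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	// with each buffer locked and held:
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *	...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */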

static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

int xfs_buf_alloc_page_array(struct xfs_buf *bp, gfp_t gfp_mask);
void xfs_buf_free_page_array(struct xfs_buf *bp);

/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg_common(struct xfs_mount *mp,
		const char *descr);
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

static inline struct block_device *
xfs_buftarg_bdev(struct xfs_buftarg *btp)
{
	if (btp->bt_flags & XFS_BUFTARG_XFILE)
		return NULL;
	return btp->bt_bdev;
}

static inline unsigned int
xfs_getsize_buftarg(struct xfs_buftarg *btp)
{
	if (btp->bt_flags & XFS_BUFTARG_XFILE)
		return SECTOR_SIZE;
	return block_size(btp->bt_bdev);
}

static inline bool
xfs_readonly_buftarg(struct xfs_buftarg *btp)
{
	if (btp->bt_flags & XFS_BUFTARG_XFILE)
		return false;
	return bdev_read_only(btp->bt_bdev);
}

static inline int
xfs_buftarg_flush(struct xfs_buftarg *btp)
{
	if (btp->bt_flags & XFS_BUFTARG_XFILE)
		return 0;
	return blkdev_issue_flush(btp->bt_bdev);
}

static inline int
xfs_buftarg_zeroout(
	struct xfs_buftarg	*btp,
	sector_t		sector,
	sector_t		nr_sects,
	gfp_t			gfp_mask,
	unsigned		flags)
{
	if (btp->bt_flags & XFS_BUFTARG_XFILE)
		return -EOPNOTSUPP;
	return blkdev_issue_zeroout(btp->bt_bdev, sector, nr_sects, gfp_mask,
			flags);
}

xfs_daddr_t xfs_buftarg_nr_sectors(struct xfs_buftarg *btp);

static inline bool
xfs_buftarg_verify_daddr(
	struct xfs_buftarg	*btp,
	xfs_daddr_t		daddr)
{
	return daddr < xfs_buftarg_nr_sectors(btp);
}

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);
bool xfs_buf_check_poisoned(struct xfs_buf *bp);

#endif	/* __XFS_BUF_H__ */