// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

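/*
 * Per-writeback context: caches the most recently looked up mapping in
 * ctx.iomap together with the data and COW fork sequence numbers sampled
 * when it was built, so xfs_imap_valid() can cheaply detect when the cached
 * mapping has been invalidated by concurrent extent changes.
 */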
struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_fsize_t		isize;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
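	/*
	 * xfs_new_eof() returns 0 when the completed range does not extend
	 * the on-disk size, in which case there is nothing to log here.
	 */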
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_disk_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error. The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty. If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip, offset,
					offset + size);
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion. Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain we process here can be arbitrarily long, and we may have to
 * perform a significant amount of work on each ioend to complete it. Hence we
 * have to be careful about holding the CPU for too long in this loop.
 */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp);
		xfs_end_ioend(ioend);
		cond_resched();
	}
}

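/*
 * Per-bio completion handler: queue the finished ioend on the owning inode's
 * i_ioend_list and kick xfs_end_io() on the unwritten workqueue if this was
 * the first entry, so the transactional completion work runs in process
 * context.
 */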
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
				&ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the
 * current mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx	*wpc,
	struct xfs_inode		*ip,
	loff_t				offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data
	 * fork because concurrent changes could have invalidated the extent.
	 * Check the COW fork because concurrent changes since the last time
	 * we checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
		trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
		return false;
	}
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
		trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
		return false;
	}
	return true;
}

/*
 * Pass in a delalloc extent and convert it to real extents, returning the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap. Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

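/*
 * Look up (or allocate) the extent that backs @offset for writeback and cache
 * it in wpc->iomap. COW fork extents take precedence over data fork extents
 * when both cover the offset, and delalloc extents are converted to real
 * allocations via xfs_convert_blocks().
 */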
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared. COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page. Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents. If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(!xfs_need_iread_extents(&ip->i_df));

	/*
	 * Check if this offset is covered by a COW extent, and if so use it
	 * directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one. This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging, the returned real extent might be larger than the
	 * original delalloc one. Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

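/*
 * Submission-time ioend setup: convert CoW extents covered by the ioend to
 * regular extents, and route ioends that will need transactional completion
 * work (on-disk size updates, unwritten extent conversion, COW remapping) to
 * xfs_end_bio().
 */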
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	memalloc_nofs_restore(nofs_flag);

	/* send ioends that might require a transaction to the completion wq */
	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
	    (ioend->io_flags & IOMAP_F_SHARED))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch
 * them out. If we don't, we can leave a stale delalloc mapping covered by a
 * clean page that needs to be dirtied again before the delalloc mapping can
 * be converted. This stale delalloc mapping can trip up a later direct I/O
 * read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
	struct folio		*folio,
	loff_t			pos)
{
	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	if (xfs_is_shutdown(mp))
		return;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
			folio, ip->i_ino, pos);

	/*
	 * The end of the punch range is always the offset of the first byte
	 * of the next folio. Hence the end offset is only dependent on the
	 * folio itself and not the start offset that is passed in.
	 */
	error = xfs_bmap_punch_delalloc_range(ip, pos,
			folio_pos(folio) + folio_size(folio));

	if (error && !xfs_is_shutdown(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
}

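/* Writeback callbacks handed to the generic iomap writeback code. */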
static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_folio		= xfs_discard_folio,
};

/*
 * Extend the writeback range to allocation unit granularity and alignment.
 * This is a requirement for blocksize > pagesize scenarios such as realtime
 * copy on write, since we can only share full rt extents.
 */
static inline void
xfs_vm_writepages_extend(
	struct xfs_inode	*ip,
	struct writeback_control *wbc)
{
	unsigned int		bsize = xfs_inode_alloc_unitsize(ip);
	long long int		pages_to_write;
	loff_t			next = wbc->range_end + 1;

	wbc->range_start = rounddown_64(wbc->range_start, bsize);
	if (wbc->range_end != LLONG_MAX)
		wbc->range_end = roundup_64(next, bsize) - 1;

	if (wbc->nr_to_write != LONG_MAX) {
		pgoff_t		pg_start = wbc->range_start >> PAGE_SHIFT;
		pgoff_t		pg_next = (wbc->range_end + 1) >> PAGE_SHIFT;

		pages_to_write = pg_next - pg_start;
		if (pages_to_write >= LONG_MAX)
			pages_to_write = LONG_MAX;
		if (wbc->nr_to_write < pages_to_write)
			wbc->nr_to_write = pages_to_write;
	}

	trace_xfs_vm_writepages_extend(ip, wbc);
}

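/*
 * ->writepages for ordinary (non-DAX) XFS inodes: hand the dirty pages to the
 * iomap writeback machinery using the callbacks above.
 */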
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };
	struct xfs_inode	*ip = XFS_I(mapping->host);

	/*
	 * Writing back data in a transaction context can result in recursive
	 * transactions. This is bad, so issue a warning and get out of here.
	 */
	if (WARN_ON_ONCE(current->journal_info))
		return 0;

	trace_xfs_vm_writepages(ip, wbc);

	if (xfs_inode_needs_cow_around(ip))
		xfs_vm_writepages_extend(ip, wbc);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

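/*
 * ->writepages for DAX inodes: data is written directly to the backing
 * device, so writeback only has to flush dirty DAX mapping entries via
 * dax_writeback_mapping_range().
 */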
STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_dax_writepages(ip, wbc);

	if (xfs_inode_needs_cow_around(ip))
		xfs_vm_writepages_extend(ip, wbc);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on reflink inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

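/* Buffered read paths: thin wrappers around the iomap read helpers. */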
STATIC int
xfs_vm_read_folio(
	struct file		*unused,
	struct folio		*folio)
{
	return iomap_read_folio(folio, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control	*rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

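/*
 * Activate a swap file on XFS: point the swap code at the backing block
 * device for this inode and let iomap_swapfile_activate() build the swap
 * extent map from the file's mappings.
 */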
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	struct xfs_inode		*ip = XFS_I(file_inode(swap_file));
	struct xfs_buftarg		*btp = xfs_inode_buftarg(ip);

	sis->bdev = xfs_buftarg_bdev(btp);
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

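/* Address space operations for ordinary, page cache backed XFS inodes. */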
const struct address_space_operations xfs_address_space_operations = {
	.read_folio		= xfs_vm_read_folio,
	.readahead		= xfs_vm_readahead,
	.writepages		= xfs_vm_writepages,
	.dirty_folio		= filemap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.bmap			= xfs_vm_bmap,
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

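/*
 * Address space operations for DAX inodes. Reads and writes bypass the page
 * cache, so only writeback and swap activation need filesystem involvement;
 * folio dirtying uses the generic no-op helper.
 */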
const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.dirty_folio		= noop_dirty_folio,
	.swap_activate		= xfs_iomap_swapfile_activate,
};