Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * linux/mm/filemap.c
4 : *
5 : * Copyright (C) 1994-1999 Linus Torvalds
6 : */
7 :
8 : /*
9 : * This file handles the generic file mmap semantics used by
10 : * most "normal" filesystems (but you don't /have/ to use this:
11 : * the NFS filesystem used to do this differently, for example)
12 : */
13 : #include <linux/export.h>
14 : #include <linux/compiler.h>
15 : #include <linux/dax.h>
16 : #include <linux/fs.h>
17 : #include <linux/sched/signal.h>
18 : #include <linux/uaccess.h>
19 : #include <linux/capability.h>
20 : #include <linux/kernel_stat.h>
21 : #include <linux/gfp.h>
22 : #include <linux/mm.h>
23 : #include <linux/swap.h>
24 : #include <linux/swapops.h>
25 : #include <linux/syscalls.h>
26 : #include <linux/mman.h>
27 : #include <linux/pagemap.h>
28 : #include <linux/file.h>
29 : #include <linux/uio.h>
30 : #include <linux/error-injection.h>
31 : #include <linux/hash.h>
32 : #include <linux/writeback.h>
33 : #include <linux/backing-dev.h>
34 : #include <linux/pagevec.h>
35 : #include <linux/security.h>
36 : #include <linux/cpuset.h>
37 : #include <linux/hugetlb.h>
38 : #include <linux/memcontrol.h>
39 : #include <linux/shmem_fs.h>
40 : #include <linux/rmap.h>
41 : #include <linux/delayacct.h>
42 : #include <linux/psi.h>
43 : #include <linux/ramfs.h>
44 : #include <linux/page_idle.h>
45 : #include <linux/migrate.h>
46 : #include <linux/pipe_fs_i.h>
47 : #include <linux/splice.h>
48 : #include <asm/pgalloc.h>
49 : #include <asm/tlbflush.h>
50 : #include "internal.h"
51 :
52 : #define CREATE_TRACE_POINTS
53 : #include <trace/events/filemap.h>
54 :
55 : /*
56 : * FIXME: remove all knowledge of the buffer layer from the core VM
57 : */
58 : #include <linux/buffer_head.h> /* for try_to_free_buffers */
59 :
60 : #include <asm/mman.h>
61 :
62 : #include "swap.h"
63 :
64 : /*
65 : * Shared mappings implemented 30.11.1994. It's not fully working yet,
66 : * though.
67 : *
68 : * Shared mappings now work. 15.8.1995 Bruno.
69 : *
70 : * finished 'unifying' the page and buffer cache and SMP-threaded the
71 : * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
72 : *
73 : * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
74 : */
75 :
76 : /*
77 : * Lock ordering:
78 : *
79 : * ->i_mmap_rwsem (truncate_pagecache)
80 : * ->private_lock (__free_pte->block_dirty_folio)
81 : * ->swap_lock (exclusive_swap_page, others)
82 : * ->i_pages lock
83 : *
84 : * ->i_rwsem
85 : * ->invalidate_lock (acquired by fs in truncate path)
86 : * ->i_mmap_rwsem (truncate->unmap_mapping_range)
87 : *
88 : * ->mmap_lock
89 : * ->i_mmap_rwsem
90 : * ->page_table_lock or pte_lock (various, mainly in memory.c)
91 : * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
92 : *
93 : * ->mmap_lock
94 : * ->invalidate_lock (filemap_fault)
95 : * ->lock_page (filemap_fault, access_process_vm)
96 : *
97 : * ->i_rwsem (generic_perform_write)
98 : * ->mmap_lock (fault_in_readable->do_page_fault)
99 : *
100 : * bdi->wb.list_lock
101 : * sb_lock (fs/fs-writeback.c)
102 : * ->i_pages lock (__sync_single_inode)
103 : *
104 : * ->i_mmap_rwsem
105 : * ->anon_vma.lock (vma_merge)
106 : *
107 : * ->anon_vma.lock
108 : * ->page_table_lock or pte_lock (anon_vma_prepare and various)
109 : *
110 : * ->page_table_lock or pte_lock
111 : * ->swap_lock (try_to_unmap_one)
112 : * ->private_lock (try_to_unmap_one)
113 : * ->i_pages lock (try_to_unmap_one)
114 : * ->lruvec->lru_lock (follow_page->mark_page_accessed)
115 : * ->lruvec->lru_lock (check_pte_range->isolate_lru_page)
116 : * ->private_lock (page_remove_rmap->set_page_dirty)
117 : * ->i_pages lock (page_remove_rmap->set_page_dirty)
118 : * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
119 : * ->inode->i_lock (page_remove_rmap->set_page_dirty)
120 : * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock)
121 : * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
122 : * ->inode->i_lock (zap_pte_range->set_page_dirty)
123 : * ->private_lock (zap_pte_range->block_dirty_folio)
124 : *
125 : * ->i_mmap_rwsem
126 : * ->tasklist_lock (memory_failure, collect_procs_ao)
127 : */
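To make the ordering above concrete, here is a minimal illustrative sketch (not part of mm/filemap.c; the example_* name is invented) of a truncate-style path that follows the i_rwsem -> invalidate_lock -> i_mmap_rwsem nesting. It assumes the caller already holds inode->i_rwsem and uses the generic helpers filemap_invalidate_lock()/filemap_invalidate_unlock() and truncate_setsize():

static void example_truncate_locked(struct inode *inode, loff_t newsize)
{
        struct address_space *mapping = inode->i_mapping;

        /* ->invalidate_lock nests inside ->i_rwsem, held by the caller */
        filemap_invalidate_lock(mapping);
        /*
         * truncate_setsize() ends up in unmap_mapping_range(), which takes
         * ->i_mmap_rwsem, matching the ordering documented above.
         */
        truncate_setsize(inode, newsize);
        filemap_invalidate_unlock(mapping);
}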
128 :
129 251082719 : static void page_cache_delete(struct address_space *mapping,
130 : struct folio *folio, void *shadow)
131 : {
132 251082719 : XA_STATE(xas, &mapping->i_pages, folio->index);
133 251082719 : long nr = 1;
134 :
135 502165438 : mapping_set_update(&xas, mapping);
136 :
137 : /* hugetlb pages are represented by a single entry in the xarray */
138 251082719 : if (!folio_test_hugetlb(folio)) {
139 251082633 : xas_set_order(&xas, folio->index, folio_order(folio));
140 251082061 : nr = folio_nr_pages(folio);
141 : }
142 :
143 251081769 : VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
144 :
145 251081526 : xas_store(&xas, shadow);
146 251082855 : xas_init_marks(&xas);
147 :
148 251082823 : folio->mapping = NULL;
149 : /* Leave page->index set: truncation lookup relies upon it */
150 251082823 : mapping->nrpages -= nr;
151 251082823 : }
152 :
153 2558004485 : static void filemap_unaccount_folio(struct address_space *mapping,
154 : struct folio *folio)
155 : {
156 2558004485 : long nr;
157 :
158 2558004485 : VM_BUG_ON_FOLIO(folio_mapped(folio), folio);
159 2557851126 : if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {
160 : pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",
161 : current->comm, folio_pfn(folio));
162 : dump_page(&folio->page, "still mapped when deleted");
163 : dump_stack();
164 : add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
165 :
166 : if (mapping_exiting(mapping) && !folio_test_large(folio)) {
167 : int mapcount = page_mapcount(&folio->page);
168 :
169 : if (folio_ref_count(folio) >= mapcount + 2) {
170 : /*
171 : * All vmas have already been torn down, so it's
172 : * a good bet that actually the page is unmapped
173 : * and we'd rather not leak it: if we're wrong,
174 : * another bad page check should catch it later.
175 : */
176 : page_mapcount_reset(&folio->page);
177 : folio_ref_sub(folio, mapcount);
178 : }
179 : }
180 : }
181 :
182 : /* hugetlb folios do not participate in page cache accounting. */
183 2557851126 : if (folio_test_hugetlb(folio))
184 : return;
185 :
186 2557896777 : nr = folio_nr_pages(folio);
187 :
188 2557705525 : __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
189 2558309514 : if (folio_test_swapbacked(folio)) {
190 8978289 : __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
191 8978288 : if (folio_test_pmd_mappable(folio))
192 0 : __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
193 2549293187 : } else if (folio_test_pmd_mappable(folio)) {
194 239321 : __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
195 239323 : filemap_nr_thps_dec(mapping);
196 : }
197 :
198 : /*
199 : * At this point folio must be either written or cleaned by
200 : * truncate. Dirty folio here signals a bug and loss of
201 : * unwritten data - on ordinary filesystems.
202 : *
203 : * But it's harmless on in-memory filesystems like tmpfs; and can
204 : * occur when a driver which did get_user_pages() sets page dirty
205 : * before putting it, while the inode is being finally evicted.
206 : *
207 : * Below fixes dirty accounting after removing the folio entirely
208 : * but leaves the dirty flag set: it has no effect for a truncated
209 : * folio and will anyway be cleared before the folio is returned to
210 : * the buddy allocator.
211 : */
212 2558158259 : if (WARN_ON_ONCE(folio_test_dirty(folio) &&
213 : mapping_can_writeback(mapping)))
214 0 : folio_account_cleaned(folio, inode_to_wb(mapping->host));
215 : }
216 :
217 : /*
218 : * Delete a page from the page cache and free it. Caller has to make
219 : * sure the page is locked and that nobody else uses it - or that usage
220 : * is safe. The caller must hold the i_pages lock.
221 : */
222 251083089 : void __filemap_remove_folio(struct folio *folio, void *shadow)
223 : {
224 251083089 : struct address_space *mapping = folio->mapping;
225 :
226 251083089 : trace_mm_filemap_delete_from_page_cache(folio);
227 251082913 : filemap_unaccount_folio(mapping, folio);
228 251082764 : page_cache_delete(mapping, folio, shadow);
229 251082510 : }
230 :
231 2364959881 : void filemap_free_folio(struct address_space *mapping, struct folio *folio)
232 : {
233 2364959881 : void (*free_folio)(struct folio *);
234 2364959881 : int refs = 1;
235 :
236 2364959881 : free_folio = mapping->a_ops->free_folio;
237 2364959881 : if (free_folio)
238 0 : free_folio(folio);
239 :
240 2364959881 : if (folio_test_large(folio) && !folio_test_hugetlb(folio))
241 126425437 : refs = folio_nr_pages(folio);
242 2364928904 : folio_put_refs(folio, refs);
243 2364983408 : }
244 :
245 : /**
246 : * filemap_remove_folio - Remove folio from page cache.
247 : * @folio: The folio.
248 : *
249 : * This must be called only on folios that are locked and have been
250 : * verified to be in the page cache. It will never put the folio into
251 : * the free list because the caller has a reference on the page.
252 : */
253 34594430 : void filemap_remove_folio(struct folio *folio)
254 : {
255 34594430 : struct address_space *mapping = folio->mapping;
256 :
257 34594430 : BUG_ON(!folio_test_locked(folio));
258 34594392 : spin_lock(&mapping->host->i_lock);
259 34594509 : xa_lock_irq(&mapping->i_pages);
260 34594518 : __filemap_remove_folio(folio, NULL);
261 34594273 : xa_unlock_irq(&mapping->i_pages);
262 34594335 : if (mapping_shrinkable(mapping))
263 1228608 : inode_add_lru(mapping->host);
264 34594139 : spin_unlock(&mapping->host->i_lock);
265 :
266 34594313 : filemap_free_folio(mapping, folio);
267 34594424 : }
268 :
269 : /*
270 : * page_cache_delete_batch - delete several folios from page cache
271 : * @mapping: the mapping to which folios belong
272 : * @fbatch: batch of folios to delete
273 : *
274 : * The function walks over mapping->i_pages and removes folios passed in
275 : * @fbatch from the mapping. The function expects @fbatch to be sorted
276 : * by page index and is optimised for it to be dense.
277 : * It tolerates holes in @fbatch (mapping entries at those indices are not
278 : * modified).
279 : *
280 : * The function expects the i_pages lock to be held.
281 : */
282 229963295 : static void page_cache_delete_batch(struct address_space *mapping,
283 : struct folio_batch *fbatch)
284 : {
285 229963295 : XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
286 229963295 : long total_pages = 0;
287 229963295 : int i = 0;
288 229963295 : struct folio *folio;
289 :
290 459926590 : mapping_set_update(&xas, mapping);
291 2541189145 : xas_for_each(&xas, folio, ULONG_MAX) {
292 2500616777 : if (i >= folio_batch_count(fbatch))
293 : break;
294 :
295 : /* A swap/dax/shadow entry got inserted? Skip it. */
296 2311227906 : if (xa_is_value(folio))
297 95 : continue;
298 : /*
299 : * A page got inserted in our range? Skip it. We have our
300 : * pages locked so they are protected from being removed.
301 : * If we see a page whose index is higher than ours, it
302 : * means our page has been removed, which shouldn't be
303 : * possible because we're holding the PageLock.
304 : */
305 2311227811 : if (folio != fbatch->folios[i]) {
306 4168153 : VM_BUG_ON_FOLIO(folio->index >
307 : fbatch->folios[i]->index, folio);
308 4168153 : continue;
309 : }
310 :
311 2307059658 : WARN_ON_ONCE(!folio_test_locked(folio));
312 :
313 2306758100 : folio->mapping = NULL;
314 : /* Leave folio->index set: truncation lookup relies on it */
315 :
316 2306758100 : i++;
317 2306758100 : xas_store(&xas, NULL);
318 2307231020 : total_pages += folio_nr_pages(folio);
319 : }
320 229939581 : mapping->nrpages -= total_pages;
321 229939581 : }
322 :
323 236567315 : void delete_from_page_cache_batch(struct address_space *mapping,
324 : struct folio_batch *fbatch)
325 : {
326 236567315 : int i;
327 :
328 236567315 : if (!folio_batch_count(fbatch))
329 : return;
330 :
331 229973122 : spin_lock(&mapping->host->i_lock);
332 229973175 : xa_lock_irq(&mapping->i_pages);
333 2766989963 : for (i = 0; i < folio_batch_count(fbatch); i++) {
334 2307051605 : struct folio *folio = fbatch->folios[i];
335 :
336 2307051605 : trace_mm_filemap_delete_from_page_cache(folio);
337 2306969873 : filemap_unaccount_folio(mapping, folio);
338 : }
339 229965183 : page_cache_delete_batch(mapping, fbatch);
340 229944891 : xa_unlock_irq(&mapping->i_pages);
341 229960223 : if (mapping_shrinkable(mapping))
342 25487663 : inode_add_lru(mapping->host);
343 229932408 : spin_unlock(&mapping->host->i_lock);
344 :
345 2766886556 : for (i = 0; i < folio_batch_count(fbatch); i++)
346 2306983722 : filemap_free_folio(mapping, fbatch->folios[i]);
347 : }
348 :
349 744720881 : int filemap_check_errors(struct address_space *mapping)
350 : {
351 744720881 : int ret = 0;
352 : /* Check for outstanding write errors */
353 1489441807 : if (test_bit(AS_ENOSPC, &mapping->flags) &&
354 : test_and_clear_bit(AS_ENOSPC, &mapping->flags))
355 45 : ret = -ENOSPC;
356 1489445517 : if (test_bit(AS_EIO, &mapping->flags) &&
357 : test_and_clear_bit(AS_EIO, &mapping->flags))
358 3755 : ret = -EIO;
359 744720881 : return ret;
360 : }
361 : EXPORT_SYMBOL(filemap_check_errors);
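The AS_EIO/AS_ENOSPC bits tested above are normally set by writeback completion paths via mapping_set_error() from <linux/pagemap.h>. A minimal illustrative sketch (the example_* name is invented, not part of this file):

static void example_end_writeback(struct address_space *mapping, int error)
{
        /*
         * Record the error both in mapping->flags (AS_EIO/AS_ENOSPC), which
         * filemap_check_errors() above consumes, and in mapping->wb_err for
         * the errseq_t based reporting used further below.
         */
        if (error)
                mapping_set_error(mapping, error);
}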
362 :
363 270415607 : static int filemap_check_and_keep_errors(struct address_space *mapping)
364 : {
365 : /* Check for outstanding write errors */
366 540831214 : if (test_bit(AS_EIO, &mapping->flags))
367 : return -EIO;
368 540819948 : if (test_bit(AS_ENOSPC, &mapping->flags))
369 241 : return -ENOSPC;
370 : return 0;
371 : }
372 :
373 : /**
374 : * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
375 : * @mapping: address space structure to write
376 : * @wbc: the writeback_control controlling the writeout
377 : *
378 : * Call writepages on the mapping using the provided wbc to control the
379 : * writeout.
380 : *
381 : * Return: %0 on success, negative error code otherwise.
382 : */
383 939916485 : int filemap_fdatawrite_wbc(struct address_space *mapping,
384 : struct writeback_control *wbc)
385 : {
386 939916485 : int ret;
387 :
388 939916485 : if (!mapping_can_writeback(mapping) ||
389 : !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
390 : return 0;
391 :
392 47897044 : wbc_attach_fdatawrite_inode(wbc, mapping->host);
393 47898711 : ret = do_writepages(mapping, wbc);
394 47896828 : wbc_detach_inode(wbc);
395 47896828 : return ret;
396 : }
397 : EXPORT_SYMBOL(filemap_fdatawrite_wbc);
398 :
399 : /**
400 : * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
401 : * @mapping: address space structure to write
402 : * @start: offset in bytes where the range starts
403 : * @end: offset in bytes where the range ends (inclusive)
404 : * @sync_mode: enable synchronous operation
405 : *
406 : * Start writeback against all of a mapping's dirty pages that lie
407 : * within the byte offsets <start, end> inclusive.
408 : *
409 : * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
410 : * opposed to a regular memory cleansing writeback. The difference between
411 : * these two operations is that if a dirty page/buffer is encountered, it must
412 : * be waited upon, and not just skipped over.
413 : *
414 : * Return: %0 on success, negative error code otherwise.
415 : */
416 937745403 : int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
417 : loff_t end, int sync_mode)
418 : {
419 937745403 : struct writeback_control wbc = {
420 : .sync_mode = sync_mode,
421 : .nr_to_write = LONG_MAX,
422 : .range_start = start,
423 : .range_end = end,
424 : };
425 :
426 937745403 : return filemap_fdatawrite_wbc(mapping, &wbc);
427 : }
428 :
429 : static inline int __filemap_fdatawrite(struct address_space *mapping,
430 : int sync_mode)
431 : {
432 269241086 : return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
433 : }
434 :
435 266018687 : int filemap_fdatawrite(struct address_space *mapping)
436 : {
437 266018687 : return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
438 : }
439 : EXPORT_SYMBOL(filemap_fdatawrite);
440 :
441 12054966 : int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
442 : loff_t end)
443 : {
444 12054966 : return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
445 : }
446 : EXPORT_SYMBOL(filemap_fdatawrite_range);
447 :
448 : /**
449 : * filemap_flush - mostly a non-blocking flush
450 : * @mapping: target address_space
451 : *
452 : * This is a mostly non-blocking flush. Not suitable for data-integrity
453 : * purposes - I/O may not be started against all dirty pages.
454 : *
455 : * Return: %0 on success, negative error code otherwise.
456 : */
457 3222399 : int filemap_flush(struct address_space *mapping)
458 : {
459 3222399 : return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
460 : }
461 : EXPORT_SYMBOL(filemap_flush);
462 :
463 : /**
464 : * filemap_range_has_page - check if a page exists in range.
465 : * @mapping: address space within which to check
466 : * @start_byte: offset in bytes where the range starts
467 : * @end_byte: offset in bytes where the range ends (inclusive)
468 : *
469 : * Find at least one page in the range supplied, usually used to check if
470 : * direct writing in this range will trigger a writeback.
471 : *
472 : * Return: %true if at least one page exists in the specified range,
473 : * %false otherwise.
474 : */
475 3368011 : bool filemap_range_has_page(struct address_space *mapping,
476 : loff_t start_byte, loff_t end_byte)
477 : {
478 3368011 : struct folio *folio;
479 3368011 : XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
480 3368011 : pgoff_t max = end_byte >> PAGE_SHIFT;
481 :
482 3368011 : if (end_byte < start_byte)
483 : return false;
484 :
485 3367598 : rcu_read_lock();
486 3367577 : for (;;) {
487 3367577 : folio = xas_find(&xas, max);
488 3367487 : if (xas_retry(&xas, folio))
489 0 : continue;
490 : /* Shadow entries don't count */
491 3367487 : if (xa_is_value(folio))
492 0 : continue;
493 : /*
494 : * We don't need to try to pin this page; we're about to
495 : * release the RCU lock anyway. It is enough to know that
496 : * there was a page here recently.
497 : */
498 3367487 : break;
499 : }
500 3367487 : rcu_read_unlock();
501 :
502 3367528 : return folio != NULL;
503 : }
504 : EXPORT_SYMBOL(filemap_range_has_page);
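As a hedged illustration of the use case mentioned in the comment above (not part of this file; example_dio_needs_flush is an invented name), a direct-write path might check the target byte range before deciding whether a flush is needed:

static bool example_dio_needs_flush(struct file *file, loff_t pos, size_t count)
{
        /* The end byte is inclusive, hence the -1 (assumes count > 0) */
        return filemap_range_has_page(file->f_mapping, pos, pos + count - 1);
}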
505 :
506 937920170 : static void __filemap_fdatawait_range(struct address_space *mapping,
507 : loff_t start_byte, loff_t end_byte)
508 : {
509 937920170 : pgoff_t index = start_byte >> PAGE_SHIFT;
510 937920170 : pgoff_t end = end_byte >> PAGE_SHIFT;
511 937920170 : struct folio_batch fbatch;
512 937920170 : unsigned nr_folios;
513 :
514 937920170 : folio_batch_init(&fbatch);
515 :
516 957639075 : while (index <= end) {
517 940746325 : unsigned i;
518 :
519 940746325 : nr_folios = filemap_get_folios_tag(mapping, &index, end,
520 : PAGECACHE_TAG_WRITEBACK, &fbatch);
521 :
522 940703773 : if (!nr_folios)
523 : break;
524 :
525 128399436 : for (i = 0; i < nr_folios; i++) {
526 108624389 : struct folio *folio = fbatch.folios[i];
527 :
528 108624389 : folio_wait_writeback(folio);
529 108614686 : folio_clear_error(folio);
530 : }
531 19775047 : folio_batch_release(&fbatch);
532 19772827 : cond_resched();
533 : }
534 937818458 : }
535 :
536 : /**
537 : * filemap_fdatawait_range - wait for writeback to complete
538 : * @mapping: address space structure to wait for
539 : * @start_byte: offset in bytes where the range starts
540 : * @end_byte: offset in bytes where the range ends (inclusive)
541 : *
542 : * Walk the list of under-writeback pages of the given address space
543 : * in the given range and wait for all of them. Check error status of
544 : * the address space and return it.
545 : *
546 : * Since the error status of the address space is cleared by this function,
547 : * callers are responsible for checking the return value and handling and/or
548 : * reporting the error.
549 : *
550 : * Return: error status of the address space.
551 : */
552 11450723 : int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
553 : loff_t end_byte)
554 : {
555 11450723 : __filemap_fdatawait_range(mapping, start_byte, end_byte);
556 11450228 : return filemap_check_errors(mapping);
557 : }
558 : EXPORT_SYMBOL(filemap_fdatawait_range);
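A minimal illustrative sketch (not part of this file; the example_* name is invented) of the classic start-writeback-then-wait pairing built from the two exported helpers. filemap_write_and_wait_range() below is the canonical, more careful version of the same idea:

static int example_flush_range(struct address_space *mapping,
                               loff_t start, loff_t end)
{
        int err = filemap_fdatawrite_range(mapping, start, end);

        if (err)
                return err;
        /* Wait for the writeback started above and report any error */
        return filemap_fdatawait_range(mapping, start, end);
}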
559 :
560 : /**
561 : * filemap_fdatawait_range_keep_errors - wait for writeback to complete
562 : * @mapping: address space structure to wait for
563 : * @start_byte: offset in bytes where the range starts
564 : * @end_byte: offset in bytes where the range ends (inclusive)
565 : *
566 : * Walk the list of under-writeback pages of the given address space in the
567 : * given range and wait for all of them. Unlike filemap_fdatawait_range(),
568 : * this function does not clear error status of the address space.
569 : *
570 : * Use this function if callers don't handle errors themselves. Expected
571 : * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
572 : * fsfreeze(8)
573 : */
574 159914 : int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
575 : loff_t start_byte, loff_t end_byte)
576 : {
577 159914 : __filemap_fdatawait_range(mapping, start_byte, end_byte);
578 159914 : return filemap_check_and_keep_errors(mapping);
579 : }
580 : EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
581 :
582 : /**
583 : * file_fdatawait_range - wait for writeback to complete
584 : * @file: file pointing to address space structure to wait for
585 : * @start_byte: offset in bytes where the range starts
586 : * @end_byte: offset in bytes where the range ends (inclusive)
587 : *
588 : * Walk the list of under-writeback pages of the address space that file
589 : * refers to, in the given range and wait for all of them. Check error
590 : * status of the address space vs. the file->f_wb_err cursor and return it.
591 : *
592 : * Since the error status of the file is advanced by this function,
593 : * callers are responsible for checking the return value and handling and/or
594 : * reporting the error.
595 : *
596 : * Return: error status of the address space vs. the file->f_wb_err cursor.
597 : */
598 1501 : int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
599 : {
600 1501 : struct address_space *mapping = file->f_mapping;
601 :
602 1501 : __filemap_fdatawait_range(mapping, start_byte, end_byte);
603 1501 : return file_check_and_advance_wb_err(file);
604 : }
605 : EXPORT_SYMBOL(file_fdatawait_range);
606 :
607 : /**
608 : * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
609 : * @mapping: address space structure to wait for
610 : *
611 : * Walk the list of under-writeback pages of the given address space
612 : * and wait for all of them. Unlike filemap_fdatawait(), this function
613 : * does not clear error status of the address space.
614 : *
615 : * Use this function if callers don't handle errors themselves. Expected
616 : * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
617 : * fsfreeze(8)
618 : *
619 : * Return: error status of the address space.
620 : */
621 270120069 : int filemap_fdatawait_keep_errors(struct address_space *mapping)
622 : {
623 270120069 : __filemap_fdatawait_range(mapping, 0, LLONG_MAX);
624 270095122 : return filemap_check_and_keep_errors(mapping);
625 : }
626 : EXPORT_SYMBOL(filemap_fdatawait_keep_errors);
627 :
628 : /* Returns true if writeback might be needed or already in progress. */
629 : static bool mapping_needs_writeback(struct address_space *mapping)
630 : {
631 748265792 : return mapping->nrpages;
632 : }
633 :
634 0 : bool filemap_range_has_writeback(struct address_space *mapping,
635 : loff_t start_byte, loff_t end_byte)
636 : {
637 0 : XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
638 0 : pgoff_t max = end_byte >> PAGE_SHIFT;
639 0 : struct folio *folio;
640 :
641 0 : if (end_byte < start_byte)
642 : return false;
643 :
644 0 : rcu_read_lock();
645 0 : xas_for_each(&xas, folio, max) {
646 0 : if (xas_retry(&xas, folio))
647 0 : continue;
648 0 : if (xa_is_value(folio))
649 0 : continue;
650 0 : if (folio_test_dirty(folio) || folio_test_locked(folio) ||
651 : folio_test_writeback(folio))
652 : break;
653 : }
654 0 : rcu_read_unlock();
655 0 : return folio != NULL;
656 : }
657 : EXPORT_SYMBOL_GPL(filemap_range_has_writeback);
658 :
659 : /**
660 : * filemap_write_and_wait_range - write out & wait on a file range
661 : * @mapping: the address_space for the pages
662 : * @lstart: offset in bytes where the range starts
663 : * @lend: offset in bytes where the range ends (inclusive)
664 : *
665 : * Write out and wait upon file offsets lstart->lend, inclusive.
666 : *
667 : * Note that @lend is inclusive (describes the last byte to be written) so
668 : * that this function can be used to write to the very end-of-file (end = -1).
669 : *
670 : * Return: error status of the address space.
671 : */
672 734692623 : int filemap_write_and_wait_range(struct address_space *mapping,
673 : loff_t lstart, loff_t lend)
674 : {
675 734692623 : int err = 0, err2;
676 :
677 734692623 : if (lend < lstart)
678 : return 0;
679 :
680 733231910 : if (mapping_needs_writeback(mapping)) {
681 651611402 : err = __filemap_fdatawrite_range(mapping, lstart, lend,
682 : WB_SYNC_ALL);
683 : /*
684 : * Even if the above returned error, the pages may be
685 : * written partially (e.g. -ENOSPC), so we wait for it.
686 : * But -EIO is a special case: it may indicate that something
687 : * seriously wrong (e.g. a bug) happened, so we avoid waiting for it.
688 : */
689 651524332 : if (err != -EIO)
690 651504419 : __filemap_fdatawait_range(mapping, lstart, lend);
691 : }
692 733205069 : err2 = filemap_check_errors(mapping);
693 733081284 : if (!err)
694 732987709 : err = err2;
695 : return err;
696 : }
697 : EXPORT_SYMBOL(filemap_write_and_wait_range);
698 :
699 2398955 : void __filemap_set_wb_err(struct address_space *mapping, int err)
700 : {
701 2398955 : errseq_t eseq = errseq_set(&mapping->wb_err, err);
702 :
703 2398956 : trace_filemap_set_wb_err(mapping, eseq);
704 2398957 : }
705 : EXPORT_SYMBOL(__filemap_set_wb_err);
706 :
707 : /**
708 : * file_check_and_advance_wb_err - report wb error (if any) that was previously
709 : * recorded and advance wb_err to the current one
710 : * @file: struct file on which the error is being reported
711 : *
712 : * When userland calls fsync (or something like nfsd does the equivalent), we
713 : * want to report any writeback errors that occurred since the last fsync (or
714 : * since the file was opened if there haven't been any).
715 : *
716 : * Grab the wb_err from the mapping. If it matches what we have in the file,
717 : * then just quickly return 0. The file is all caught up.
718 : *
719 : * If it doesn't match, then take the mapping value, set the "seen" flag in
720 : * it and try to swap it into place. If it works, or another task beat us
721 : * to it with the new value, then update the f_wb_err and return the error
722 : * portion. The error at this point must be reported via proper channels
723 : * (a'la fsync, or NFS COMMIT operation, etc.).
724 : *
725 : * While we handle mapping->wb_err with atomic operations, the f_wb_err
726 : * value is protected by the f_lock since we must ensure that it reflects
727 : * the latest value swapped in for this file descriptor.
728 : *
729 : * Return: %0 on success, negative error code otherwise.
730 : */
731 15549137 : int file_check_and_advance_wb_err(struct file *file)
732 : {
733 15549137 : int err = 0;
734 15549137 : errseq_t old = READ_ONCE(file->f_wb_err);
735 15549137 : struct address_space *mapping = file->f_mapping;
736 :
737 : /* Locklessly handle the common case where nothing has changed */
738 15549137 : if (errseq_check(&mapping->wb_err, old)) {
739 : /* Something changed, must use slow path */
740 3682 : spin_lock(&file->f_lock);
741 3681 : old = file->f_wb_err;
742 3681 : err = errseq_check_and_advance(&mapping->wb_err,
743 : &file->f_wb_err);
744 3682 : trace_file_check_and_advance_wb_err(file, old);
745 3682 : spin_unlock(&file->f_lock);
746 : }
747 :
748 : /*
749 : * We're mostly using this function as a drop-in replacement for
750 : * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
751 : * that the legacy code would have had on these flags.
752 : */
753 15550069 : clear_bit(AS_EIO, &mapping->flags);
754 15552194 : clear_bit(AS_ENOSPC, &mapping->flags);
755 15551621 : return err;
756 : }
757 : EXPORT_SYMBOL(file_check_and_advance_wb_err);
758 :
759 : /**
760 : * file_write_and_wait_range - write out & wait on a file range
761 : * @file: file pointing to address_space with pages
762 : * @lstart: offset in bytes where the range starts
763 : * @lend: offset in bytes where the range ends (inclusive)
764 : *
765 : * Write out and wait upon file offsets lstart->lend, inclusive.
766 : *
767 : * Note that @lend is inclusive (describes the last byte to be written) so
768 : * that this function can be used to write to the very end-of-file (end = -1).
769 : *
770 : * After writing out and waiting on the data, we check and advance the
771 : * f_wb_err cursor to the latest value, and return any errors detected there.
772 : *
773 : * Return: %0 on success, negative error code otherwise.
774 : */
775 15033882 : int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
776 : {
777 15033882 : int err = 0, err2;
778 15033882 : struct address_space *mapping = file->f_mapping;
779 :
780 15033882 : if (lend < lstart)
781 : return 0;
782 :
783 15033882 : if (mapping_needs_writeback(mapping)) {
784 4692826 : err = __filemap_fdatawrite_range(mapping, lstart, lend,
785 : WB_SYNC_ALL);
786 : /* See comment of filemap_write_and_wait() */
787 4692942 : if (err != -EIO)
788 4692641 : __filemap_fdatawait_range(mapping, lstart, lend);
789 : }
790 15033875 : err2 = file_check_and_advance_wb_err(file);
791 15037200 : if (!err)
792 15036748 : err = err2;
793 : return err;
794 : }
795 : EXPORT_SYMBOL(file_write_and_wait_range);
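As a hedged sketch (not from this file; example_fsync is an invented name), a minimal ->fsync() style caller of the helper above. A real implementation would also write out inode metadata and honour datasync:

static int example_fsync(struct file *file, loff_t start, loff_t end,
                         int datasync)
{
        int err = file_write_and_wait_range(file, start, end);

        if (err)
                return err;
        /*
         * Data writeback and f_wb_err advancement are handled above; a real
         * ->fsync() would flush inode metadata here (datasync handling
         * omitted in this sketch).
         */
        return 0;
}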
796 :
797 : /**
798 : * replace_page_cache_folio - replace a pagecache folio with a new one
799 : * @old: folio to be replaced
800 : * @new: folio to replace with
801 : *
802 : * This function replaces a folio in the pagecache with a new one. On
803 : * success it acquires the pagecache reference for the new folio and
804 : * drops it for the old folio. Both the old and new folios must be
805 : * locked. This function does not add the new folio to the LRU, the
806 : * caller must do that.
807 : *
808 : * The remove + add is atomic. This function cannot fail.
809 : */
810 0 : void replace_page_cache_folio(struct folio *old, struct folio *new)
811 : {
812 0 : struct address_space *mapping = old->mapping;
813 0 : void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
814 0 : pgoff_t offset = old->index;
815 0 : XA_STATE(xas, &mapping->i_pages, offset);
816 :
817 0 : VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
818 0 : VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
819 0 : VM_BUG_ON_FOLIO(new->mapping, new);
820 :
821 0 : folio_get(new);
822 0 : new->mapping = mapping;
823 0 : new->index = offset;
824 :
825 0 : mem_cgroup_migrate(old, new);
826 :
827 0 : xas_lock_irq(&xas);
828 0 : xas_store(&xas, new);
829 :
830 0 : old->mapping = NULL;
831 : /* hugetlb pages do not participate in page cache accounting. */
832 0 : if (!folio_test_hugetlb(old))
833 0 : __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
834 0 : if (!folio_test_hugetlb(new))
835 0 : __lruvec_stat_add_folio(new, NR_FILE_PAGES);
836 0 : if (folio_test_swapbacked(old))
837 0 : __lruvec_stat_sub_folio(old, NR_SHMEM);
838 0 : if (folio_test_swapbacked(new))
839 0 : __lruvec_stat_add_folio(new, NR_SHMEM);
840 0 : xas_unlock_irq(&xas);
841 0 : if (free_folio)
842 0 : free_folio(old);
843 0 : folio_put(old);
844 0 : }
845 : EXPORT_SYMBOL_GPL(replace_page_cache_folio);
846 :
847 2513261639 : noinline int __filemap_add_folio(struct address_space *mapping,
848 : struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)
849 : {
850 2513261639 : XA_STATE(xas, &mapping->i_pages, index);
851 2513261639 : int huge = folio_test_hugetlb(folio);
852 2513724319 : bool charged = false;
853 2513724319 : long nr = 1;
854 :
855 2513724319 : VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
856 2513111268 : VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);
857 5026034772 : mapping_set_update(&xas, mapping);
858 :
859 2513017386 : if (!huge) {
860 2512601427 : int error = mem_cgroup_charge(folio, NULL, gfp);
861 2513893329 : VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
862 2513419724 : if (error)
863 : return error;
864 2513439026 : charged = true;
865 2513439026 : xas_set_order(&xas, index, folio_order(folio));
866 2513455887 : nr = folio_nr_pages(folio);
867 : }
868 :
869 2513594053 : gfp &= GFP_RECLAIM_MASK;
870 2513594053 : folio_ref_add(folio, nr);
871 2514283044 : folio->mapping = mapping;
872 2514283044 : folio->index = xas.xa_index;
873 :
874 2514265423 : do {
875 2514265423 : unsigned int order = xa_get_order(xas.xa, xas.xa_index);
876 2513683440 : void *entry, *old = NULL;
877 :
878 2513683440 : if (order > folio_order(folio))
879 2928506 : xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
880 : order, gfp);
881 2513401399 : xas_lock_irq(&xas);
882 2553997849 : xas_for_each_conflict(&xas, entry) {
883 45828049 : old = entry;
884 45828049 : if (!xa_is_value(entry)) {
885 6366055 : xas_set_err(&xas, -EEXIST);
886 6366055 : goto unlock;
887 : }
888 : }
889 :
890 2507066456 : if (old) {
891 35621887 : if (shadowp)
892 35621887 : *shadowp = old;
893 : /* entry may have been split before we acquired lock */
894 35621887 : order = xa_get_order(xas.xa, xas.xa_index);
895 35622388 : if (order > folio_order(folio)) {
896 : /* How to handle large swap entries? */
897 2749888 : BUG_ON(shmem_mapping(mapping));
898 2749888 : xas_split(&xas, old, order);
899 2749899 : xas_reset(&xas);
900 : }
901 : }
902 :
903 2507066519 : xas_store(&xas, folio);
904 2507847504 : if (xas_error(&xas))
905 0 : goto unlock;
906 :
907 2507847504 : mapping->nrpages += nr;
908 :
909 : /* hugetlb pages do not participate in page cache accounting */
910 2507847504 : if (!huge) {
911 2507847504 : __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
912 2507144553 : if (folio_test_pmd_mappable(folio))
913 266636 : __lruvec_stat_mod_folio(folio,
914 : NR_FILE_THPS, nr);
915 : }
916 2506594826 : unlock:
917 2513227523 : xas_unlock_irq(&xas);
918 2514597741 : } while (xas_nomem(&xas, gfp));
919 :
920 2520624324 : if (xas_error(&xas))
921 6366040 : goto error;
922 :
923 2507892244 : trace_mm_filemap_add_to_page_cache(folio);
924 2507892244 : return 0;
925 : error:
926 6366040 : if (charged)
927 6366038 : mem_cgroup_uncharge(folio);
928 6365972 : folio->mapping = NULL;
929 : /* Leave page->index set: truncation relies upon it */
930 6365972 : folio_put_refs(folio, nr);
931 6366029 : return xas_error(&xas);
932 : }
933 : ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
934 :
935 2513634723 : int filemap_add_folio(struct address_space *mapping, struct folio *folio,
936 : pgoff_t index, gfp_t gfp)
937 : {
938 2513634723 : void *shadow = NULL;
939 2513634723 : int ret;
940 :
941 2513634723 : __folio_set_locked(folio);
942 2512737149 : ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
943 2513109754 : if (unlikely(ret))
944 6366014 : __folio_clear_locked(folio);
945 : else {
946 : /*
947 : * The folio might have been evicted from cache only
948 : * recently, in which case it should be activated like
949 : * any other repeatedly accessed folio.
950 : * The exception is folios getting rewritten; evicting other
951 : * data from the working set, only to cache data that will
952 : * get overwritten with something else, is a waste of memory.
953 : */
954 2506743740 : WARN_ON_ONCE(folio_test_active(folio));
955 2506449116 : if (!(gfp & __GFP_WRITE) && shadow)
956 33529203 : workingset_refault(folio, shadow);
957 2506449531 : folio_add_lru(folio);
958 : }
959 2514288085 : return ret;
960 : }
961 : EXPORT_SYMBOL_GPL(filemap_add_folio);
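An illustrative sketch (not part of this file; example_grab_new_folio is an invented name) of the usual allocate-then-insert pattern read paths use with filemap_alloc_folio() below and filemap_add_folio() above:

static struct folio *example_grab_new_folio(struct address_space *mapping,
                                            pgoff_t index, gfp_t gfp)
{
        struct folio *folio = filemap_alloc_folio(gfp, 0);
        int err;

        if (!folio)
                return ERR_PTR(-ENOMEM);

        err = filemap_add_folio(mapping, folio, index, gfp);
        if (err) {
                folio_put(folio);
                return ERR_PTR(err);
        }
        /*
         * On success the folio is locked, in the page cache and on the LRU;
         * the caller must unlock it once it has been brought up to date.
         */
        return folio;
}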
962 :
963 : #ifdef CONFIG_NUMA
964 2513134979 : struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
965 : {
966 2513134979 : int n;
967 2513134979 : struct folio *folio;
968 :
969 2513134979 : if (cpuset_do_page_mem_spread()) {
970 3785 : unsigned int cpuset_mems_cookie;
971 3785 : do {
972 3785 : cpuset_mems_cookie = read_mems_allowed_begin();
973 0 : n = cpuset_mem_spread_node();
974 0 : folio = __folio_alloc_node(gfp, order, n);
975 0 : } while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));
976 :
977 0 : return folio;
978 : }
979 2512803808 : return folio_alloc(gfp, order);
980 : }
981 : EXPORT_SYMBOL(filemap_alloc_folio);
982 : #endif
983 :
984 : /*
985 : * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
986 : *
987 : * Exclusively lock the invalidate_lock of any passed mapping that is not NULL.
988 : *
989 : * @mapping1: the first mapping to lock
990 : * @mapping2: the second mapping to lock
991 : */
992 140173298 : void filemap_invalidate_lock_two(struct address_space *mapping1,
993 : struct address_space *mapping2)
994 : {
995 140173298 : if (mapping1 > mapping2)
996 67896920 : swap(mapping1, mapping2);
997 140173298 : if (mapping1)
998 140173298 : down_write(&mapping1->invalidate_lock);
999 140178394 : if (mapping2 && mapping1 != mapping2)
1000 135386882 : down_write_nested(&mapping2->invalidate_lock, 1);
1001 140185151 : }
1002 : EXPORT_SYMBOL(filemap_invalidate_lock_two);
1003 :
1004 : /*
1005 : * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
1006 : *
1007 : * Unlock the exclusively held invalidate_lock of any passed mapping that is not NULL.
1008 : *
1009 : * @mapping1: the first mapping to unlock
1010 : * @mapping2: the second mapping to unlock
1011 : */
1012 140143548 : void filemap_invalidate_unlock_two(struct address_space *mapping1,
1013 : struct address_space *mapping2)
1014 : {
1015 140143548 : if (mapping1)
1016 140143548 : up_write(&mapping1->invalidate_lock);
1017 140174986 : if (mapping2 && mapping1 != mapping2)
1018 135388742 : up_write(&mapping2->invalidate_lock);
1019 140166997 : }
1020 : EXPORT_SYMBOL(filemap_invalidate_unlock_two);
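A minimal illustrative sketch (not part of this file; the example_* name is invented) of how a cross-file operation might use the pair above to exclude concurrent invalidate_lock users (page cache fills, hole punching, etc.) on both mappings at once:

static void example_operate_on_two_files(struct file *f1, struct file *f2)
{
        filemap_invalidate_lock_two(f1->f_mapping, f2->f_mapping);
        /* ... work that must not race with invalidation on either file ... */
        filemap_invalidate_unlock_two(f1->f_mapping, f2->f_mapping);
}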
1021 :
1022 : /*
1023 : * In order to wait for pages to become available there must be
1024 : * waitqueues associated with pages. Rather than one waitqueue per
1025 : * page, we use a hash table of waitqueues: all waiters for pages
1026 : * that hash to the same bucket share one queue, all of them are
1027 : * woken when any of those pages becomes available, and each woken
1028 : * context re-checks that the page it was waiting for really did
1029 : * become available. This saves space at the cost of "thundering
1030 : * herd" wakeups during rare hash collisions.
1031 : */
1032 : #define PAGE_WAIT_TABLE_BITS 8
1033 : #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1034 : static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1035 :
1036 : static wait_queue_head_t *folio_waitqueue(struct folio *folio)
1037 : {
1038 116035281 : return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];
1039 : }
1040 :
1041 0 : void __init pagecache_init(void)
1042 : {
1043 0 : int i;
1044 :
1045 0 : for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1046 0 : init_waitqueue_head(&folio_wait_table[i]);
1047 :
1048 0 : page_writeback_init();
1049 0 : }
1050 :
1051 : /*
1052 : * The page wait code treats the "wait->flags" somewhat unusually, because
1053 : * we have multiple different kinds of waits, not just the usual "exclusive"
1054 : * one.
1055 : *
1056 : * We have:
1057 : *
1058 : * (a) no special bits set:
1059 : *
1060 : * We're just waiting for the bit to be released, and when a waker
1061 : * calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1062 : * and remove it from the wait queue.
1063 : *
1064 : * Simple and straightforward.
1065 : *
1066 : * (b) WQ_FLAG_EXCLUSIVE:
1067 : *
1068 : * The waiter is waiting to get the lock, and only one waiter should
1069 : * be woken up to avoid any thundering herd behavior. We'll set the
1070 : * WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1071 : *
1072 : * This is the traditional exclusive wait.
1073 : *
1074 : * (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1075 : *
1076 : * The waiter is waiting to get the bit, and additionally wants the
1077 : * lock to be transferred to it for fair lock behavior. If the lock
1078 : * cannot be taken, we stop walking the wait queue without waking
1079 : * the waiter.
1080 : *
1081 : * This is the "fair lock handoff" case, and in addition to setting
1082 : * WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1083 : * that it now has the lock.
1084 : */
1085 57864700 : static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1086 : {
1087 57864700 : unsigned int flags;
1088 57864700 : struct wait_page_key *key = arg;
1089 57864700 : struct wait_page_queue *wait_page
1090 57864700 : = container_of(wait, struct wait_page_queue, wait);
1091 :
1092 57864700 : if (!wake_page_match(wait_page, key))
1093 372569 : return 0;
1094 :
1095 : /*
1096 : * If it's a lock handoff wait, we get the bit for it, and
1097 : * stop walking (and do not wake it up) if we can't.
1098 : */
1099 57492131 : flags = wait->flags;
1100 57492131 : if (flags & WQ_FLAG_EXCLUSIVE) {
1101 13397409 : if (test_bit(key->bit_nr, &key->folio->flags))
1102 : return -1;
1103 6698054 : if (flags & WQ_FLAG_CUSTOM) {
1104 9077 : if (test_and_set_bit(key->bit_nr, &key->folio->flags))
1105 : return -1;
1106 9077 : flags |= WQ_FLAG_DONE;
1107 : }
1108 : }
1109 :
1110 : /*
1111 : * We are holding the wait-queue lock, but the waiter that
1112 : * is waiting for this will be checking the flags without
1113 : * any locking.
1114 : *
1115 : * So update the flags atomically, and wake up the waiter
1116 : * afterwards to avoid any races. This store-release pairs
1117 : * with the load-acquire in folio_wait_bit_common().
1118 : */
1119 57491480 : smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1120 57491460 : wake_up_state(wait->private, mode);
1121 :
1122 : /*
1123 : * Ok, we have successfully done what we're waiting for,
1124 : * and we can unconditionally remove the wait entry.
1125 : *
1126 : * Note that this pairs with the "finish_wait()" in the
1127 : * waiter, and has to be the absolute last thing we do.
1128 : * After this list_del_init(&wait->entry) the wait entry
1129 : * might be de-allocated and the process might even have
1130 : * exited.
1131 : */
1132 57491510 : list_del_init_careful(&wait->entry);
1133 57491486 : return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1134 : }
1135 :
1136 57705288 : static void folio_wake_bit(struct folio *folio, int bit_nr)
1137 : {
1138 57705288 : wait_queue_head_t *q = folio_waitqueue(folio);
1139 57705288 : struct wait_page_key key;
1140 57705288 : unsigned long flags;
1141 57705288 : wait_queue_entry_t bookmark;
1142 :
1143 57705288 : key.folio = folio;
1144 57705288 : key.bit_nr = bit_nr;
1145 57705288 : key.page_match = 0;
1146 :
1147 57705288 : bookmark.flags = 0;
1148 57705288 : bookmark.private = NULL;
1149 57705288 : bookmark.func = NULL;
1150 57705288 : INIT_LIST_HEAD(&bookmark.entry);
1151 :
1152 57705288 : spin_lock_irqsave(&q->lock, flags);
1153 57705351 : __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1154 :
1155 57706071 : while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1156 : /*
1157 : * Take a breather from holding the lock,
1158 : * allow pages that finish wake up asynchronously
1159 : * to acquire the lock and remove themselves
1160 : * from wait queue
1161 : */
1162 721 : spin_unlock_irqrestore(&q->lock, flags);
1163 721 : cpu_relax();
1164 721 : spin_lock_irqsave(&q->lock, flags);
1165 721 : __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1166 : }
1167 :
1168 : /*
1169 : * It's possible to miss clearing waiters here, when we woke our page
1170 : * waiters, but the hashed waitqueue has waiters for other pages on it.
1171 : * That's okay, it's a rare case. The next waker will clear it.
1172 : *
1173 : * Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,
1174 : * other), the flag may be cleared in the course of freeing the page;
1175 : * but that is not required for correctness.
1176 : */
1177 57705343 : if (!waitqueue_active(q) || !key.page_match)
1178 57240465 : folio_clear_waiters(folio);
1179 :
1180 57705382 : spin_unlock_irqrestore(&q->lock, flags);
1181 57705336 : }
1182 :
1183 389530501 : static void folio_wake(struct folio *folio, int bit)
1184 : {
1185 389530501 : if (!folio_test_waiters(folio))
1186 : return;
1187 25238888 : folio_wake_bit(folio, bit);
1188 : }
1189 :
1190 : /*
1191 : * A choice of three behaviors for folio_wait_bit_common():
1192 : */
1193 : enum behavior {
1194 : EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1195 : * __folio_lock() waiting on then setting PG_locked.
1196 : */
1197 : SHARED, /* Hold ref to page and check the bit when woken, like
1198 : * folio_wait_writeback() waiting on PG_writeback.
1199 : */
1200 : DROP, /* Drop ref to page before wait, no check when woken,
1201 : * like folio_put_wait_locked() on PG_locked.
1202 : */
1203 : };
1204 :
1205 : /*
1206 : * Attempt to check (or get) the folio flag, and mark us done
1207 : * if successful.
1208 : */
1209 58820028 : static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,
1210 : struct wait_queue_entry *wait)
1211 : {
1212 58820028 : if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1213 7719659 : if (test_and_set_bit(bit_nr, &folio->flags))
1214 : return false;
1215 102200777 : } else if (test_bit(bit_nr, &folio->flags))
1216 : return false;
1217 :
1218 1328163 : wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1219 1328163 : return true;
1220 : }
1221 :
1222 : /* How many times do we accept lock stealing from under a waiter? */
1223 : int sysctl_page_lock_unfairness = 5;
1224 :
1225 58328373 : static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,
1226 : int state, enum behavior behavior)
1227 : {
1228 58328373 : wait_queue_head_t *q = folio_waitqueue(folio);
1229 58328373 : int unfairness = sysctl_page_lock_unfairness;
1230 58328373 : struct wait_page_queue wait_page;
1231 58328373 : wait_queue_entry_t *wait = &wait_page.wait;
1232 58328373 : bool thrashing = false;
1233 58328373 : unsigned long pflags;
1234 58328373 : bool in_thrashing;
1235 :
1236 90409836 : if (bit_nr == PG_locked &&
1237 58708081 : !folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1238 241506 : delayacct_thrashing_start(&in_thrashing);
1239 241503 : psi_memstall_enter(&pflags);
1240 241503 : thrashing = true;
1241 : }
1242 :
1243 58328396 : init_wait(wait);
1244 58328396 : wait->func = wake_page_function;
1245 58328396 : wait_page.folio = folio;
1246 58328396 : wait_page.bit_nr = bit_nr;
1247 :
1248 58815435 : repeat:
1249 58815435 : wait->flags = 0;
1250 58815435 : if (behavior == EXCLUSIVE) {
1251 7719050 : wait->flags = WQ_FLAG_EXCLUSIVE;
1252 7719050 : if (--unfairness < 0)
1253 9827 : wait->flags |= WQ_FLAG_CUSTOM;
1254 : }
1255 :
1256 : /*
1257 : * Do one last check whether we can get the
1258 : * page bit synchronously.
1259 : *
1260 : * Do the folio_set_waiters() marking before that
1261 : * to let any waker we _just_ missed know they
1262 : * need to wake us up (otherwise they'll never
1263 : * even go to the slow case that looks at the
1264 : * page queue), and add ourselves to the wait
1265 : * queue if we need to sleep.
1266 : *
1267 : * This part needs to be done under the queue
1268 : * lock to avoid races.
1269 : */
1270 58815435 : spin_lock_irq(&q->lock);
1271 58818224 : folio_set_waiters(folio);
1272 58818588 : if (!folio_trylock_flag(folio, bit_nr, wait))
1273 57490224 : __add_wait_queue_entry_tail(q, wait);
1274 58818176 : spin_unlock_irq(&q->lock);
1275 :
1276 : /*
1277 : * From now on, all the logic will be based on
1278 : * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1279 : * see whether the page bit testing has already
1280 : * been done by the wake function.
1281 : *
1282 : * We can drop our reference to the folio.
1283 : */
1284 58818485 : if (behavior == DROP)
1285 11088796 : folio_put(folio);
1286 :
1287 : /*
1288 : * Note that until the "finish_wait()", or until
1289 : * we see the WQ_FLAG_WOKEN flag, we need to
1290 : * be very careful with the 'wait->flags', because
1291 : * we may race with a waker that sets them.
1292 : */
1293 173394730 : for (;;) {
1294 116106497 : unsigned int flags;
1295 :
1296 116106497 : set_current_state(state);
1297 :
1298 : /* Loop until we've been woken or interrupted */
1299 116109109 : flags = smp_load_acquire(&wait->flags);
1300 116107510 : if (!(flags & WQ_FLAG_WOKEN)) {
1301 57296184 : if (signal_pending_state(state, current))
1302 : break;
1303 :
1304 57295019 : io_schedule();
1305 57288233 : continue;
1306 : }
1307 :
1308 : /* If we were non-exclusive, we're done */
1309 58811326 : if (behavior != EXCLUSIVE)
1310 : break;
1311 :
1312 : /* If the waker got the lock for us, we're done */
1313 7719451 : if (flags & WQ_FLAG_DONE)
1314 : break;
1315 :
1316 : /*
1317 : * Otherwise, if we're getting the lock, we need to
1318 : * try to get it ourselves.
1319 : *
1320 : * And if that fails, we'll have to retry this all.
1321 : */
1322 6688921 : if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))
1323 487039 : goto repeat;
1324 :
1325 6201929 : wait->flags |= WQ_FLAG_DONE;
1326 6201929 : break;
1327 : }
1328 :
1329 : /*
1330 : * If a signal happened, this 'finish_wait()' may remove the last
1331 : * waiter from the wait-queues, but the folio waiters bit will remain
1332 : * set. That's ok. The next wakeup will take care of it, and trying
1333 : * to do it here would be difficult and prone to races.
1334 : */
1335 58324042 : finish_wait(q, wait);
1336 :
1337 58316664 : if (thrashing) {
1338 241516 : delayacct_thrashing_end(&in_thrashing);
1339 241516 : psi_memstall_leave(&pflags);
1340 : }
1341 :
1342 : /*
1343 : * NOTE! The wait->flags weren't stable until we've done the
1344 : * 'finish_wait()', and we could have exited the loop above due
1345 : * to a signal, and had a wakeup event happen after the signal
1346 : * test but before the 'finish_wait()'.
1347 : *
1348 : * So only after the finish_wait() can we reliably determine
1349 : * if we got woken up or not, so we can now figure out the final
1350 : * return value based on that state without races.
1351 : *
1352 : * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1353 : * waiter, but an exclusive one requires WQ_FLAG_DONE.
1354 : */
1355 58316662 : if (behavior == EXCLUSIVE)
1356 7232584 : return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1357 :
1358 51084078 : return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1359 : }
1360 :
1361 : #ifdef CONFIG_MIGRATION
1362 : /**
1363 : * migration_entry_wait_on_locked - Wait for a migration entry to be removed
1364 : * @entry: migration swap entry.
1365 : * @ptl: already locked ptl. This function will drop the lock.
1366 : *
1367 : * Wait for a migration entry referencing the given page to be removed. This is
1368 : * equivalent to put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE) except
1369 : * this can be called without taking a reference on the page. Instead this
1370 : * should be called while holding the ptl for the migration entry referencing
1371 : * the page.
1372 : *
1373 : * Returns after unlocking the ptl.
1374 : *
1375 : * This follows the same logic as folio_wait_bit_common() so see the comments
1376 : * there.
1377 : */
1378 1620 : void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)
1379 : __releases(ptl)
1380 : {
1381 1620 : struct wait_page_queue wait_page;
1382 1620 : wait_queue_entry_t *wait = &wait_page.wait;
1383 1620 : bool thrashing = false;
1384 1620 : unsigned long pflags;
1385 1620 : bool in_thrashing;
1386 1620 : wait_queue_head_t *q;
1387 1620 : struct folio *folio = page_folio(pfn_swap_entry_to_page(entry));
1388 :
1389 1620 : q = folio_waitqueue(folio);
1390 1620 : if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {
1391 0 : delayacct_thrashing_start(&in_thrashing);
1392 0 : psi_memstall_enter(&pflags);
1393 0 : thrashing = true;
1394 : }
1395 :
1396 1620 : init_wait(wait);
1397 1620 : wait->func = wake_page_function;
1398 1620 : wait_page.folio = folio;
1399 1620 : wait_page.bit_nr = PG_locked;
1400 1620 : wait->flags = 0;
1401 :
1402 1620 : spin_lock_irq(&q->lock);
1403 1621 : folio_set_waiters(folio);
1404 1621 : if (!folio_trylock_flag(folio, PG_locked, wait))
1405 1621 : __add_wait_queue_entry_tail(q, wait);
1406 1621 : spin_unlock_irq(&q->lock);
1407 :
1408 : /*
1409 : * If a migration entry exists for the page the migration path must hold
1410 : * a valid reference to the page, and it must take the ptl to remove the
1411 : * migration entry. So the page is valid until the ptl is dropped.
1412 : */
1413 1621 : spin_unlock(ptl);
1414 :
1415 4863 : for (;;) {
1416 3242 : unsigned int flags;
1417 :
1418 3242 : set_current_state(TASK_UNINTERRUPTIBLE);
1419 :
1420 : /* Loop until we've been woken or interrupted */
1421 3242 : flags = smp_load_acquire(&wait->flags);
1422 3242 : if (!(flags & WQ_FLAG_WOKEN)) {
1423 1621 : if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))
1424 : break;
1425 :
1426 1621 : io_schedule();
1427 1621 : continue;
1428 : }
1429 : break;
1430 : }
1431 :
1432 1621 : finish_wait(q, wait);
1433 :
1434 1621 : if (thrashing) {
1435 0 : delayacct_thrashing_end(&in_thrashing);
1436 0 : psi_memstall_leave(&pflags);
1437 : }
1438 1621 : }
1439 : #endif
1440 :
1441 26773854 : void folio_wait_bit(struct folio *folio, int bit_nr)
1442 : {
1443 26773854 : folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1444 0 : }
1445 : EXPORT_SYMBOL(folio_wait_bit);
1446 :
1447 0 : int folio_wait_bit_killable(struct folio *folio, int bit_nr)
1448 : {
1449 13234442 : return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);
1450 : }
1451 : EXPORT_SYMBOL(folio_wait_bit_killable);
1452 :
1453 : /**
1454 : * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
1455 : * @folio: The folio to wait for.
1456 : * @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).
1457 : *
1458 : * The caller should hold a reference on @folio. They expect the page to
1459 : * become unlocked relatively soon, but do not wish to hold up migration
1460 : * (for example) by holding the reference while waiting for the folio to
1461 : * come unlocked. After this function returns, the caller should not
1462 : * dereference @folio.
1463 : *
1464 : * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.
1465 : */
1466 : static int folio_put_wait_locked(struct folio *folio, int state)
1467 : {
1468 11088490 : return folio_wait_bit_common(folio, PG_locked, state, DROP);
1469 : }
1470 :
1471 : /**
1472 : * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
1473 : * @folio: Folio defining the wait queue of interest
1474 : * @waiter: Waiter to add to the queue
1475 : *
1476 : * Add an arbitrary @waiter to the wait queue for the nominated @folio.
1477 : */
1478 0 : void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter)
1479 : {
1480 0 : wait_queue_head_t *q = folio_waitqueue(folio);
1481 0 : unsigned long flags;
1482 :
1483 0 : spin_lock_irqsave(&q->lock, flags);
1484 0 : __add_wait_queue_entry_tail(q, waiter);
1485 0 : folio_set_waiters(folio);
1486 0 : spin_unlock_irqrestore(&q->lock, flags);
1487 0 : }
1488 : EXPORT_SYMBOL_GPL(folio_add_wait_queue);
1489 :
1490 : #ifndef clear_bit_unlock_is_negative_byte
1491 :
1492 : /*
1493 : * PG_waiters is the high bit in the same byte as PG_locked.
1494 : *
1495 : * On x86 (and on many other architectures), we can clear PG_locked and
1496 : * test the sign bit at the same time. But if the architecture does
1497 : * not support that special operation, we just do this all by hand
1498 : * instead.
1499 : *
1500 : * The read of PG_waiters has to be after (or concurrently with) PG_locked
1501 : * being cleared, but a memory barrier should be unnecessary since it is
1502 : * in the same byte as PG_locked.
1503 : */
1504 : static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1505 : {
1506 : clear_bit_unlock(nr, mem);
1507 : /* smp_mb__after_atomic(); */
1508 : return test_bit(PG_waiters, mem);
1509 : }
1510 :
1511 : #endif
1512 :
1513 : /**
1514 : * folio_unlock - Unlock a locked folio.
1515 : * @folio: The folio.
1516 : *
1517 : * Unlocks the folio and wakes up any thread sleeping on the page lock.
1518 : *
1519 : * Context: May be called from interrupt or process context. May not be
1520 : * called from NMI context.
1521 : */
1522 42752119952 : void folio_unlock(struct folio *folio)
1523 : {
1524 : /* Bit 7 allows x86 to check the byte's sign bit */
1525 42752119952 : BUILD_BUG_ON(PG_waiters != 7);
1526 42752119952 : BUILD_BUG_ON(PG_locked > 7);
1527 42752119952 : VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1528 85520903561 : if (clear_bit_unlock_is_negative_byte(PG_locked, folio_flags(folio, 0)))
1529 32466460 : folio_wake_bit(folio, PG_locked);
1530 42792113105 : }
1531 : EXPORT_SYMBOL(folio_unlock);
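 :
 : /*
 :  * Editorial sketch (not part of filemap.c): the canonical pairing for
 :  * folio_unlock(). example_touch_folio() is a hypothetical caller; the
 :  * helpers used are the real <linux/pagemap.h> API.
 :  */
 : static void example_touch_folio(struct folio *folio)
 : {
 :         folio_lock(folio);      /* sleeps in __folio_lock() if contended */
 :         /* folio->mapping and the uptodate state are stable here */
 :         folio_unlock(folio);    /* clears PG_locked and wakes any waiter */
 : }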
1532 :
1533 : /**
1534 : * folio_end_private_2 - Clear PG_private_2 and wake any waiters.
1535 : * @folio: The folio.
1536 : *
1537 : * Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for
1538 : * it. The folio reference held for PG_private_2 being set is released.
1539 : *
1540 : * This is, for example, used when a netfs folio is being written to a local
1541 : * disk cache, thereby allowing writes to the cache for the same folio to be
1542 : * serialised.
1543 : */
1544 0 : void folio_end_private_2(struct folio *folio)
1545 : {
1546 0 : VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);
1547 0 : clear_bit_unlock(PG_private_2, folio_flags(folio, 0));
1548 0 : folio_wake_bit(folio, PG_private_2);
1549 0 : folio_put(folio);
1550 0 : }
1551 : EXPORT_SYMBOL(folio_end_private_2);
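 :
 : /*
 :  * Editorial sketch: the setup that folio_end_private_2() undoes. The
 :  * "start" side shown here is an assumption modelled on the netfs
 :  * helpers; example_start_cache_write() is hypothetical.
 :  */
 : static void example_start_cache_write(struct folio *folio)
 : {
 :         folio_get(folio);               /* ref held while PG_private_2 is set */
 :         folio_set_private_2(folio);
 :         /*
 :          * ... submit the write to the local cache; on completion the I/O
 :          * path calls folio_end_private_2(folio), which clears the flag,
 :          * wakes waiters and drops this reference.
 :          */
 : }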
1552 :
1553 : /**
1554 : * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.
1555 : * @folio: The folio to wait on.
1556 : *
1557 : * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio.
1558 : */
1559 0 : void folio_wait_private_2(struct folio *folio)
1560 : {
1561 0 : while (folio_test_private_2(folio))
1562 0 : folio_wait_bit(folio, PG_private_2);
1563 0 : }
1564 : EXPORT_SYMBOL(folio_wait_private_2);
1565 :
1566 : /**
1567 : * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
1568 : * @folio: The folio to wait on.
1569 : *
1570 : * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a
1571 : * fatal signal is received by the calling task.
1572 : *
1573 : * Return:
1574 : * - 0 if successful.
1575 : * - -EINTR if a fatal signal was encountered.
1576 : */
1577 0 : int folio_wait_private_2_killable(struct folio *folio)
1578 : {
1579 0 : int ret = 0;
1580 :
1581 0 : while (folio_test_private_2(folio)) {
1582 0 : ret = folio_wait_bit_killable(folio, PG_private_2);
1583 0 : if (ret < 0)
1584 : break;
1585 : }
1586 :
1587 0 : return ret;
1588 : }
1589 : EXPORT_SYMBOL(folio_wait_private_2_killable);
1590 :
1591 : /**
1592 : * folio_end_writeback - End writeback against a folio.
1593 : * @folio: The folio.
1594 : */
1595 389527493 : void folio_end_writeback(struct folio *folio)
1596 : {
1597 : /*
1598 : * folio_test_clear_reclaim() could be used here but it is an
1599 : * atomic operation and overkill in this particular case. The
1600 : * occasional failure to shuffle a folio marked for immediate
1601 : * reclaim is too small a loss to justify taking an atomic
1602 : * operation penalty at the end of every folio writeback.
1603 : */
1604 389527493 : if (folio_test_reclaim(folio)) {
1605 548572 : folio_clear_reclaim(folio);
1606 548572 : folio_rotate_reclaimable(folio);
1607 : }
1608 :
1609 : /*
1610 : * Writeback does not hold a folio reference of its own, relying
1611 : * on truncation to wait for the clearing of PG_writeback.
1612 : * But here we must make sure that the folio is not freed and
1613 : * reused before the folio_wake().
1614 : */
1615 389526049 : folio_get(folio);
1616 389529053 : if (!__folio_end_writeback(folio))
1617 0 : BUG();
1618 :
1619 389530575 : smp_mb__after_atomic();
1620 389530575 : folio_wake(folio, PG_writeback);
1621 389530594 : acct_reclaim_writeback(folio);
1622 389530414 : folio_put(folio);
1623 389530859 : }
1624 : EXPORT_SYMBOL(folio_end_writeback);
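 :
 : /*
 :  * Editorial sketch: a filesystem's write-completion path typically
 :  * funnels into folio_end_writeback(). example_write_endio() and its
 :  * signature are hypothetical.
 :  */
 : static void example_write_endio(struct folio *folio, int err)
 : {
 :         if (err)
 :                 mapping_set_error(folio->mapping, err);
 :         folio_end_writeback(folio);     /* wakes folio_wait_writeback() */
 : }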
1625 :
1626 : /**
1627 : * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
1628 : * @folio: The folio to lock
1629 : */
1630 3721657 : void __folio_lock(struct folio *folio)
1631 : {
1632 4640302 : folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,
1633 : EXCLUSIVE);
1634 918614 : }
1635 : EXPORT_SYMBOL(__folio_lock);
1636 :
1637 0 : int __folio_lock_killable(struct folio *folio)
1638 : {
1639 0 : return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,
1640 : EXCLUSIVE);
1641 : }
1642 : EXPORT_SYMBOL_GPL(__folio_lock_killable);
1643 :
1644 0 : static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)
1645 : {
1646 0 : struct wait_queue_head *q = folio_waitqueue(folio);
1647 0 : int ret = 0;
1648 :
1649 0 : wait->folio = folio;
1650 0 : wait->bit_nr = PG_locked;
1651 :
1652 0 : spin_lock_irq(&q->lock);
1653 0 : __add_wait_queue_entry_tail(q, &wait->wait);
1654 0 : folio_set_waiters(folio);
1655 0 : ret = !folio_trylock(folio);
1656 : /*
1657 : * If we were successful now, we know we're still on the
1658 : * waitqueue as we're still under the lock. This means it's
1659 : * safe to remove and return success, we know the callback
1660 : * isn't going to trigger.
1661 : */
1662 0 : if (!ret)
1663 0 : __remove_wait_queue(q, &wait->wait);
1664 : else
1665 : ret = -EIOCBQUEUED;
1666 0 : spin_unlock_irq(&q->lock);
1667 0 : return ret;
1668 : }
1669 :
1670 : /*
1671 : * Return values:
1672 : * true - folio is locked; mmap_lock is still held.
1673 : * false - folio is not locked.
1674 : * mmap_lock has been released (mmap_read_unlock(), unless flags had both
1675 : * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1676 : * which case mmap_lock is still held.
1677 : *
1678 : * If neither ALLOW_RETRY nor KILLABLE are set, will always return true
1679 : * with the folio locked and the mmap_lock unperturbed.
1680 : */
1681 0 : bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
1682 : unsigned int flags)
1683 : {
1684 0 : if (fault_flag_allow_retry_first(flags)) {
1685 : /*
1686 : * CAUTION! In this case, mmap_lock is not released
1687 : * even though we return false.
1688 : */
1689 0 : if (flags & FAULT_FLAG_RETRY_NOWAIT)
1690 : return false;
1691 :
1692 0 : mmap_read_unlock(mm);
1693 0 : if (flags & FAULT_FLAG_KILLABLE)
1694 0 : folio_wait_locked_killable(folio);
1695 : else
1696 0 : folio_wait_locked(folio);
1697 0 : return false;
1698 : }
1699 0 : if (flags & FAULT_FLAG_KILLABLE) {
1700 0 : bool ret;
1701 :
1702 0 : ret = __folio_lock_killable(folio);
1703 0 : if (ret) {
1704 0 : mmap_read_unlock(mm);
1705 0 : return false;
1706 : }
1707 : } else {
1708 0 : __folio_lock(folio);
1709 : }
1710 :
1711 : return true;
1712 : }
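 :
 : /*
 :  * Editorial sketch: how a fault handler consumes the contract above.
 :  * The real caller lives in mm/memory.c; example_fault_lock() is
 :  * hypothetical.
 :  */
 : static vm_fault_t example_fault_lock(struct folio *folio, struct vm_fault *vmf)
 : {
 :         if (!__folio_lock_or_retry(folio, vmf->vma->vm_mm, vmf->flags))
 :                 return VM_FAULT_RETRY; /* mmap_lock may have been dropped */
 :         /* folio is locked and mmap_lock is still held */
 :         return 0;
 : }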
1713 :
1714 : /**
1715 : * page_cache_next_miss() - Find the next gap in the page cache.
1716 : * @mapping: Mapping.
1717 : * @index: Index.
1718 : * @max_scan: Maximum range to search.
1719 : *
1720 : * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1721 : * gap with the lowest index.
1722 : *
1723 : * This function may be called under the rcu_read_lock. However, this will
1724 : * not atomically search a snapshot of the cache at a single point in time.
1725 : * For example, if a gap is created at index 5, then subsequently a gap is
1726 : * created at index 10, page_cache_next_miss() covering both indices may
1727 : * return 10 if called under the rcu_read_lock.
1728 : *
1729 : * Return: The index of the gap if found, otherwise an index outside the
1730 : * range specified (in which case 'return - index >= max_scan' will be true).
1731 : * In the rare case of index wrap-around, 0 will be returned.
1732 : */
1733 657865 : pgoff_t page_cache_next_miss(struct address_space *mapping,
1734 : pgoff_t index, unsigned long max_scan)
1735 : {
1736 657865 : XA_STATE(xas, &mapping->i_pages, index);
1737 :
1738 112860133 : while (max_scan--) {
1739 112768773 : void *entry = xas_next(&xas);
1740 112768722 : if (!entry || xa_is_value(entry))
1741 : break;
1742 112202268 : if (xas.xa_index == 0)
1743 : break;
1744 : }
1745 :
1746 657814 : return xas.xa_index;
1747 : }
1748 : EXPORT_SYMBOL(page_cache_next_miss);
1749 :
1750 : /**
1751 : * page_cache_prev_miss() - Find the previous gap in the page cache.
1752 : * @mapping: Mapping.
1753 : * @index: Index.
1754 : * @max_scan: Maximum range to search.
1755 : *
1756 : * Search the range [max(index - max_scan + 1, 0), index] for the
1757 : * gap with the highest index.
1758 : *
1759 : * This function may be called under the rcu_read_lock. However, this will
1760 : * not atomically search a snapshot of the cache at a single point in time.
1761 : * For example, if a gap is created at index 10, then subsequently a gap is
1762 : * created at index 5, page_cache_prev_miss() covering both indices may
1763 : * return 5 if called under the rcu_read_lock.
1764 : *
1765 : * Return: The index of the gap if found, otherwise an index outside the
1766 : * range specified (in which case 'index - return >= max_scan' will be true).
1767 : * In the rare case of wrap-around, ULONG_MAX will be returned.
1768 : */
1769 7543672 : pgoff_t page_cache_prev_miss(struct address_space *mapping,
1770 : pgoff_t index, unsigned long max_scan)
1771 : {
1772 7543672 : XA_STATE(xas, &mapping->i_pages, index);
1773 :
1774 10290459 : while (max_scan--) {
1775 10289608 : void *entry = xas_prev(&xas);
1776 10289599 : if (!entry || xa_is_value(entry))
1777 : break;
1778 2746787 : if (xas.xa_index == ULONG_MAX)
1779 : break;
1780 : }
1781 :
1782 7543663 : return xas.xa_index;
1783 : }
1784 : EXPORT_SYMBOL(page_cache_prev_miss);
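 :
 : /*
 :  * Editorial sketch: using the gap-search contract above to test whether
 :  * a range is fully cached. example_range_fully_cached() and the choice
 :  * of window are hypothetical.
 :  */
 : static bool example_range_fully_cached(struct address_space *mapping,
 :                                        pgoff_t index, unsigned long nr)
 : {
 :         pgoff_t gap = page_cache_next_miss(mapping, index, nr);
 :
 :         /* Per the contract above: no gap found iff gap - index >= nr */
 :         return gap - index >= nr;
 : }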
1785 :
1786 : /*
1787 : * Lockless page cache protocol:
1788 : * On the lookup side:
1789 : * 1. Load the folio from i_pages
1790 : * 2. Increment the refcount if it's not zero
1791 : * 3. If the folio is not found by xas_reload(), put the refcount and retry
1792 : *
1793 : * On the removal side:
1794 : * A. Freeze the page (by zeroing the refcount if nobody else has a reference)
1795 : * B. Remove the page from i_pages
1796 : * C. Return the page to the page allocator
1797 : *
1798 : * This means that any page may have its reference count temporarily
1799 : * increased by a speculative page cache (or fast GUP) lookup as it can
1800 : * be allocated by another user before the RCU grace period expires.
1801 : * Because the refcount temporarily acquired here may end up being the
1802 : * last refcount on the page, any page allocation must be freeable by
1803 : * folio_put().
1804 : */
1805 :
1806 : /*
1807 : * filemap_get_entry - Get a page cache entry.
1808 : * @mapping: the address_space to search
1809 : * @index: The page cache index.
1810 : *
1811 : * Looks up the page cache entry at @mapping & @index. If it is a folio,
1812 : * it is returned with an increased refcount. If it is a shadow entry
1813 : * of a previously evicted folio, or a swap entry from shmem/tmpfs,
1814 : * it is returned without further action.
1815 : *
1816 : * Return: The folio, swap or shadow entry, %NULL if nothing is found.
1817 : */
1818 5286714227 : void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
1819 : {
1820 5286714227 : XA_STATE(xas, &mapping->i_pages, index);
1821 5286714227 : struct folio *folio;
1822 :
1823 5286714227 : rcu_read_lock();
1824 : repeat:
1825 5284364066 : xas_reset(&xas);
1826 5284364066 : folio = xas_load(&xas);
1827 5287342604 : if (xas_retry(&xas, folio))
1828 0 : goto repeat;
1829 : /*
1830 : * A shadow entry of a recently evicted page, or a swap entry from
1831 : * shmem/tmpfs. Return it without attempting to raise page count.
1832 : */
1833 5287342604 : if (!folio || xa_is_value(folio))
1834 1828333082 : goto out;
1835 :
1836 3459009522 : if (!folio_try_get_rcu(folio))
1837 43115 : goto repeat;
1838 :
1839 3460111859 : if (unlikely(folio != xas_reload(&xas))) {
1840 38 : folio_put(folio);
1841 38 : goto repeat;
1842 : }
1843 3459694029 : out:
1844 5288027111 : rcu_read_unlock();
1845 :
1846 5284390413 : return folio;
1847 : }
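 :
 : /*
 :  * Editorial sketch: a filemap_get_entry() caller must separate value
 :  * entries (shadow/swap) from real folios, since only folios carry the
 :  * extra reference. example_inspect_entry() is hypothetical.
 :  */
 : static void example_inspect_entry(struct address_space *mapping, pgoff_t index)
 : {
 :         void *entry = filemap_get_entry(mapping, index);
 :
 :         if (!entry)
 :                 return;         /* nothing cached at @index */
 :         if (xa_is_value(entry))
 :                 return;         /* shadow/swap entry: no reference taken */
 :         folio_put((struct folio *)entry);       /* drop the ref we were given */
 : }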
1848 :
1849 : /**
1850 : * __filemap_get_folio - Find and get a reference to a folio.
1851 : * @mapping: The address_space to search.
1852 : * @index: The page index.
1853 : * @fgp_flags: %FGP flags modify how the folio is returned.
1854 : * @gfp: Memory allocation flags to use if %FGP_CREAT is specified.
1855 : *
1856 : * Looks up the page cache entry at @mapping & @index.
1857 : *
1858 : * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1859 : * if the %GFP flags specified for %FGP_CREAT are atomic.
1860 : *
1861 : * If this function returns a folio, it is returned with an increased refcount.
1862 : *
1863 : * Return: The found folio or an ERR_PTR() otherwise.
1864 : */
1865 4659729253 : struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
1866 : fgf_t fgp_flags, gfp_t gfp)
1867 : {
1868 4659731637 : struct folio *folio;
1869 :
1870 : repeat:
1871 4659731637 : folio = filemap_get_entry(mapping, index);
1872 4662151443 : if (xa_is_value(folio))
1873 : folio = NULL;
1874 4638148192 : if (!folio)
1875 1814679021 : goto no_page;
1876 :
1877 2847472422 : if (fgp_flags & FGP_LOCK) {
1878 364605791 : if (fgp_flags & FGP_NOWAIT) {
1879 0 : if (!folio_trylock(folio)) {
1880 0 : folio_put(folio);
1881 0 : return ERR_PTR(-EAGAIN);
1882 : }
1883 : } else {
1884 364605791 : folio_lock(folio);
1885 : }
1886 :
1887 : /* Has the page been truncated? */
1888 364589942 : if (unlikely(folio->mapping != mapping)) {
1889 11 : folio_unlock(folio);
1890 11 : folio_put(folio);
1891 11 : goto repeat;
1892 : }
1893 364589931 : VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
1894 : }
1895 :
1896 2847357351 : if (fgp_flags & FGP_ACCESSED)
1897 110696772 : folio_mark_accessed(folio);
1898 : else if (fgp_flags & FGP_WRITE) {
1899 : /* Clear idle flag for buffer write */
1900 : if (folio_test_idle(folio))
1901 : folio_clear_idle(folio);
1902 : }
1903 :
1904 2847383643 : if (fgp_flags & FGP_STABLE)
1905 283984462 : folio_wait_stable(folio);
1906 2563399181 : no_page:
1907 4661726834 : if (!folio && (fgp_flags & FGP_CREAT)) {
1908 330381996 : unsigned order = FGF_GET_ORDER(fgp_flags);
1909 330381996 : int err;
1910 :
1911 598183484 : if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1912 267807866 : gfp |= __GFP_WRITE;
1913 330264561 : if (fgp_flags & FGP_NOFS)
1914 232098137 : gfp &= ~__GFP_FS;
1915 330264561 : if (fgp_flags & FGP_NOWAIT) {
1916 2032 : gfp &= ~GFP_KERNEL;
1917 2032 : gfp |= GFP_NOWAIT | __GFP_NOWARN;
1918 : }
1919 330264561 : if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1920 0 : fgp_flags |= FGP_LOCK;
1921 :
1922 660529122 : if (!mapping_large_folio_support(mapping))
1923 : order = 0;
1924 232117159 : if (order > MAX_PAGECACHE_ORDER)
1925 : order = MAX_PAGECACHE_ORDER;
1926 : /* If we're not aligned, allocate a smaller folio */
1927 330264561 : if (index & ((1UL << order) - 1))
1928 45566674 : order = __ffs(index);
1929 :
1930 331137192 : do {
1931 331137192 : gfp_t alloc_gfp = gfp;
1932 :
1933 331137192 : err = -ENOMEM;
1934 331137192 : if (order == 1)
1935 : order = 0;
1936 305514085 : if (order > 0)
1937 41184085 : alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;
1938 331137192 : folio = filemap_alloc_folio(alloc_gfp, order);
1939 331235912 : if (!folio)
1940 100 : continue;
1941 :
1942 : /* Init accessed so avoid atomic mark_page_accessed later */
1943 331235812 : if (fgp_flags & FGP_ACCESSED)
1944 62494818 : __folio_set_referenced(folio);
1945 :
1946 331235630 : err = filemap_add_folio(mapping, folio, index, gfp);
1947 331340347 : if (!err)
1948 : break;
1949 804407 : folio_put(folio);
1950 804407 : folio = NULL;
1951 842192 : } while (order-- > 0);
1952 :
1953 330500931 : if (err == -EEXIST)
1954 2373 : goto repeat;
1955 330498558 : if (err)
1956 0 : return ERR_PTR(err);
1957 : /*
1958 : * filemap_add_folio locks the page, and for mmap
1959 : * we expect an unlocked page.
1960 : */
1961 330498558 : if (folio && (fgp_flags & FGP_FOR_MMAP))
1962 24497 : folio_unlock(folio);
1963 : }
1964 :
1965 4661843419 : if (!folio)
1966 1484238945 : return ERR_PTR(-ENOENT);
1967 : return folio;
1968 : }
1969 : EXPORT_SYMBOL(__filemap_get_folio);
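 :
 : /*
 :  * Editorial sketch: the common "find or create, locked" lookup in the
 :  * style of a buffered-write path. example_grab_folio() is hypothetical;
 :  * the flags follow the contract documented above.
 :  */
 : static struct folio *example_grab_folio(struct address_space *mapping,
 :                                         pgoff_t index)
 : {
 :         struct folio *folio;
 :
 :         folio = __filemap_get_folio(mapping, index,
 :                         FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE,
 :                         mapping_gfp_mask(mapping));
 :         if (IS_ERR(folio))
 :                 return folio;   /* e.g. ERR_PTR(-ENOMEM) */
 :         /* locked and referenced; caller must folio_unlock() + folio_put() */
 :         return folio;
 : }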
1970 :
1971 4573427362 : static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
1972 : xa_mark_t mark)
1973 : {
1974 4573437583 : struct folio *folio;
1975 :
1976 : retry:
1977 4573437583 : if (mark == XA_PRESENT)
1978 3026433564 : folio = xas_find(xas, max);
1979 : else
1980 1547004019 : folio = xas_find_marked(xas, max, mark);
1981 :
1982 4573862669 : if (xas_retry(xas, folio))
1983 0 : goto retry;
1984 : /*
1985 : * A shadow entry of a recently evicted page, a swap
1986 : * entry from shmem/tmpfs or a DAX entry. Return it
1987 : * without attempting to raise page count.
1988 : */
1989 4573862669 : if (!folio || xa_is_value(folio))
1990 1545332190 : return folio;
1991 :
1992 3028530479 : if (!folio_try_get_rcu(folio))
1993 10183 : goto reset;
1994 :
1995 3028873430 : if (unlikely(folio != xas_reload(xas))) {
1996 38 : folio_put(folio);
1997 38 : goto reset;
1998 : }
1999 :
2000 : return folio;
2001 10221 : reset:
2002 10221 : xas_reset(xas);
2003 10221 : goto retry;
2004 : }
2005 :
2006 : /**
2007 : * find_get_entries - gang pagecache lookup
2008 : * @mapping: The address_space to search
2009 : * @start: The starting page cache index
2010 : * @end: The final page index (inclusive).
2011 : * @fbatch: Where the resulting entries are placed.
2012 : * @indices: The cache indices corresponding to the entries in @fbatch
2013 : *
2014 : * find_get_entries() will search for and return a batch of entries in
2015 : * the mapping. The entries are placed in @fbatch. find_get_entries()
2016 : * takes a reference on any actual folios it returns.
2017 : *
2018 : * The entries have ascending indexes. The indices may not be consecutive
2019 : * due to not-present entries or large folios.
2020 : *
2021 : * Any shadow entries of evicted folios, or swap entries from
2022 : * shmem/tmpfs, are included in the returned array.
2023 : *
2024 : * Return: The number of entries which were found.
2025 : */
2026 174670112 : unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
2027 : pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2028 : {
2029 174670112 : XA_STATE(xas, &mapping->i_pages, *start);
2030 174670112 : struct folio *folio;
2031 :
2032 174670112 : rcu_read_lock();
2033 228218117 : while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2034 55661944 : indices[fbatch->nr] = xas.xa_index;
2035 55661944 : if (!folio_batch_add(fbatch, folio))
2036 : break;
2037 : }
2038 174662341 : rcu_read_unlock();
2039 :
2040 174668392 : if (folio_batch_count(fbatch)) {
2041 9929967 : unsigned long nr = 1;
2042 9929967 : int idx = folio_batch_count(fbatch) - 1;
2043 :
2044 9929967 : folio = fbatch->folios[idx];
2045 9929967 : if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2046 9606548 : nr = folio_nr_pages(folio);
2047 9929889 : *start = indices[idx] + nr;
2048 : }
2049 174668314 : return folio_batch_count(fbatch);
2050 : }
2051 :
2052 : /**
2053 : * find_lock_entries - Find a batch of pagecache entries.
2054 : * @mapping: The address_space to search.
2055 : * @start: The starting page cache index.
2056 : * @end: The final page index (inclusive).
2057 : * @fbatch: Where the resulting entries are placed.
2058 : * @indices: The cache indices of the entries in @fbatch.
2059 : *
2060 : * find_lock_entries() will return a batch of entries from @mapping.
2061 : * Swap, shadow and DAX entries are included. Folios are returned
2062 : * locked and with an incremented refcount. Folios which are locked
2063 : * by somebody else or under writeback are skipped. Folios which are
2064 : * partially outside the range are not returned.
2065 : *
2066 : * The entries have ascending indexes. The indices may not be consecutive
2067 : * due to not-present entries, large folios, folios which could not be
2068 : * locked or folios under writeback.
2069 : *
2070 : * Return: The number of entries which were found.
2071 : */
2072 309207113 : unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
2073 : pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2074 2401609003 : {
2075 309207113 : XA_STATE(xas, &mapping->i_pages, *start);
2076 309207113 : struct folio *folio;
2077 :
2078 309207113 : rcu_read_lock();
2079 2734585573 : while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
2080 2547986697 : if (!xa_is_value(folio)) {
2081 2430108579 : if (folio->index < *start)
2082 4140212 : goto put;
2083 2425968367 : if (folio->index + folio_nr_pages(folio) - 1 > end)
2084 5947242 : goto put;
2085 2419875248 : if (!folio_trylock(folio))
2086 6773218 : goto put;
2087 4826282965 : if (folio->mapping != mapping ||
2088 : folio_test_writeback(folio))
2089 6920644 : goto unlock;
2090 2406184653 : VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
2091 : folio);
2092 : }
2093 2523770100 : indices[fbatch->nr] = xas.xa_index;
2094 2523770100 : if (!folio_batch_add(fbatch, folio))
2095 : break;
2096 2401609003 : continue;
2097 : unlock:
2098 6920644 : folio_unlock(folio);
2099 23788764 : put:
2100 23788764 : folio_put(folio);
2101 : }
2102 309212790 : rcu_read_unlock();
2103 :
2104 309209315 : if (folio_batch_count(fbatch)) {
2105 249107829 : unsigned long nr = 1;
2106 249107829 : int idx = folio_batch_count(fbatch) - 1;
2107 :
2108 249107829 : folio = fbatch->folios[idx];
2109 249107829 : if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
2110 240524657 : nr = folio_nr_pages(folio);
2111 249102174 : *start = indices[idx] + nr;
2112 : }
2113 309203660 : return folio_batch_count(fbatch);
2114 : }
2115 :
2116 : /**
2117 : * filemap_get_folios - Get a batch of folios
2118 : * @mapping: The address_space to search
2119 : * @start: The starting page index
2120 : * @end: The final page index (inclusive)
2121 : * @fbatch: The batch to fill.
2122 : *
2123 : * Search for and return a batch of folios in the mapping starting at
2124 : * index @start and up to index @end (inclusive). The folios are returned
2125 : * in @fbatch with an elevated reference count.
2126 : *
2127 : * The first folio may start before @start; if it does, it will contain
2128 : * @start. The final folio may extend beyond @end; if it does, it will
2129 : * contain @end. The folios have ascending indices. There may be gaps
2130 : * between the folios if there are indices which have no folio in the
2131 : * page cache. If folios are added to or removed from the page cache
2132 : * while this is running, they may or may not be found by this call.
2133 : *
2134 : * Return: The number of folios which were found.
2135 : * We also update @start to index the next folio for the traversal.
2136 : */
2137 33087166 : unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
2138 : pgoff_t end, struct folio_batch *fbatch)
2139 : {
2140 33087166 : XA_STATE(xas, &mapping->i_pages, *start);
2141 33087166 : struct folio *folio;
2142 :
2143 33087166 : rcu_read_lock();
2144 63593900 : while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
2145 : /* Skip over shadow, swap and DAX entries */
2146 32362326 : if (xa_is_value(folio))
2147 0 : continue;
2148 32362326 : if (!folio_batch_add(fbatch, folio)) {
2149 1797235 : unsigned long nr = folio_nr_pages(folio);
2150 :
2151 1797178 : if (folio_test_hugetlb(folio))
2152 0 : nr = 1;
2153 1797178 : *start = folio->index + nr;
2154 1797178 : goto out;
2155 : }
2156 : }
2157 :
2158 : /*
2159 : * We come here when there is no page beyond @end. We take care to not
2160 : * overflow the index @start as it confuses some of the callers. This
2161 : * breaks the iteration when there is a page at index -1 but that is
2162 : * already broken anyway.
2163 : */
2164 31262696 : if (end == (pgoff_t)-1)
2165 0 : *start = (pgoff_t)-1;
2166 : else
2167 31262696 : *start = end + 1;
2168 33059874 : out:
2169 33059874 : rcu_read_unlock();
2170 :
2171 33052687 : return folio_batch_count(fbatch);
2172 : }
2173 : EXPORT_SYMBOL(filemap_get_folios);
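 :
 : /*
 :  * Editorial sketch: the usual batched walk over every cached folio in a
 :  * range. example_walk_folios() is hypothetical; the batching helpers are
 :  * the real <linux/pagevec.h> API.
 :  */
 : static void example_walk_folios(struct address_space *mapping, pgoff_t end)
 : {
 :         struct folio_batch fbatch;
 :         pgoff_t start = 0;
 :         unsigned int i;
 :
 :         folio_batch_init(&fbatch);
 :         while (filemap_get_folios(mapping, &start, end, &fbatch)) {
 :                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
 :                         struct folio *folio = fbatch.folios[i];
 :                         /* ... inspect @folio; lock it here if needed ... */
 :                 }
 :                 folio_batch_release(&fbatch);   /* drops the references */
 :                 cond_resched();
 :         }
 : }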
2174 :
2175 : static inline
2176 30759691868 : bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
2177 : {
2178 30759691868 : if (!folio_test_large(folio) || folio_test_hugetlb(folio))
2179 30735724750 : return false;
2180 11647647 : if (index >= max)
2181 : return false;
2182 10445890 : return index < folio->index + folio_nr_pages(folio) - 1;
2183 : }
2184 :
2185 : /**
2186 : * filemap_get_folios_contig - Get a batch of contiguous folios
2187 : * @mapping: The address_space to search
2188 : * @start: The starting page index
2189 : * @end: The final page index (inclusive)
2190 : * @fbatch: The batch to fill
2191 : *
2192 : * filemap_get_folios_contig() works exactly like filemap_get_folios(),
2193 : * except the returned folios are guaranteed to be contiguous. This may
2194 : * not return all contiguous folios if the batch gets filled up.
2195 : *
2196 : * Return: The number of folios found.
2197 : * Also update @start to be positioned for traversal of the next folio.
2198 : */
2199 :
2200 11184797 : unsigned filemap_get_folios_contig(struct address_space *mapping,
2201 : pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2202 103854002 : {
2203 11184797 : XA_STATE(xas, &mapping->i_pages, *start);
2204 11184797 : unsigned long nr;
2205 11184797 : struct folio *folio;
2206 :
2207 11184797 : rcu_read_lock();
2208 :
2209 115035555 : for (folio = xas_load(&xas); folio && xas.xa_index <= end;
2210 103854002 : folio = xas_next(&xas)) {
2211 110334820 : if (xas_retry(&xas, folio))
2212 0 : continue;
2213 : /*
2214 : * If the entry has been swapped out, we can stop looking.
2215 : * No current caller is looking for DAX entries.
2216 : */
2217 110334820 : if (xa_is_value(folio))
2218 0 : goto update_start;
2219 :
2220 110334820 : if (!folio_try_get_rcu(folio))
2221 0 : goto retry;
2222 :
2223 110337744 : if (unlikely(folio != xas_reload(&xas)))
2224 0 : goto put_folio;
2225 :
2226 110337833 : if (!folio_batch_add(fbatch, folio)) {
2227 6483831 : nr = folio_nr_pages(folio);
2228 :
2229 6483808 : if (folio_test_hugetlb(folio))
2230 0 : nr = 1;
2231 6483813 : *start = folio->index + nr;
2232 6483813 : goto out;
2233 : }
2234 103854002 : continue;
2235 : put_folio:
2236 0 : folio_put(folio);
2237 :
2238 0 : retry:
2239 0 : xas_reset(&xas);
2240 : }
2241 :
2242 4700827 : update_start:
2243 4700827 : nr = folio_batch_count(fbatch);
2244 :
2245 4700827 : if (nr) {
2246 4700815 : folio = fbatch->folios[nr - 1];
2247 4700815 : if (folio_test_hugetlb(folio))
2248 0 : *start = folio->index + 1;
2249 : else
2250 4700837 : *start = folio->index + folio_nr_pages(folio);
2251 : }
2252 12 : out:
2253 11184696 : rcu_read_unlock();
2254 11184689 : return folio_batch_count(fbatch);
2255 : }
2256 : EXPORT_SYMBOL(filemap_get_folios_contig);
2257 :
2258 : /**
2259 : * filemap_get_folios_tag - Get a batch of folios matching @tag
2260 : * @mapping: The address_space to search
2261 : * @start: The starting page index
2262 : * @end: The final page index (inclusive)
2263 : * @tag: The tag index
2264 : * @fbatch: The batch to fill
2265 : *
2266 : * Same as filemap_get_folios(), but only returning folios tagged with @tag.
2267 : *
2268 : * Return: The number of folios found.
2269 : * Also update @start to index the next folio for traversal.
2270 : */
2271 1050273115 : unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
2272 : pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2273 : {
2274 1050273115 : XA_STATE(xas, &mapping->i_pages, *start);
2275 1050273115 : struct folio *folio;
2276 :
2277 1050273115 : rcu_read_lock();
2278 1546912467 : while ((folio = find_get_entry(&xas, end, tag)) != NULL) {
2279 : /*
2280 : * Shadow entries should never be tagged, but this iteration
2281 : * is lockless so there is a window for page reclaim to evict
2282 : * a page we saw tagged. Skip over it.
2283 : */
2284 515174632 : if (xa_is_value(folio))
2285 0 : continue;
2286 515174632 : if (!folio_batch_add(fbatch, folio)) {
2287 17899293 : unsigned long nr = folio_nr_pages(folio);
2288 :
2289 17899120 : if (folio_test_hugetlb(folio))
2290 0 : nr = 1;
2291 17898995 : *start = folio->index + nr;
2292 17898995 : goto out;
2293 : }
2294 : }
2295 : /*
2296 : * We come here when there is no page beyond @end. We take care to not
2297 : * overflow the index @start as it confuses some of the callers. This
2298 : * breaks the iteration when there is a page at index -1 but that is
2299 : * already broken anyway.
2300 : */
2301 1032125072 : if (end == (pgoff_t)-1)
2302 39934199 : *start = (pgoff_t)-1;
2303 : else
2304 992190873 : *start = end + 1;
2305 1050024067 : out:
2306 1050024067 : rcu_read_unlock();
2307 :
2308 1049942185 : return folio_batch_count(fbatch);
2309 : }
2310 : EXPORT_SYMBOL(filemap_get_folios_tag);
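 :
 : /*
 :  * Editorial sketch: a writeback-style walk of dirty folios, assuming the
 :  * standard PAGECACHE_TAG_DIRTY tag. example_walk_dirty() is hypothetical.
 :  */
 : static void example_walk_dirty(struct address_space *mapping,
 :                                pgoff_t start, pgoff_t end)
 : {
 :         struct folio_batch fbatch;
 :         unsigned int i;
 :
 :         folio_batch_init(&fbatch);
 :         while (filemap_get_folios_tag(mapping, &start, end,
 :                                       PAGECACHE_TAG_DIRTY, &fbatch)) {
 :                 for (i = 0; i < folio_batch_count(&fbatch); i++) {
 :                         /* lock the folio, recheck ->mapping, write it out */
 :                 }
 :                 folio_batch_release(&fbatch);
 :                 cond_resched();
 :         }
 : }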
2311 :
2312 : /*
2313 : * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2314 : * a _large_ part of the i/o request. Imagine the worst scenario:
2315 : *
2316 : * ---R__________________________________________B__________
2317 : * ^ reading here ^ bad block(assume 4k)
2318 : *
2319 : * read(R) => miss => readahead(R...B) => media error => frustrating retries
2320 : * => failing the whole request => read(R) => read(R+1) =>
2321 : * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2322 : * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2323 : * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2324 : *
2325 : * It is going insane. Fix it by quickly scaling down the readahead size.
2326 : */
2327 : static void shrink_readahead_size_eio(struct file_ra_state *ra)
2328 : {
2329 8144 : ra->ra_pages /= 4;
2330 8144 : }
2331 :
2332 : /*
2333 : * filemap_get_read_batch - Get a batch of folios for read
2334 : *
2335 : * Get a batch of folios which represent a contiguous range of bytes in
2336 : * the file. No exceptional entries will be returned. If @index is in
2337 : * the middle of a folio, the entire folio will be returned. The last
2338 : * folio in the batch may have the readahead flag set or the uptodate flag
2339 : * clear so that the caller can take the appropriate action.
2340 : */
2341 964514351 : static void filemap_get_read_batch(struct address_space *mapping,
2342 : pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2343 1331821955 : {
2344 964514351 : XA_STATE(xas, &mapping->i_pages, index);
2345 964514351 : struct folio *folio;
2346 :
2347 964514351 : rcu_read_lock();
2348 2295993965 : for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
2349 2199344849 : if (xas_retry(&xas, folio))
2350 0 : continue;
2351 2199344849 : if (xas.xa_index > max || xa_is_value(folio))
2352 : break;
2353 1364493691 : if (xa_is_sibling(folio))
2354 : break;
2355 1364493691 : if (!folio_try_get_rcu(folio))
2356 203 : goto retry;
2357 :
2358 1366000599 : if (unlikely(folio != xas_reload(&xas)))
2359 114 : goto put_folio;
2360 :
2361 1365958887 : if (!folio_batch_add(fbatch, folio))
2362 : break;
2363 1346091578 : if (!folio_test_uptodate(folio))
2364 : break;
2365 1334646081 : if (folio_test_readahead(folio))
2366 : break;
2367 1333766822 : xas_advance(&xas, folio->index + folio_nr_pages(folio) - 1);
2368 1331821955 : continue;
2369 : put_folio:
2370 114 : folio_put(folio);
2371 317 : retry:
2372 317 : xas_reset(&xas);
2373 : }
2374 966510140 : rcu_read_unlock();
2375 966642398 : }
2376 :
2377 1212919573 : static int filemap_read_folio(struct file *file, filler_t filler,
2378 : struct folio *folio)
2379 : {
2380 1212919573 : bool workingset = folio_test_workingset(folio);
2381 1212285631 : unsigned long pflags;
2382 1212285631 : int error;
2383 :
2384 : /*
2385 : * A previous I/O error may have been due to temporary failures,
2386 : * eg. multipath errors. PG_error will be set again if read_folio
2387 : * fails.
2388 : */
2389 1212285631 : folio_clear_error(folio);
2390 :
2391 : /* Start the actual read. The read will unlock the page. */
2392 1212832453 : if (unlikely(workingset))
2393 1327470 : psi_memstall_enter(&pflags);
2394 1212832453 : error = filler(file, folio);
2395 1212819221 : if (unlikely(workingset))
2396 1327472 : psi_memstall_leave(&pflags);
2397 1212819221 : if (error)
2398 : return error;
2399 :
2400 1212842274 : error = folio_wait_locked_killable(folio);
2401 1212698367 : if (error)
2402 : return error;
2403 1212705019 : if (folio_test_uptodate(folio))
2404 : return 0;
2405 8144 : if (file)
2406 8144 : shrink_readahead_size_eio(&file->f_ra);
2407 : return -EIO;
2408 : }
2409 :
2410 61749 : static bool filemap_range_uptodate(struct address_space *mapping,
2411 : loff_t pos, size_t count, struct folio *folio,
2412 : bool need_uptodate)
2413 : {
2414 61749 : if (folio_test_uptodate(folio))
2415 : return true;
2416 : /* pipes can't handle partially uptodate pages */
2417 59576 : if (need_uptodate)
2418 : return false;
2419 46627 : if (!mapping->a_ops->is_partially_uptodate)
2420 : return false;
2421 19655 : if (mapping->host->i_blkbits >= folio_shift(folio))
2422 : return false;
2423 :
2424 56 : if (folio_pos(folio) > pos) {
2425 19 : count -= folio_pos(folio) - pos;
2426 19 : pos = 0;
2427 : } else {
2428 37 : pos -= folio_pos(folio);
2429 : }
2430 :
2431 56 : return mapping->a_ops->is_partially_uptodate(folio, pos, count);
2432 : }
2433 :
2434 11126587 : static int filemap_update_page(struct kiocb *iocb,
2435 : struct address_space *mapping, size_t count,
2436 : struct folio *folio, bool need_uptodate)
2437 : {
2438 11126587 : int error;
2439 :
2440 11126587 : if (iocb->ki_flags & IOCB_NOWAIT) {
2441 0 : if (!filemap_invalidate_trylock_shared(mapping))
2442 : return -EAGAIN;
2443 : } else {
2444 11126587 : filemap_invalidate_lock_shared(mapping);
2445 : }
2446 :
2447 11126766 : if (!folio_trylock(folio)) {
2448 11064974 : error = -EAGAIN;
2449 11064974 : if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
2450 0 : goto unlock_mapping;
2451 11064974 : if (!(iocb->ki_flags & IOCB_WAITQ)) {
2452 11064974 : filemap_invalidate_unlock_shared(mapping);
2453 : /*
2454 : * This is where we usually end up waiting for a
2455 : * previously submitted readahead to finish.
2456 : */
2457 11064882 : folio_put_wait_locked(folio, TASK_KILLABLE);
2458 11064882 : return AOP_TRUNCATED_PAGE;
2459 : }
2460 0 : error = __folio_lock_async(folio, iocb->ki_waitq);
2461 0 : if (error)
2462 0 : goto unlock_mapping;
2463 : }
2464 :
2465 61759 : error = AOP_TRUNCATED_PAGE;
2466 61759 : if (!folio->mapping)
2467 10 : goto unlock;
2468 :
2469 61749 : error = 0;
2470 61749 : if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
2471 : need_uptodate))
2472 2174 : goto unlock;
2473 :
2474 59575 : error = -EAGAIN;
2475 59575 : if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
2476 0 : goto unlock;
2477 :
2478 59575 : error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
2479 : folio);
2480 59575 : goto unlock_mapping;
2481 2184 : unlock:
2482 2184 : folio_unlock(folio);
2483 61759 : unlock_mapping:
2484 61759 : filemap_invalidate_unlock_shared(mapping);
2485 61759 : if (error == AOP_TRUNCATED_PAGE)
2486 10 : folio_put(folio);
2487 : return error;
2488 : }
2489 :
2490 152498 : static int filemap_create_folio(struct file *file,
2491 : struct address_space *mapping, pgoff_t index,
2492 : struct folio_batch *fbatch)
2493 : {
2494 152498 : struct folio *folio;
2495 152498 : int error;
2496 :
2497 152498 : folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
2498 152498 : if (!folio)
2499 : return -ENOMEM;
2500 :
2501 : /*
2502 : * Protect against truncate / hole punch. Grabbing invalidate_lock
2503 : * here assures we cannot instantiate and bring uptodate new
2504 : * pagecache folios after evicting page cache during truncate
2505 : * and before actually freeing blocks. Note that we could
2506 : * release invalidate_lock after inserting the folio into
2507 : * the page cache as the locked folio would then be enough to
2508 : * synchronize with hole punching. But there are code paths
2509 : * such as filemap_update_page() filling in partially uptodate
2510 : * pages or ->readahead() that need to hold invalidate_lock
2511 : * while mapping blocks for IO so let's hold the lock here as
2512 : * well to keep locking rules simple.
2513 : */
2514 152498 : filemap_invalidate_lock_shared(mapping);
2515 152498 : error = filemap_add_folio(mapping, folio, index,
2516 : mapping_gfp_constraint(mapping, GFP_KERNEL));
2517 152498 : if (error == -EEXIST)
2518 : error = AOP_TRUNCATED_PAGE;
2519 152495 : if (error)
2520 3 : goto error;
2521 :
2522 152495 : error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
2523 152495 : if (error)
2524 5 : goto error;
2525 :
2526 152490 : filemap_invalidate_unlock_shared(mapping);
2527 152490 : folio_batch_add(fbatch, folio);
2528 152490 : return 0;
2529 8 : error:
2530 8 : filemap_invalidate_unlock_shared(mapping);
2531 8 : folio_put(folio);
2532 8 : return error;
2533 : }
2534 :
2535 1125291 : static int filemap_readahead(struct kiocb *iocb, struct file *file,
2536 : struct address_space *mapping, struct folio *folio,
2537 : pgoff_t last_index)
2538 : {
2539 1125291 : DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);
2540 :
2541 1125291 : if (iocb->ki_flags & IOCB_NOIO)
2542 : return -EAGAIN;
2543 1125291 : page_cache_async_ra(&ractl, folio, last_index - folio->index);
2544 1125291 : return 0;
2545 : }
2546 :
2547 930039894 : static int filemap_get_pages(struct kiocb *iocb, size_t count,
2548 : struct folio_batch *fbatch, bool need_uptodate)
2549 : {
2550 930039894 : struct file *filp = iocb->ki_filp;
2551 930039894 : struct address_space *mapping = filp->f_mapping;
2552 930039894 : struct file_ra_state *ra = &filp->f_ra;
2553 930039894 : pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;
2554 930039894 : pgoff_t last_index;
2555 930039894 : struct folio *folio;
2556 930039894 : int err = 0;
2557 :
2558 : /* "last_index" is the index of the page beyond the end of the read */
2559 930039894 : last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
2560 : retry:
2561 939612534 : if (fatal_signal_pending(current))
2562 : return -EINTR;
2563 :
2564 937356069 : filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2565 939344276 : if (!folio_batch_count(fbatch)) {
2566 26840894 : if (iocb->ki_flags & IOCB_NOIO)
2567 : return -EAGAIN;
2568 26840894 : page_cache_sync_readahead(mapping, ra, filp, index,
2569 : last_index - index);
2570 26845742 : filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2571 : }
2572 939349361 : if (!folio_batch_count(fbatch)) {
2573 152498 : if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
2574 : return -EAGAIN;
2575 152498 : err = filemap_create_folio(filp, mapping,
2576 152498 : iocb->ki_pos >> PAGE_SHIFT, fbatch);
2577 152498 : if (err == AOP_TRUNCATED_PAGE)
2578 3 : goto retry;
2579 152495 : return err;
2580 : }
2581 :
2582 939196863 : folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2583 939196863 : if (folio_test_readahead(folio)) {
2584 1125331 : err = filemap_readahead(iocb, filp, mapping, folio, last_index);
2585 1125324 : if (err)
2586 0 : goto err;
2587 : }
2588 938672448 : if (!folio_test_uptodate(folio)) {
2589 11126709 : if ((iocb->ki_flags & IOCB_WAITQ) &&
2590 : folio_batch_count(fbatch) > 1)
2591 0 : iocb->ki_flags |= IOCB_NOWAIT;
2592 11126709 : err = filemap_update_page(iocb, mapping, count, folio,
2593 : need_uptodate);
2594 11124314 : if (err)
2595 11068810 : goto err;
2596 : }
2597 :
2598 : return 0;
2599 11068810 : err:
2600 11068810 : if (err < 0)
2601 6245 : folio_put(folio);
2602 11068810 : if (likely(--fbatch->nr))
2603 : return 0;
2604 9578778 : if (err == AOP_TRUNCATED_PAGE)
2605 9572637 : goto retry;
2606 : return err;
2607 : }
2608 :
2609 : static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
2610 : {
2611 913722570 : unsigned int shift = folio_shift(folio);
2612 :
2613 913404987 : return (pos1 >> shift == pos2 >> shift);
2614 : }
2615 :
2616 : /**
2617 : * filemap_read - Read data from the page cache.
2618 : * @iocb: The iocb to read.
2619 : * @iter: Destination for the data.
2620 : * @already_read: Number of bytes already read by the caller.
2621 : *
2622 : * Copies data from the page cache. If the data is not currently present,
2623 : * uses the readahead and read_folio address_space operations to fetch it.
2624 : *
2625 : * Return: Total number of bytes copied, including those already read by
2626 : * the caller. If an error happens before any bytes are copied, returns
2627 : * a negative error number.
2628 : */
2629 915894015 : ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
2630 : ssize_t already_read)
2631 : {
2632 915894015 : struct file *filp = iocb->ki_filp;
2633 915894015 : struct file_ra_state *ra = &filp->f_ra;
2634 915894015 : struct address_space *mapping = filp->f_mapping;
2635 915894015 : struct inode *inode = mapping->host;
2636 915894015 : struct folio_batch fbatch;
2637 915894015 : int i, error = 0;
2638 915894015 : bool writably_mapped;
2639 915894015 : loff_t isize, end_offset;
2640 :
2641 915894015 : if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
2642 : return 0;
2643 915894015 : if (unlikely(!iov_iter_count(iter)))
2644 : return 0;
2645 :
2646 915894015 : iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2647 915894015 : folio_batch_init(&fbatch);
2648 :
2649 936129336 : do {
2650 936129336 : cond_resched();
2651 :
2652 : /*
2653 : * If we've already successfully copied some data, then we
2654 : * can no longer safely return -EIOCBQUEUED. Hence mark
2655 : * an async read NOWAIT at that point.
2656 : */
2657 935505187 : if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
2658 0 : iocb->ki_flags |= IOCB_NOWAIT;
2659 :
2660 935505187 : if (unlikely(iocb->ki_pos >= i_size_read(inode)))
2661 : break;
2662 :
2663 914998868 : error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2664 913728736 : if (error < 0)
2665 : break;
2666 :
2667 : /*
2668 : * i_size must be checked after we know the pages are Uptodate.
2669 : *
2670 : * Checking i_size after the uptodate check allows us to calculate
2671 : * the correct value for "nr", which means the zero-filled
2672 : * part of the page is not copied back to userspace (unless
2673 : * another truncate extends the file - this is desired though).
2674 : */
2675 913722570 : isize = i_size_read(inode);
2676 913722570 : if (unlikely(iocb->ki_pos >= isize))
2677 0 : goto put_folios;
2678 913722570 : end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
2679 :
2680 : /*
2681 : * Once we start copying data, we don't want to be touching any
2682 : * cachelines that might be contended:
2683 : */
2684 913722570 : writably_mapped = mapping_writably_mapped(mapping);
2685 :
2686 : /*
2687 : * When a read accesses the same folio several times, only
2688 : * mark it as accessed the first time.
2689 : */
2690 913722570 : if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
2691 : fbatch.folios[0]))
2692 423327947 : folio_mark_accessed(fbatch.folios[0]);
2693 :
2694 2173024147 : for (i = 0; i < folio_batch_count(&fbatch); i++) {
2695 1257848766 : struct folio *folio = fbatch.folios[i];
2696 1257848766 : size_t fsize = folio_size(folio);
2697 1257433825 : size_t offset = iocb->ki_pos & (fsize - 1);
2698 1257433825 : size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
2699 : fsize - offset);
2700 1257433825 : size_t copied;
2701 :
2702 1257433825 : if (end_offset < folio_pos(folio))
2703 : break;
2704 1257433824 : if (i > 0)
2705 344365730 : folio_mark_accessed(folio);
2706 : /*
2707 : * If users can be writing to this folio using arbitrary
2708 : * virtual addresses, take care of potential aliasing
2709 : * before reading the folio on the kernel side.
2710 : */
2711 1257443422 : if (writably_mapped)
2712 : flush_dcache_folio(folio);
2713 :
2714 1257443422 : copied = copy_folio_to_iter(folio, offset, bytes, iter);
2715 :
2716 1259778909 : already_read += copied;
2717 1259778909 : iocb->ki_pos += copied;
2718 1259778909 : ra->prev_pos = iocb->ki_pos;
2719 :
2720 1259778909 : if (copied < bytes) {
2721 : error = -EFAULT;
2722 : break;
2723 : }
2724 : }
2725 915175382 : put_folios:
2726 2175447429 : for (i = 0; i < folio_batch_count(&fbatch); i++)
2727 1259398589 : folio_put(fbatch.folios[i]);
2728 916048840 : folio_batch_init(&fbatch);
2729 916048840 : } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);
2730 :
2731 916326004 : file_accessed(filp);
2732 :
2733 913495115 : return already_read ? already_read : error;
2734 : }
2735 : EXPORT_SYMBOL_GPL(filemap_read);
2736 :
2737 40691438 : int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
2738 : {
2739 40691438 : struct address_space *mapping = iocb->ki_filp->f_mapping;
2740 40691438 : loff_t pos = iocb->ki_pos;
2741 40691438 : loff_t end = pos + count - 1;
2742 :
2743 40691438 : if (iocb->ki_flags & IOCB_NOWAIT) {
2744 0 : if (filemap_range_needs_writeback(mapping, pos, end))
2745 : return -EAGAIN;
2746 0 : return 0;
2747 : }
2748 :
2749 40691438 : return filemap_write_and_wait_range(mapping, pos, end);
2750 : }
2751 :
2752 30516062 : int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
2753 : {
2754 30516062 : struct address_space *mapping = iocb->ki_filp->f_mapping;
2755 30516062 : loff_t pos = iocb->ki_pos;
2756 30516062 : loff_t end = pos + count - 1;
2757 30516062 : int ret;
2758 :
2759 30516062 : if (iocb->ki_flags & IOCB_NOWAIT) {
2760 : /* we could block if there are any pages in the range */
2761 5 : if (filemap_range_has_page(mapping, pos, end))
2762 : return -EAGAIN;
2763 : } else {
2764 30516057 : ret = filemap_write_and_wait_range(mapping, pos, end);
2765 30519120 : if (ret)
2766 : return ret;
2767 : }
2768 :
2769 : /*
2770 : * After a write we want buffered reads to be sure to go to disk to get
2771 : * the new data. We invalidate clean cached pages from the region we're
2772 : * about to write. We do this *before* the write so that we can return
2773 : * without clobbering -EIOCBQUEUED from ->direct_IO().
2774 : */
2775 30519054 : return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
2776 30519054 : end >> PAGE_SHIFT);
2777 : }
2778 :
2779 : /**
2780 : * generic_file_read_iter - generic filesystem read routine
2781 : * @iocb: kernel I/O control block
2782 : * @iter: destination for the data read
2783 : *
2784 : * This is the "read_iter()" routine for all filesystems
2785 : * that can use the page cache directly.
2786 : *
2787 : * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2788 : * be returned when no data can be read without waiting for I/O requests
2789 : * to complete; it doesn't prevent readahead.
2790 : *
2791 : * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2792 : * requests shall be made for the read or for readahead. When no data
2793 : * can be read, -EAGAIN shall be returned. When readahead would be
2794 : * triggered, a partial, possibly empty read shall be returned.
2795 : *
2796 : * Return:
2797 : * * number of bytes copied, even for partial reads
2798 : * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2799 : */
2800 : ssize_t
2801 785009267 : generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2802 : {
2803 785009267 : size_t count = iov_iter_count(iter);
2804 785009267 : ssize_t retval = 0;
2805 :
2806 785009267 : if (!count)
2807 : return 0; /* skip atime */
2808 :
2809 785008856 : if (iocb->ki_flags & IOCB_DIRECT) {
2810 0 : struct file *file = iocb->ki_filp;
2811 0 : struct address_space *mapping = file->f_mapping;
2812 0 : struct inode *inode = mapping->host;
2813 :
2814 0 : retval = kiocb_write_and_wait(iocb, count);
2815 0 : if (retval < 0)
2816 : return retval;
2817 0 : file_accessed(file);
2818 :
2819 0 : retval = mapping->a_ops->direct_IO(iocb, iter);
2820 0 : if (retval >= 0) {
2821 0 : iocb->ki_pos += retval;
2822 0 : count -= retval;
2823 : }
2824 0 : if (retval != -EIOCBQUEUED)
2825 0 : iov_iter_revert(iter, count - iov_iter_count(iter));
2826 :
2827 : /*
2828 : * Btrfs can have a short DIO read if we encounter
2829 : * compressed extents, so if there was an error, or if
2830 : * we've already read everything we wanted to, or if
2831 : * there was a short read because we hit EOF, go ahead
2832 : * and return. Otherwise fallthrough to buffered io for
2833 : * the rest of the read. Buffered reads will not work for
2834 : * DAX files, so don't bother trying.
2835 : */
2836 0 : if (retval < 0 || !count || IS_DAX(inode))
2837 : return retval;
2838 0 : if (iocb->ki_pos >= i_size_read(inode))
2839 : return retval;
2840 : }
2841 :
2842 785008856 : return filemap_read(iocb, iter, retval);
2843 : }
2844 : EXPORT_SYMBOL(generic_file_read_iter);
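 :
 : /*
 :  * Editorial sketch: how a pagecache-backed filesystem typically wires up
 :  * its read paths. example_file_operations is hypothetical; the helpers
 :  * are the ones exported from this file and from fs/read_write.c.
 :  */
 : static const struct file_operations example_file_operations = {
 :         .llseek         = generic_file_llseek,
 :         .read_iter      = generic_file_read_iter,
 :         .splice_read    = filemap_splice_read,
 :         .mmap           = generic_file_mmap,
 : };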
2845 :
2846 : /*
2847 : * Splice subpages from a folio into a pipe.
2848 : */
2849 91861058 : size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,
2850 : struct folio *folio, loff_t fpos, size_t size)
2851 : {
2852 91861058 : struct page *page;
2853 91861058 : size_t spliced = 0, offset = offset_in_folio(folio, fpos);
2854 :
2855 91859975 : page = folio_page(folio, offset / PAGE_SIZE);
2856 91859975 : size = min(size, folio_size(folio) - offset);
2857 91859630 : offset %= PAGE_SIZE;
2858 :
2859 204130935 : while (spliced < size &&
2860 112697899 : !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
2861 112270361 : struct pipe_buffer *buf = pipe_head_buf(pipe);
2862 112270361 : size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
2863 :
2864 112270361 : *buf = (struct pipe_buffer) {
2865 : .ops = &page_cache_pipe_buf_ops,
2866 : .page = page,
2867 : .offset = offset,
2868 : .len = part,
2869 : };
2870 112270361 : folio_get(folio);
2871 112271305 : pipe->head++;
2872 112271305 : page++;
2873 112271305 : spliced += part;
2874 112271305 : offset = 0;
2875 : }
2876 :
2877 91860574 : return spliced;
2878 : }
2879 :
2880 : /**
2881 : * filemap_splice_read - Splice data from a file's pagecache into a pipe
2882 : * @in: The file to read from
2883 : * @ppos: Pointer to the file position to read from
2884 : * @pipe: The pipe to splice into
2885 : * @len: The amount to splice
2886 : * @flags: The SPLICE_F_* flags
2887 : *
2888 : * This function gets folios from a file's pagecache and splices them into the
2889 : * pipe. Readahead will be called as necessary to fill more folios. This may
2890 : * be used for blockdevs also.
2891 : *
2892 : * Return: On success, the number of bytes read will be returned and *@ppos
2893 : * will be updated if appropriate; 0 will be returned if there is no more data
2894 : * to be read; -EAGAIN will be returned if the pipe had no space, and some
2895 : * other negative error code will be returned on error. A short read may occur
2896 : * if the pipe has insufficient space, we reach the end of the data or we hit a
2897 : * hole.
2898 : */
2899 10344127 : ssize_t filemap_splice_read(struct file *in, loff_t *ppos,
2900 : struct pipe_inode_info *pipe,
2901 : size_t len, unsigned int flags)
2902 : {
2903 10344127 : struct folio_batch fbatch;
2904 10344127 : struct kiocb iocb;
2905 10344127 : size_t total_spliced = 0, used, npages;
2906 10344127 : loff_t isize, end_offset;
2907 10344127 : bool writably_mapped;
2908 10344127 : int i, error = 0;
2909 :
2910 10344127 : if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))
2911 : return 0;
2912 :
2913 10344127 : init_sync_kiocb(&iocb, in);
2914 10344219 : iocb.ki_pos = *ppos;
2915 :
2916 : /* Work out how much data we can actually add into the pipe */
2917 10344219 : used = pipe_occupancy(pipe->head, pipe->tail);
2918 10344219 : npages = max_t(ssize_t, pipe->max_usage - used, 0);
2919 10344219 : len = min_t(size_t, len, npages * PAGE_SIZE);
2920 :
2921 10344219 : folio_batch_init(&fbatch);
2922 :
2923 15884933 : do {
2924 15884933 : cond_resched();
2925 :
2926 15884953 : if (*ppos >= i_size_read(in->f_mapping->host))
2927 : break;
2928 :
2929 15884953 : iocb.ki_pos = *ppos;
2930 15884953 : error = filemap_get_pages(&iocb, len, &fbatch, true);
2931 15885131 : if (error < 0)
2932 : break;
2933 :
2934 : /*
2935 : * i_size must be checked after we know the pages are Uptodate.
2936 : *
2937 : * Checking i_size after the uptodate check allows us to calculate
2938 : * the correct value for "nr", which means the zero-filled
2939 : * part of the page is not copied back to userspace (unless
2940 : * another truncate extends the file - this is desired though).
2941 : */
2942 15884785 : isize = i_size_read(in->f_mapping->host);
2943 15884785 : if (unlikely(*ppos >= isize))
2944 : break;
2945 15884785 : end_offset = min_t(loff_t, isize, *ppos + len);
2946 :
2947 : /*
2948 : * Once we start copying data, we don't want to be touching any
2949 : * cachelines that might be contended:
2950 : */
2951 15884785 : writably_mapped = mapping_writably_mapped(in->f_mapping);
2952 :
2953 103964510 : for (i = 0; i < folio_batch_count(&fbatch); i++) {
2954 91860637 : struct folio *folio = fbatch.folios[i];
2955 91860637 : size_t n;
2956 :
2957 91860637 : if (folio_pos(folio) >= end_offset)
2958 0 : goto out;
2959 91860637 : folio_mark_accessed(folio);
2960 :
2961 : /*
2962 : * If users can be writing to this folio using arbitrary
2963 : * virtual addresses, take care of potential aliasing
2964 : * before reading the folio on the kernel side.
2965 : */
2966 91861233 : if (writably_mapped)
2967 : flush_dcache_folio(folio);
2968 :
2969 91861233 : n = min_t(loff_t, len, isize - *ppos);
2970 91861233 : n = splice_folio_into_pipe(pipe, folio, *ppos, n);
2971 91860688 : if (!n)
2972 0 : goto out;
2973 91860688 : len -= n;
2974 91860688 : total_spliced += n;
2975 91860688 : *ppos += n;
2976 91860688 : in->f_ra.prev_pos = *ppos;
2977 91860688 : if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
2978 3780963 : goto out;
2979 : }
2980 :
2981 12103873 : folio_batch_release(&fbatch);
2982 12103907 : } while (len);
2983 :
2984 6563539 : out:
2985 10344502 : folio_batch_release(&fbatch);
2986 10344506 : file_accessed(in);
2987 :
2988 10344147 : return total_spliced ? total_spliced : error;
2989 : }
2990 : EXPORT_SYMBOL(filemap_splice_read);
2991 :
2992 241157 : static inline loff_t folio_seek_hole_data(struct xa_state *xas,
2993 : struct address_space *mapping, struct folio *folio,
2994 : loff_t start, loff_t end, bool seek_data)
2995 : {
2996 241157 : const struct address_space_operations *ops = mapping->a_ops;
2997 241157 : size_t offset, bsz = i_blocksize(mapping->host);
2998 :
2999 241157 : if (xa_is_value(folio) || folio_test_uptodate(folio))
3000 212531 : return seek_data ? start : end;
3001 28626 : if (!ops->is_partially_uptodate)
3002 0 : return seek_data ? end : start;
3003 :
3004 28626 : xas_pause(xas);
3005 28626 : rcu_read_unlock();
3006 28626 : folio_lock(folio);
3007 28626 : if (unlikely(folio->mapping != mapping))
3008 0 : goto unlock;
3009 :
3010 28626 : offset = offset_in_folio(folio, start) & ~(bsz - 1);
3011 :
3012 46539 : do {
3013 46539 : if (ops->is_partially_uptodate(folio, offset, bsz) ==
3014 : seek_data)
3015 : break;
3016 46539 : start = (start + bsz) & ~(bsz - 1);
3017 46539 : offset += bsz;
3018 46539 : } while (offset < folio_size(folio));
3019 28626 : unlock:
3020 28626 : folio_unlock(folio);
3021 28626 : rcu_read_lock();
3022 28626 : return start;
3023 : }
3024 :
3025 241158 : static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
3026 : {
3027 241158 : if (xa_is_value(folio))
3028 2258 : return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
3029 238900 : return folio_size(folio);
3030 : }
3031 :
3032 : /**
3033 : * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
3034 : * @mapping: Address space to search.
3035 : * @start: First byte to consider.
3036 : * @end: Limit of search (exclusive).
3037 : * @whence: Either SEEK_HOLE or SEEK_DATA.
3038 : *
3039 : * If the page cache knows which blocks contain holes and which blocks
3040 : * contain data, your filesystem can use this function to implement
3041 : * SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are
3042 : * entirely memory-based such as tmpfs, and filesystems which support
3043 : * unwritten extents.
3044 : *
3045 : * Return: The requested offset on success, or -ENXIO if @whence specifies
3046 : * SEEK_DATA and there is no data after @start. There is an implicit hole
3047 : * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
3048 : * and @end contain data.
3049 : */
3050 77391 : loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
3051 : loff_t end, int whence)
3052 : {
3053 77391 : XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
3054 77391 : pgoff_t max = (end - 1) >> PAGE_SHIFT;
3055 77391 : bool seek_data = (whence == SEEK_DATA);
3056 77391 : struct folio *folio;
3057 :
3058 77391 : if (end <= start)
3059 : return -ENXIO;
3060 :
3061 77391 : rcu_read_lock();
3062 275474 : while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
3063 241448 : loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
3064 241448 : size_t seek_size;
3065 :
3066 241448 : if (start < pos) {
3067 5452 : if (!seek_data)
3068 290 : goto unlock;
3069 : start = pos;
3070 : }
3071 :
3072 241158 : seek_size = seek_folio_size(&xas, folio);
3073 241157 : pos = round_up((u64)pos + 1, seek_size);
3074 241157 : start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
3075 : seek_data);
3076 241158 : if (start < pos)
3077 17476 : goto unlock;
3078 223682 : if (start >= end)
3079 : break;
3080 198084 : if (seek_size > PAGE_SIZE)
3081 4062 : xas_set(&xas, pos >> PAGE_SHIFT);
3082 198084 : if (!xa_is_value(folio))
3083 197957 : folio_put(folio);
3084 : }
3085 59625 : if (seek_data)
3086 55481 : start = -ENXIO;
3087 4144 : unlock:
3088 77391 : rcu_read_unlock();
3089 77390 : if (folio && !xa_is_value(folio))
3090 41231 : folio_put(folio);
3091 77390 : if (start > end)
3092 : return end;
3093 : return start;
3094 : }
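
/*
 * Editor's illustration (not part of the original source): a minimal
 * sketch of an llseek method built on mapping_seek_hole_data(), as the
 * kerneldoc above suggests for memory-backed filesystems (shmem does
 * something very similar). The name example_llseek is hypothetical.
 */
static loff_t example_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_DATA && whence != SEEK_HOLE)
		return generic_file_llseek(file, offset, whence);

	inode_lock_shared(inode);
	/* Bound the search by i_size; the hole past EOF is implicit. */
	offset = mapping_seek_hole_data(inode->i_mapping, offset,
					i_size_read(inode), whence);
	inode_unlock_shared(inode);

	if (offset >= 0)
		offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
	return offset;
}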
3095 :
3096 : #ifdef CONFIG_MMU
3097 : #define MMAP_LOTSAMISS (100)
3098 : /*
3099 : * lock_folio_maybe_drop_mmap - lock the folio, possibly dropping the mmap_lock
3100 : * @vmf - the vm_fault for this fault.
3101 : * @folio - the folio to lock.
3102 : * @fpin - the pointer to the file we may pin (or is already pinned).
3103 : *
3104 : * This works similarly to __folio_lock_or_retry() in that it can drop the
3105 : * mmap_lock. It differs in that it actually returns the folio locked
3106 : * if it returns 1 and 0 if it couldn't lock the folio. If we did have
3107 : * to drop the mmap_lock then fpin will point to the pinned file and
3108 : * needs to be fput()'ed at a later point.
3109 : */
3110 430282417 : static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,
3111 : struct file **fpin)
3112 : {
3113 430282417 : if (folio_trylock(folio))
3114 : return 1;
3115 :
3116 : /*
3117 : * NOTE! This will make us return with VM_FAULT_RETRY, but with
3118 : * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
3119 : * is supposed to work. We have way too many special cases.
3120 : */
3121 2591972 : if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
3122 : return 0;
3123 :
3124 2591974 : *fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
3125 2591945 : if (vmf->flags & FAULT_FLAG_KILLABLE) {
3126 2591945 : if (__folio_lock_killable(folio)) {
3127 : /*
3128 : * We didn't have the right flags to drop the mmap_lock,
3129 : * but all fault_handlers only check for fatal signals
3130 : * if we return VM_FAULT_RETRY, so we need to drop the
3131 : * mmap_lock here and return 0 if we don't have a fpin.
3132 : */
3133 175 : if (*fpin == NULL)
3134 0 : mmap_read_unlock(vmf->vma->vm_mm);
3135 175 : return 0;
3136 : }
3137 : } else
3138 0 : __folio_lock(folio);
3139 :
3140 : return 1;
3141 : }
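
/*
 * Editor's sketch (not part of the original source) of the caller
 * contract documented above: on a 0 return the fault must be backed out
 * with VM_FAULT_RETRY, and any pinned file released once it is safe.
 * The name example_fault_step is hypothetical; filemap_fault() below is
 * the real consumer.
 */
static vm_fault_t example_fault_step(struct vm_fault *vmf, struct folio *folio)
{
	struct file *fpin = NULL;

	if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin)) {
		/* Folio not locked; mmap_lock may already be dropped. */
		folio_put(folio);
		if (fpin)
			fput(fpin);
		return VM_FAULT_RETRY;
	}
	/*
	 * Folio is locked. If fpin is now non-NULL the mmap_lock was
	 * dropped for I/O, so a real handler must still finish with
	 * VM_FAULT_RETRY after fput(fpin) - see filemap_fault().
	 */
	folio_unlock(folio);
	if (fpin)
		fput(fpin);
	return 0;
}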
3142 :
3143 : /*
3144 : * Synchronous readahead happens when we don't even find a page in the page
3145 : * cache at all. We don't want to perform IO under the mmap_lock, so if we have
3146 : * to drop the mmap_lock we return the file that was pinned in order for us to do
3147 : * that. If we didn't pin a file then we return NULL. The file that is
3148 : * returned needs to be fput()'ed when we're done with it.
3149 : */
3150 4468065 : static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
3151 : {
3152 4468065 : struct file *file = vmf->vma->vm_file;
3153 4468065 : struct file_ra_state *ra = &file->f_ra;
3154 4468065 : struct address_space *mapping = file->f_mapping;
3155 4468065 : DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
3156 4468065 : struct file *fpin = NULL;
3157 4468065 : unsigned long vm_flags = vmf->vma->vm_flags;
3158 4468065 : unsigned int mmap_miss;
3159 :
3160 : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3161 : /* Use the readahead code, even if readahead is disabled */
3162 4468065 : if (vm_flags & VM_HUGEPAGE) {
3163 0 : fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3164 0 : ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
3165 0 : ra->size = HPAGE_PMD_NR;
3166 : /*
3167 : * Fetch two PMD folios, so we get the chance to actually
3168 : * readahead, unless we've been told not to.
3169 : */
3170 0 : if (!(vm_flags & VM_RAND_READ))
3171 0 : ra->size *= 2;
3172 0 : ra->async_size = HPAGE_PMD_NR;
3173 0 : page_cache_ra_order(&ractl, ra, HPAGE_PMD_ORDER);
3174 0 : return fpin;
3175 : }
3176 : #endif
3177 :
3178 : /* If we don't want any read-ahead, don't bother */
3179 4468065 : if (vm_flags & VM_RAND_READ)
3180 : return fpin;
3181 4449535 : if (!ra->ra_pages)
3182 : return fpin;
3183 :
3184 4449535 : if (vm_flags & VM_SEQ_READ) {
3185 0 : fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3186 0 : page_cache_sync_ra(&ractl, ra->ra_pages);
3187 0 : return fpin;
3188 : }
3189 :
3190 : /* Avoid banging the cache line if not needed */
3191 4449535 : mmap_miss = READ_ONCE(ra->mmap_miss);
3192 4449535 : if (mmap_miss < MMAP_LOTSAMISS * 10)
3193 4449580 : WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
3194 :
3195 : /*
3196 : * Do we miss much more than hit in this file? If so,
3197 : * stop bothering with read-ahead. It will only hurt.
3198 : */
3199 4449535 : if (mmap_miss > MMAP_LOTSAMISS)
3200 : return fpin;
3201 :
3202 : /*
3203 : * mmap read-around
3204 : */
3205 4449535 : fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3206 4449595 : ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
3207 4449595 : ra->size = ra->ra_pages;
3208 4449595 : ra->async_size = ra->ra_pages / 4;
3209 4449595 : ractl._index = ra->start;
3210 4449595 : page_cache_ra_order(&ractl, ra, 0);
3211 4449595 : return fpin;
3212 : }
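
/*
 * Editor's note (not part of the original source): a worked example of
 * the read-around sizing above, assuming the common 128KiB default
 * window (ra->ra_pages == 32 with 4KiB pages) and a fault at
 * vmf->pgoff == 100:
 *
 *	ra->start      = max(0, 100 - 32/2) = 84
 *	ra->size       = 32                  (pages 84..115 are read)
 *	ra->async_size = 32/4 = 8            (last 8 pages re-arm readahead)
 *
 * so the faulting page sits in the middle of the window rather than at
 * its leading edge.
 */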
3213 :
3214 : /*
3215 : * Asynchronous readahead happens when we find the page and PG_readahead,
3216 : * so we want to possibly extend the readahead further. We return the file that
3217 : * was pinned if we have to drop the mmap_lock in order to do IO.
3218 : */
3219 421201530 : static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
3220 : struct folio *folio)
3221 : {
3222 421201530 : struct file *file = vmf->vma->vm_file;
3223 421201530 : struct file_ra_state *ra = &file->f_ra;
3224 421201530 : DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);
3225 421201530 : struct file *fpin = NULL;
3226 421201530 : unsigned int mmap_miss;
3227 :
3228 : /* If we don't want any read-ahead, don't bother */
3229 421201530 : if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
3230 : return fpin;
3231 :
3232 421174078 : mmap_miss = READ_ONCE(ra->mmap_miss);
3233 421174078 : if (mmap_miss)
3234 3150001 : WRITE_ONCE(ra->mmap_miss, --mmap_miss);
3235 :
3236 421174078 : if (folio_test_readahead(folio)) {
3237 347270 : fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3238 347347 : page_cache_async_ra(&ractl, folio, ra->ra_pages);
3239 : }
3240 : return fpin;
3241 : }
3242 :
3243 : /**
3244 : * filemap_fault - read in file data for page fault handling
3245 : * @vmf: struct vm_fault containing details of the fault
3246 : *
3247 : * filemap_fault() is invoked via the vma operations vector for a
3248 : * mapped memory region to read in file data during a page fault.
3249 : *
3250 : * The goto's are kind of ugly, but this streamlines the normal case of having
3251 : * it in the page cache, and handles the special cases reasonably without
3252 : * having a lot of duplicated code.
3253 : *
3254 : * vma->vm_mm->mmap_lock must be held on entry.
3255 : *
3256 : * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
3257 : * may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().
3258 : *
3259 : * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
3260 : * has not been released.
3261 : *
3262 : * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
3263 : *
3264 : * Return: bitwise-OR of %VM_FAULT_ codes.
3265 : */
3266 430393028 : vm_fault_t filemap_fault(struct vm_fault *vmf)
3267 : {
3268 430393028 : int error;
3269 430393028 : struct file *file = vmf->vma->vm_file;
3270 430393028 : struct file *fpin = NULL;
3271 430393028 : struct address_space *mapping = file->f_mapping;
3272 430393028 : struct inode *inode = mapping->host;
3273 430393028 : pgoff_t max_idx, index = vmf->pgoff;
3274 430393028 : struct folio *folio;
3275 430393028 : vm_fault_t ret = 0;
3276 430393028 : bool mapping_locked = false;
3277 :
3278 430393028 : max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3279 430393028 : if (unlikely(index >= max_idx))
3280 : return VM_FAULT_SIGBUS;
3281 :
3282 : /*
3283 : * Do we have something in the page cache already?
3284 : */
3285 430389187 : folio = filemap_get_folio(mapping, index);
3286 430604934 : if (likely(!IS_ERR(folio))) {
3287 : /*
3288 : * We found the page, so try async readahead before waiting for
3289 : * the lock.
3290 : */
3291 426137226 : if (!(vmf->flags & FAULT_FLAG_TRIED))
3292 421437010 : fpin = do_async_mmap_readahead(vmf, folio);
3293 425831973 : if (unlikely(!folio_test_uptodate(folio))) {
3294 515069 : filemap_invalidate_lock_shared(mapping);
3295 515069 : mapping_locked = true;
3296 : }
3297 : } else {
3298 : /* No page in the page cache at all */
3299 4467708 : count_vm_event(PGMAJFAULT);
3300 4467917 : count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
3301 4468153 : ret = VM_FAULT_MAJOR;
3302 4468153 : fpin = do_sync_mmap_readahead(vmf);
3303 4473943 : retry_find:
3304 : /*
3305 : * See comment in filemap_create_folio() for why we need
3306 : * the invalidate_lock
3307 : */
3308 4473943 : if (!mapping_locked) {
3309 4473364 : filemap_invalidate_lock_shared(mapping);
3310 4473364 : mapping_locked = true;
3311 : }
3312 4473964 : folio = __filemap_get_folio(mapping, index,
3313 : FGP_CREAT|FGP_FOR_MMAP,
3314 4473964 : vmf->gfp_mask);
3315 4473951 : if (IS_ERR(folio)) {
3316 0 : if (fpin)
3317 0 : goto out_retry;
3318 0 : filemap_invalidate_unlock_shared(mapping);
3319 0 : return VM_FAULT_OOM;
3320 : }
3321 : }
3322 :
3323 430293607 : if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
3324 175 : goto out_retry;
3325 :
3326 : /* Did it get truncated? */
3327 430442696 : if (unlikely(folio->mapping != mapping)) {
3328 4997 : folio_unlock(folio);
3329 4997 : folio_put(folio);
3330 4997 : goto retry_find;
3331 : }
3332 430437699 : VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
3333 :
3334 : /*
3335 : * We have a locked page in the page cache; now we need to check
3336 : * that it's up-to-date. If not, it is going to be due to an error.
3337 : */
3338 430389853 : if (unlikely(!folio_test_uptodate(folio))) {
3339 : /*
3340 : * The page was in cache and uptodate and now it is not.
3341 : * Strange but possible since we didn't hold the page lock all
3342 : * the time. Let's drop everything, get the invalidate lock and try again.
3343 : * try again.
3344 : */
3345 66641 : if (!mapping_locked) {
3346 0 : folio_unlock(folio);
3347 0 : folio_put(folio);
3348 0 : goto retry_find;
3349 : }
3350 66641 : goto page_not_uptodate;
3351 : }
3352 :
3353 : /*
3354 : * We've made it this far and we had to drop our mmap_lock, now is the
3355 : * time to return to the upper layer and have it re-find the vma and
3356 : * redo the fault.
3357 : */
3358 430327830 : if (fpin) {
3359 6591290 : folio_unlock(folio);
3360 6591294 : goto out_retry;
3361 : }
3362 423736540 : if (mapping_locked)
3363 508 : filemap_invalidate_unlock_shared(mapping);
3364 :
3365 : /*
3366 : * Found the page and have a reference on it.
3367 : * We must recheck i_size under page lock.
3368 : */
3369 423736540 : max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3370 423736540 : if (unlikely(index >= max_idx)) {
3371 2 : folio_unlock(folio);
3372 2 : folio_put(folio);
3373 2 : return VM_FAULT_SIGBUS;
3374 : }
3375 :
3376 423736538 : vmf->page = folio_file_page(folio, index);
3377 423690349 : return ret | VM_FAULT_LOCKED;
3378 :
3379 : page_not_uptodate:
3380 : /*
3381 : * Umm, take care of errors if the page isn't up-to-date.
3382 : * Try to re-read it _once_. We do this synchronously,
3383 : * because there really aren't any performance issues here
3384 : * and we need to check for errors.
3385 : */
3386 66641 : fpin = maybe_unlock_mmap_for_io(vmf, fpin);
3387 66664 : error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
3388 66681 : if (fpin)
3389 66055 : goto out_retry;
3390 626 : folio_put(folio);
3391 :
3392 626 : if (!error || error == AOP_TRUNCATED_PAGE)
3393 524 : goto retry_find;
3394 102 : filemap_invalidate_unlock_shared(mapping);
3395 :
3396 102 : return VM_FAULT_SIGBUS;
3397 :
3398 6657524 : out_retry:
3399 : /*
3400 : * We dropped the mmap_lock, so we need to return to the fault handler to
3401 : * re-find the vma and come back and find our hopefully still populated
3402 : * page.
3403 : */
3404 6657524 : if (!IS_ERR(folio))
3405 6657524 : folio_put(folio);
3406 6657541 : if (mapping_locked)
3407 4987945 : filemap_invalidate_unlock_shared(mapping);
3408 6657535 : if (fpin)
3409 6657535 : fput(fpin);
3410 6657564 : return ret | VM_FAULT_RETRY;
3411 : }
3412 : EXPORT_SYMBOL(filemap_fault);
3413 :
3414 2344287988 : static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,
3415 : pgoff_t start)
3416 : {
3417 2344287988 : struct mm_struct *mm = vmf->vma->vm_mm;
3418 :
3419 : /* Huge page is mapped? No need to proceed. */
3420 2344287988 : if (pmd_trans_huge(*vmf->pmd)) {
3421 0 : folio_unlock(folio);
3422 0 : folio_put(folio);
3423 0 : return true;
3424 : }
3425 :
3426 2418299318 : if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
3427 36684 : struct page *page = folio_file_page(folio, start);
3428 36684 : vm_fault_t ret = do_set_pmd(vmf, page);
3429 36696 : if (!ret) {
3430 : /* The page is mapped successfully, reference consumed. */
3431 36696 : folio_unlock(folio);
3432 36696 : return true;
3433 : }
3434 : }
3435 :
3436 2344250132 : if (pmd_none(*vmf->pmd))
3437 73970896 : pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
3438 :
3439 : return false;
3440 : }
3441 :
3442 33319404164 : static struct folio *next_uptodate_page(struct folio *folio,
3443 : struct address_space *mapping,
3444 : struct xa_state *xas, pgoff_t end_pgoff)
3445 : {
3446 36372925303 : unsigned long max_idx;
3447 :
3448 36372925303 : do {
3449 36372925303 : if (!folio)
3450 : return NULL;
3451 34046070341 : if (xas_retry(xas, folio))
3452 0 : continue;
3453 34046070341 : if (xa_is_value(folio))
3454 141990820 : continue;
3455 33904079521 : if (folio_test_locked(folio))
3456 9420054 : continue;
3457 33861123472 : if (!folio_try_get_rcu(folio))
3458 0 : continue;
3459 : /* Has the page moved or been split? */
3460 33897795310 : if (unlikely(folio != xas_reload(xas)))
3461 0 : goto skip;
3462 67757767767 : if (!folio_test_uptodate(folio) || folio_test_readahead(folio))
3463 3145521960 : goto skip;
3464 30730646467 : if (!folio_trylock(folio))
3465 6255439 : goto skip;
3466 30730723582 : if (folio->mapping != mapping)
3467 0 : goto unlock;
3468 30730723582 : if (!folio_test_uptodate(folio))
3469 0 : goto unlock;
3470 30715747745 : max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
3471 30715747745 : if (xas->xa_index >= max_idx)
3472 0 : goto unlock;
3473 : return folio;
3474 0 : unlock:
3475 0 : folio_unlock(folio);
3476 3151777399 : skip:
3477 3151777399 : folio_put(folio);
3478 3304373957 : } while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);
3479 :
3480 : return NULL;
3481 : }
3482 :
3483 2577543637 : static inline struct folio *first_map_page(struct address_space *mapping,
3484 : struct xa_state *xas,
3485 : pgoff_t end_pgoff)
3486 : {
3487 2577543637 : return next_uptodate_page(xas_find(xas, end_pgoff),
3488 : mapping, xas, end_pgoff);
3489 : }
3490 :
3491 30753417102 : static inline struct folio *next_map_page(struct address_space *mapping,
3492 : struct xa_state *xas,
3493 : pgoff_t end_pgoff)
3494 : {
3495 30753417102 : return next_uptodate_page(xas_next_entry(xas, end_pgoff),
3496 : mapping, xas, end_pgoff);
3497 : }
3498 :
3499 2578000599 : vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3500 : pgoff_t start_pgoff, pgoff_t end_pgoff)
3501 29007647836 : {
3502 2578000599 : struct vm_area_struct *vma = vmf->vma;
3503 2578000599 : struct file *file = vma->vm_file;
3504 2578000599 : struct address_space *mapping = file->f_mapping;
3505 2578000599 : pgoff_t last_pgoff = start_pgoff;
3506 2578000599 : unsigned long addr;
3507 2578000599 : XA_STATE(xas, &mapping->i_pages, start_pgoff);
3508 2578000599 : struct folio *folio;
3509 2578000599 : struct page *page;
3510 2578000599 : unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3511 2578000599 : vm_fault_t ret = 0;
3512 :
3513 2578000599 : rcu_read_lock();
3514 2577514624 : folio = first_map_page(mapping, &xas, end_pgoff);
3515 2577699265 : if (!folio)
3516 233350724 : goto out;
3517 :
3518 2344348541 : if (filemap_map_pmd(vmf, folio, start_pgoff)) {
3519 36701 : ret = VM_FAULT_NOPAGE;
3520 36701 : goto out;
3521 : }
3522 :
3523 2344167936 : addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3524 2344167936 : vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
3525 2344717803 : if (!vmf->pte) {
3526 0 : folio_unlock(folio);
3527 0 : folio_put(folio);
3528 0 : goto out;
3529 : }
3530 30732553192 : do {
3531 30724138686 : again:
3532 30732553192 : page = folio_file_page(folio, xas.xa_index);
3533 30719264272 : if (PageHWPoison(page))
3534 0 : goto unlock;
3535 :
3536 30719264272 : if (mmap_miss > 0)
3537 1206781 : mmap_miss--;
3538 :
3539 30719264272 : addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3540 30719264272 : vmf->pte += xas.xa_index - last_pgoff;
3541 30719264272 : last_pgoff = xas.xa_index;
3542 :
3543 : /*
3544 : * NOTE: If there're PTE markers, we'll leave them to be
3545 : * handled in the specific fault path, and it'll prohibit the
3546 : * fault-around logic.
3547 : */
3548 30719264272 : if (!pte_none(ptep_get(vmf->pte)))
3549 1758072757 : goto unlock;
3550 :
3551 : /* We're about to handle the fault */
3552 28961191515 : if (vmf->address == addr)
3553 2342785823 : ret = VM_FAULT_NOPAGE;
3554 :
3555 28961191515 : do_set_pte(vmf, page, addr);
3556 : /* no need to invalidate: a not-present page won't be cached */
3557 29014905308 : update_mmu_cache(vma, addr, vmf->pte);
3558 29014905308 : if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3559 7208587 : xas.xa_index++;
3560 7208587 : folio_ref_inc(folio);
3561 7209761 : goto again;
3562 : }
3563 28989348051 : folio_unlock(folio);
3564 29007647836 : continue;
3565 1758072757 : unlock:
3566 1758072757 : if (folio_more_pages(folio, xas.xa_index, end_pgoff)) {
3567 1204745 : xas.xa_index++;
3568 1204745 : goto again;
3569 : }
3570 1756850990 : folio_unlock(folio);
3571 1756979019 : folio_put(folio);
3572 30764605129 : } while ((folio = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3573 2344525653 : pte_unmap_unlock(vmf->pte, vmf->ptl);
3574 2578075721 : out:
3575 2578075721 : rcu_read_unlock();
3576 2577960910 : WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3577 2577960910 : return ret;
3578 : }
3579 : EXPORT_SYMBOL(filemap_map_pages);
3580 :
3581 2219 : vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3582 : {
3583 2219 : struct address_space *mapping = vmf->vma->vm_file->f_mapping;
3584 2219 : struct folio *folio = page_folio(vmf->page);
3585 2219 : vm_fault_t ret = VM_FAULT_LOCKED;
3586 :
3587 2219 : sb_start_pagefault(mapping->host->i_sb);
3588 2219 : file_update_time(vmf->vma->vm_file);
3589 2219 : folio_lock(folio);
3590 2219 : if (folio->mapping != mapping) {
3591 0 : folio_unlock(folio);
3592 0 : ret = VM_FAULT_NOPAGE;
3593 0 : goto out;
3594 : }
3595 : /*
3596 : * We mark the folio dirty already here so that when freeze is in
3597 : * progress, we are guaranteed that writeback during freezing will
3598 : * see the dirty folio and writeprotect it again.
3599 : */
3600 2219 : folio_mark_dirty(folio);
3601 2219 : folio_wait_stable(folio);
3602 2219 : out:
3603 2219 : sb_end_pagefault(mapping->host->i_sb);
3604 2219 : return ret;
3605 : }
3606 :
3607 : const struct vm_operations_struct generic_file_vm_ops = {
3608 : .fault = filemap_fault,
3609 : .map_pages = filemap_map_pages,
3610 : .page_mkwrite = filemap_page_mkwrite,
3611 : };
3612 :
3613 : /* This is used for a general mmap of a disk file */
3614 :
3615 642674361 : int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3616 : {
3617 642674361 : struct address_space *mapping = file->f_mapping;
3618 :
3619 642674361 : if (!mapping->a_ops->read_folio)
3620 : return -ENOEXEC;
3621 642674361 : file_accessed(file);
3622 642594418 : vma->vm_ops = &generic_file_vm_ops;
3623 642594418 : return 0;
3624 : }
3625 :
3626 : /*
3627 : * This is for filesystems which do not implement ->writepage.
3628 : */
3629 0 : int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3630 : {
3631 0 : if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3632 : return -EINVAL;
3633 0 : return generic_file_mmap(file, vma);
3634 : }
3635 : #else
3636 : vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3637 : {
3638 : return VM_FAULT_SIGBUS;
3639 : }
3640 : int generic_file_mmap(struct file *file, struct vm_area_struct *vma)
3641 : {
3642 : return -ENOSYS;
3643 : }
3644 : int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3645 : {
3646 : return -ENOSYS;
3647 : }
3648 : #endif /* CONFIG_MMU */
3649 :
3650 : EXPORT_SYMBOL(filemap_page_mkwrite);
3651 : EXPORT_SYMBOL(generic_file_mmap);
3652 : EXPORT_SYMBOL(generic_file_readonly_mmap);
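
/*
 * Editor's illustration (not part of the original source): the usual way
 * the two mmap helpers above are consumed, via a filesystem's
 * file_operations. A minimal sketch; the name example_ro_fops is
 * hypothetical, and the readonly variant suits filesystems that cannot
 * write pages back.
 */
static const struct file_operations example_ro_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_readonly_mmap,
};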
3653 :
3654 2845385318 : static struct folio *do_read_cache_folio(struct address_space *mapping,
3655 : pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)
3656 : {
3657 2845385318 : struct folio *folio;
3658 2845385318 : int err;
3659 :
3660 2845385318 : if (!filler)
3661 2834437180 : filler = mapping->a_ops->read_folio;
3662 2845385318 : repeat:
3663 2845409019 : folio = filemap_get_folio(mapping, index);
3664 2845929425 : if (IS_ERR(folio)) {
3665 1212093205 : folio = filemap_alloc_folio(gfp, 0);
3666 1212342995 : if (!folio)
3667 : return ERR_PTR(-ENOMEM);
3668 1212342995 : err = filemap_add_folio(mapping, folio, index, gfp);
3669 1212926790 : if (unlikely(err)) {
3670 92 : folio_put(folio);
3671 92 : if (err == -EEXIST)
3672 92 : goto repeat;
3673 : /* Presumably ENOMEM for xarray node */
3674 0 : return ERR_PTR(err);
3675 : }
3676 :
3677 1212926698 : goto filler;
3678 : }
3679 1633836220 : if (folio_test_uptodate(folio))
3680 1633432896 : goto out;
3681 :
3682 24395 : if (!folio_trylock(folio)) {
3683 23608 : folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);
3684 23609 : goto repeat;
3685 : }
3686 :
3687 : /* Folio was truncated from mapping */
3688 787 : if (!folio->mapping) {
3689 0 : folio_unlock(folio);
3690 0 : folio_put(folio);
3691 0 : goto repeat;
3692 : }
3693 :
3694 : /* Someone else locked and filled the page in a very small window */
3695 787 : if (folio_test_uptodate(folio)) {
3696 0 : folio_unlock(folio);
3697 0 : goto out;
3698 : }
3699 :
3700 787 : filler:
3701 1212927485 : err = filemap_read_folio(file, filler, folio);
3702 1212222422 : if (err) {
3703 2100 : folio_put(folio);
3704 2101 : if (err == AOP_TRUNCATED_PAGE)
3705 0 : goto repeat;
3706 2101 : return ERR_PTR(err);
3707 : }
3708 :
3709 1212220322 : out:
3710 2845653218 : folio_mark_accessed(folio);
3711 2845653218 : return folio;
3712 : }
3713 :
3714 : /**
3715 : * read_cache_folio - Read into page cache, fill it if needed.
3716 : * @mapping: The address_space to read from.
3717 : * @index: The index to read.
3718 : * @filler: Function to perform the read, or NULL to use aops->read_folio().
3719 : * @file: Passed to filler function, may be NULL if not required.
3720 : *
3721 : * Read one page into the page cache. If it succeeds, the folio returned
3722 : * will contain @index, but it may not be the first page of the folio.
3723 : *
3724 : * If the filler function returns an error, it will be returned to the
3725 : * caller.
3726 : *
3727 : * Context: May sleep. Expects mapping->invalidate_lock to be held.
3728 : * Return: An uptodate folio on success, ERR_PTR() on failure.
3729 : */
3730 2834454582 : struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
3731 : filler_t filler, struct file *file)
3732 : {
3733 2834454582 : return do_read_cache_folio(mapping, index, filler, file,
3734 : mapping_gfp_mask(mapping));
3735 : }
3736 : EXPORT_SYMBOL(read_cache_folio);
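
/*
 * Editor's illustration (not part of the original source): reading one
 * byte of an inode's data through read_cache_folio(), per the kerneldoc
 * above. Assumes the caller satisfies the documented locking context
 * (mapping->invalidate_lock held); the name example_read_byte is
 * hypothetical.
 */
static int example_read_byte(struct inode *inode, loff_t pos, u8 *out)
{
	struct folio *folio;
	u8 *kaddr;

	folio = read_cache_folio(inode->i_mapping, pos >> PAGE_SHIFT,
				 NULL, NULL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* The folio is uptodate but may span more than one page. */
	kaddr = kmap_local_folio(folio, offset_in_folio(folio, pos));
	*out = *kaddr;
	kunmap_local(kaddr);
	folio_put(folio);
	return 0;
}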
3737 :
3738 : /**
3739 : * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
3740 : * @mapping: The address_space for the folio.
3741 : * @index: The index that the allocated folio will contain.
3742 : * @gfp: The page allocator flags to use if allocating.
3743 : *
3744 : * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3745 : * any new memory allocations done using the specified allocation flags.
3746 : *
3747 : * The most likely error from this function is EIO, but ENOMEM is
3748 : * possible and so is EINTR. If ->read_folio returns another error,
3749 : * that will be returned to the caller.
3750 : *
3751 : * The function expects mapping->invalidate_lock to be already held.
3752 : *
3753 : * Return: Uptodate folio on success, ERR_PTR() on failure.
3754 : */
3755 0 : struct folio *mapping_read_folio_gfp(struct address_space *mapping,
3756 : pgoff_t index, gfp_t gfp)
3757 : {
3758 0 : return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
3759 : }
3760 : EXPORT_SYMBOL(mapping_read_folio_gfp);
3761 :
3762 10997319 : static struct page *do_read_cache_page(struct address_space *mapping,
3763 : pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)
3764 : {
3765 10997319 : struct folio *folio;
3766 :
3767 10997319 : folio = do_read_cache_folio(mapping, index, filler, file, gfp);
3768 10997014 : if (IS_ERR(folio))
3769 50 : return &folio->page;
3770 10996964 : return folio_file_page(folio, index);
3771 : }
3772 :
3773 10984331 : struct page *read_cache_page(struct address_space *mapping,
3774 : pgoff_t index, filler_t *filler, struct file *file)
3775 : {
3776 10984331 : return do_read_cache_page(mapping, index, filler, file,
3777 : mapping_gfp_mask(mapping));
3778 : }
3779 : EXPORT_SYMBOL(read_cache_page);
3780 :
3781 : /**
3782 : * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3783 : * @mapping: the page's address_space
3784 : * @index: the page index
3785 : * @gfp: the page allocator flags to use if allocating
3786 : *
3787 : * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3788 : * any new page allocations done using the specified allocation flags.
3789 : *
3790 : * If the page does not get brought uptodate, return -EIO.
3791 : *
3792 : * The function expects mapping->invalidate_lock to be already held.
3793 : *
3794 : * Return: up to date page on success, ERR_PTR() on failure.
3795 : */
3796 13271 : struct page *read_cache_page_gfp(struct address_space *mapping,
3797 : pgoff_t index,
3798 : gfp_t gfp)
3799 : {
3800 13271 : return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3801 : }
3802 : EXPORT_SYMBOL(read_cache_page_gfp);
3803 :
3804 : /*
3805 : * Warn about a page cache invalidation failure during a direct I/O write.
3806 : */
3807 3644 : static void dio_warn_stale_pagecache(struct file *filp)
3808 : {
3809 3644 : static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3810 3644 : char pathname[128];
3811 3644 : char *path;
3812 :
3813 3644 : errseq_set(&filp->f_mapping->wb_err, -EIO);
3814 3644 : if (__ratelimit(&_rs)) {
3815 130 : path = file_path(filp, pathname, sizeof(pathname));
3816 130 : if (IS_ERR(path))
3817 0 : path = "(unknown)";
3818 130 : pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");
3819 130 : pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3820 : current->comm);
3821 : }
3822 3644 : }
3823 :
3824 23356700 : void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)
3825 : {
3826 23356700 : struct address_space *mapping = iocb->ki_filp->f_mapping;
3827 :
3828 32501397 : if (mapping->nrpages &&
3829 9144867 : invalidate_inode_pages2_range(mapping,
3830 9144867 : iocb->ki_pos >> PAGE_SHIFT,
3831 9144867 : (iocb->ki_pos + count - 1) >> PAGE_SHIFT))
3832 3644 : dio_warn_stale_pagecache(iocb->ki_filp);
3833 23356530 : }
3834 :
3835 : ssize_t
3836 5227783 : generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3837 : {
3838 5227783 : struct address_space *mapping = iocb->ki_filp->f_mapping;
3839 5227783 : size_t write_len = iov_iter_count(from);
3840 5227783 : ssize_t written;
3841 :
3842 : /*
3843 : * If a page can not be invalidated, return 0 to fall back
3844 : * to buffered write.
3845 : */
3846 5227783 : written = kiocb_invalidate_pages(iocb, write_len);
3847 5227783 : if (written) {
3848 0 : if (written == -EBUSY)
3849 : return 0;
3850 0 : return written;
3851 : }
3852 :
3853 5227783 : written = mapping->a_ops->direct_IO(iocb, from);
3854 :
3855 : /*
3856 : * Finally, try again to invalidate clean pages which might have been
3857 : * cached by non-direct readahead, or faulted in by get_user_pages()
3858 : * if the source of the write was an mmap'ed region of the file
3859 : * we're writing. Either one is a pretty crazy thing to do,
3860 : * so we don't support it 100%. If this invalidation
3861 : * fails, tough, the write still worked...
3862 : *
3863 : * Most of the time we do not need this since dio_complete() will do
3864 : * the invalidation for us. However there are some file systems that
3865 : * do not end up with dio_complete() being called, so let's not break
3866 : * them by removing it completely.
3867 : *
3868 : * Noticeable example is a blkdev_direct_IO().
3869 : *
3870 : * Skip invalidation for async writes or if mapping has no pages.
3871 : */
3872 5227783 : if (written > 0) {
3873 5219414 : struct inode *inode = mapping->host;
3874 5219414 : loff_t pos = iocb->ki_pos;
3875 :
3876 5219414 : kiocb_invalidate_post_direct_write(iocb, written);
3877 5219414 : pos += written;
3878 5219414 : write_len -= written;
3879 5219414 : if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3880 0 : i_size_write(inode, pos);
3881 0 : mark_inode_dirty(inode);
3882 : }
3883 5219414 : iocb->ki_pos = pos;
3884 : }
3885 5227783 : if (written != -EIOCBQUEUED)
3886 5219425 : iov_iter_revert(from, write_len - iov_iter_count(from));
3887 : return written;
3888 : }
3889 : EXPORT_SYMBOL(generic_file_direct_write);
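
/*
 * Editor's illustration (not part of the original source, userspace):
 * the kind of I/O that reaches generic_file_direct_write(). O_DIRECT
 * requires aligned buffers; 4096-byte alignment is assumed here, but
 * the real requirement is device- and filesystem-specific.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int example_dio_write(const char *path)
 *	{
 *		void *buf = NULL;
 *		int fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);
 *
 *		if (fd < 0 || posix_memalign(&buf, 4096, 4096))
 *			return -1;
 *		memset(buf, 'x', 4096);
 *		if (write(fd, buf, 4096) != 4096)
 *			return -1;
 *		free(buf);
 *		return close(fd);
 *	}
 */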
3890 :
3891 341770316 : ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)
3892 : {
3893 341770316 : struct file *file = iocb->ki_filp;
3894 341770316 : loff_t pos = iocb->ki_pos;
3895 341770316 : struct address_space *mapping = file->f_mapping;
3896 341770316 : const struct address_space_operations *a_ops = mapping->a_ops;
3897 341770316 : long status = 0;
3898 341770316 : ssize_t written = 0;
3899 :
3900 442406866 : do {
3901 442406866 : struct page *page;
3902 442406866 : unsigned long offset; /* Offset into pagecache page */
3903 442406866 : unsigned long bytes; /* Bytes to write to page */
3904 442406866 : size_t copied; /* Bytes copied from user */
3905 442406866 : void *fsdata = NULL;
3906 :
3907 442406866 : offset = (pos & (PAGE_SIZE - 1));
3908 442406866 : bytes = min_t(unsigned long, PAGE_SIZE - offset,
3909 : iov_iter_count(i));
3910 :
3911 442406866 : again:
3912 : /*
3913 : * Bring in the user page that we will copy from _first_.
3914 : * Otherwise there's a nasty deadlock on copying from the
3915 : * same page as we're writing to, without it being marked
3916 : * up-to-date.
3917 : */
3918 442406866 : if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
3919 : status = -EFAULT;
3920 43335 : break;
3921 : }
3922 :
3923 442391352 : if (fatal_signal_pending(current)) {
3924 : status = -EINTR;
3925 : break;
3926 : }
3927 :
3928 442334905 : status = a_ops->write_begin(file, mapping, pos, bytes,
3929 : &page, &fsdata);
3930 442404740 : if (unlikely(status < 0))
3931 : break;
3932 :
3933 442365137 : if (mapping_writably_mapped(mapping))
3934 : flush_dcache_page(page);
3935 :
3936 442365137 : copied = copy_page_from_iter_atomic(page, offset, bytes, i);
3937 442445182 : flush_dcache_page(page);
3938 :
3939 442445182 : status = a_ops->write_end(file, mapping, pos, bytes, copied,
3940 : page, fsdata);
3941 442454088 : if (unlikely(status != copied)) {
3942 0 : iov_iter_revert(i, copied - max(status, 0L));
3943 0 : if (unlikely(status < 0))
3944 : break;
3945 : }
3946 442454088 : cond_resched();
3947 :
3948 442439828 : if (unlikely(status == 0)) {
3949 : /*
3950 : * A short copy made ->write_end() reject the
3951 : * thing entirely. Might be memory poisoning
3952 : * halfway through, might be a race with munmap,
3953 : * might be severe memory pressure.
3954 : */
3955 0 : if (copied)
3956 0 : bytes = copied;
3957 0 : goto again;
3958 : }
3959 442439828 : pos += status;
3960 442439828 : written += status;
3961 :
3962 442439828 : balance_dirty_pages_ratelimited(mapping);
3963 442399598 : } while (iov_iter_count(i));
3964 :
3965 341806380 : if (!written)
3966 : return status;
3967 341774819 : iocb->ki_pos += written;
3968 341774819 : return written;
3969 : }
3970 : EXPORT_SYMBOL(generic_perform_write);
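
/*
 * Editor's illustration (not part of the original source, userspace):
 * the overlap the fault_in_iov_iter_readable() prefault above exists
 * for - the write source living in a mapping of the very range being
 * written:
 *
 *	char *map = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	write(fd, map, 4096);	// source == destination page
 *
 * Without the prefault, copy_page_from_iter_atomic() could fault on
 * "map" while the target page is locked and not yet uptodate.
 */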
3971 :
3972 : /**
3973 : * __generic_file_write_iter - write data to a file
3974 : * @iocb: IO state structure (file, offset, etc.)
3975 : * @from: iov_iter with data to write
3976 : *
3977 : * This function does all the work needed for actually writing data to a
3978 : * file. It does all basic checks, removes SUID from the file, updates
3979 : * modification times and calls proper subroutines depending on whether we
3980 : * do direct IO or a standard buffered write.
3981 : *
3982 : * It expects i_rwsem to be grabbed unless we work on a block device or similar
3983 : * object which does not need locking at all.
3984 : *
3985 : * This function does *not* take care of syncing data in case of O_SYNC write.
3986 : * A caller has to handle it. This is mainly due to the fact that we want to
3987 : * avoid syncing under i_rwsem.
3988 : *
3989 : * Return:
3990 : * * number of bytes written, even for truncated writes
3991 : * * negative error code if no data has been written at all
3992 : */
3993 331472159 : ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3994 : {
3995 331472159 : struct file *file = iocb->ki_filp;
3996 331472159 : struct address_space *mapping = file->f_mapping;
3997 331472159 : struct inode *inode = mapping->host;
3998 331472159 : ssize_t ret;
3999 :
4000 331472159 : ret = file_remove_privs(file);
4001 331471979 : if (ret)
4002 : return ret;
4003 :
4004 331471966 : ret = file_update_time(file);
4005 331471702 : if (ret)
4006 : return ret;
4007 :
4008 331471702 : if (iocb->ki_flags & IOCB_DIRECT) {
4009 5227783 : ret = generic_file_direct_write(iocb, from);
4010 : /*
4011 : * If the write stopped short of completing, fall back to
4012 : * buffered writes. Some filesystems do this for writes to
4013 : * holes, for example. For DAX files, a buffered write will
4014 : * not succeed (even if it did, DAX does not handle dirty
4015 : * page-cache pages correctly).
4016 : */
4017 5227783 : if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
4018 : return ret;
4019 0 : return direct_write_fallback(iocb, from, ret,
4020 : generic_perform_write(iocb, from));
4021 : }
4022 :
4023 326243919 : return generic_perform_write(iocb, from);
4024 : }
4025 : EXPORT_SYMBOL(__generic_file_write_iter);
4026 :
4027 : /**
4028 : * generic_file_write_iter - write data to a file
4029 : * @iocb: IO state structure
4030 : * @from: iov_iter with data to write
4031 : *
4032 : * This is a wrapper around __generic_file_write_iter() to be used by most
4033 : * filesystems. It takes care of syncing the file in case of O_SYNC file
4034 : * and acquires i_rwsem as needed.
4035 : * Return:
4036 : * * negative error code if no data has been written at all of
4037 : * vfs_fsync_range() failed for a synchronous write
4038 : * * number of bytes written, even for truncated writes
4039 : */
4040 312460396 : ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4041 : {
4042 312460396 : struct file *file = iocb->ki_filp;
4043 312460396 : struct inode *inode = file->f_mapping->host;
4044 312460396 : ssize_t ret;
4045 :
4046 312460396 : inode_lock(inode);
4047 312460515 : ret = generic_write_checks(iocb, from);
4048 312460458 : if (ret > 0)
4049 312460461 : ret = __generic_file_write_iter(iocb, from);
4050 312460420 : inode_unlock(inode);
4051 :
4052 312460519 : if (ret > 0)
4053 312460508 : ret = generic_write_sync(iocb, ret);
4054 312460573 : return ret;
4055 : }
4056 : EXPORT_SYMBOL(generic_file_write_iter);
4057 :
4058 : /**
4059 : * filemap_release_folio() - Release fs-specific metadata on a folio.
4060 : * @folio: The folio which the kernel is trying to free.
4061 : * @gfp: Memory allocation flags (and I/O mode).
4062 : *
4063 : * The address_space is trying to release any data attached to a folio
4064 : * (presumably at folio->private).
4065 : *
4066 : * This will also be called if the private_2 flag is set on the folio,
4067 : * indicating that the folio has other metadata associated with it.
4068 : *
4069 : * The @gfp argument specifies whether I/O may be performed to release
4070 : * this page (__GFP_IO), and whether the call may block
4071 : * (__GFP_RECLAIM & __GFP_FS).
4072 : *
4073 : * Return: %true if the release was successful, otherwise %false.
4074 : */
4075 94293219 : bool filemap_release_folio(struct folio *folio, gfp_t gfp)
4076 : {
4077 94293219 : struct address_space * const mapping = folio->mapping;
4078 :
4079 94293219 : BUG_ON(!folio_test_locked(folio));
4080 94290657 : if (folio_test_writeback(folio))
4081 : return false;
4082 :
4083 94291175 : if (mapping && mapping->a_ops->release_folio)
4084 81973570 : return mapping->a_ops->release_folio(folio, gfp);
4085 12317605 : return try_to_free_buffers(folio);
4086 : }
4087 : EXPORT_SYMBOL(filemap_release_folio);
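
/*
 * Editor's illustration (not part of the original source): the shape of
 * an a_ops->release_folio() implementation as invoked above. A minimal
 * sketch assuming the filesystem keeps a kmalloc()'ed blob in
 * folio->private; the name example_release_folio is hypothetical, and a
 * real implementation would only do work permitted by @gfp.
 */
static bool example_release_folio(struct folio *folio, gfp_t gfp)
{
	if (folio_test_private(folio)) {
		/* Illustrative @gfp check: refuse fs work without __GFP_FS. */
		if (!(gfp & __GFP_FS))
			return false;
		kfree(folio_detach_private(folio));
	}
	return true;
}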
4088 :
4089 : #ifdef CONFIG_CACHESTAT_SYSCALL
4090 : /**
4091 : * filemap_cachestat() - compute the page cache statistics of a mapping
4092 : * @mapping: The mapping to compute the statistics for.
4093 : * @first_index: The starting page cache index.
4094 : * @last_index: The final page index (inclusive).
4095 : * @cs: the cachestat struct to write the result to.
4096 : *
4097 : * This will query the page cache statistics of a mapping in the
4098 : * page range of [first_index, last_index] (inclusive). The statistics
4099 : * queried include: number of dirty pages, number of pages marked for
4100 : * writeback, and the number of (recently) evicted pages.
4101 : */
4102 0 : static void filemap_cachestat(struct address_space *mapping,
4103 : pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)
4104 : {
4105 0 : XA_STATE(xas, &mapping->i_pages, first_index);
4106 0 : struct folio *folio;
4107 :
4108 0 : rcu_read_lock();
4109 0 : xas_for_each(&xas, folio, last_index) {
4110 0 : unsigned long nr_pages;
4111 0 : pgoff_t folio_first_index, folio_last_index;
4112 :
4113 0 : if (xas_retry(&xas, folio))
4114 0 : continue;
4115 :
4116 0 : if (xa_is_value(folio)) {
4117 : /* page is evicted */
4118 0 : void *shadow = (void *)folio;
4119 0 : bool workingset; /* not used */
4120 0 : int order = xa_get_order(xas.xa, xas.xa_index);
4121 :
4122 0 : nr_pages = 1 << order;
4123 0 : folio_first_index = round_down(xas.xa_index, 1 << order);
4124 0 : folio_last_index = folio_first_index + nr_pages - 1;
4125 :
4126 : /* Folios might straddle the range boundaries, only count covered pages */
4127 0 : if (folio_first_index < first_index)
4128 0 : nr_pages -= first_index - folio_first_index;
4129 :
4130 0 : if (folio_last_index > last_index)
4131 0 : nr_pages -= folio_last_index - last_index;
4132 :
4133 0 : cs->nr_evicted += nr_pages;
4134 :
4135 : #ifdef CONFIG_SWAP /* implies CONFIG_MMU */
4136 0 : if (shmem_mapping(mapping)) {
4137 : /* shmem file - in swap cache */
4138 0 : swp_entry_t swp = radix_to_swp_entry(folio);
4139 :
4140 0 : shadow = get_shadow_from_swap_cache(swp);
4141 : }
4142 : #endif
4143 0 : if (workingset_test_recent(shadow, true, &workingset))
4144 0 : cs->nr_recently_evicted += nr_pages;
4145 :
4146 0 : goto resched;
4147 : }
4148 :
4149 0 : nr_pages = folio_nr_pages(folio);
4150 0 : folio_first_index = folio_pgoff(folio);
4151 0 : folio_last_index = folio_first_index + nr_pages - 1;
4152 :
4153 : /* Folios might straddle the range boundaries, only count covered pages */
4154 0 : if (folio_first_index < first_index)
4155 0 : nr_pages -= first_index - folio_first_index;
4156 :
4157 0 : if (folio_last_index > last_index)
4158 0 : nr_pages -= folio_last_index - last_index;
4159 :
4160 : /* page is in cache */
4161 0 : cs->nr_cache += nr_pages;
4162 :
4163 0 : if (folio_test_dirty(folio))
4164 0 : cs->nr_dirty += nr_pages;
4165 :
4166 0 : if (folio_test_writeback(folio))
4167 0 : cs->nr_writeback += nr_pages;
4168 :
4169 0 : resched:
4170 0 : if (need_resched()) {
4171 0 : xas_pause(&xas);
4172 0 : cond_resched_rcu();
4173 : }
4174 : }
4175 0 : rcu_read_unlock();
4176 0 : }
4177 :
4178 : /*
4179 : * The cachestat(2) system call.
4180 : *
4181 : * cachestat() returns the page cache statistics of a file in the
4182 : * bytes range specified by `off` and `len`: number of cached pages,
4183 : * number of dirty pages, number of pages marked for writeback,
4184 : * number of evicted pages, and number of recently evicted pages.
4185 : *
4186 : * An evicted page is a page that is previously in the page cache
4187 : * but has been evicted since. A page is recently evicted if its last
4188 : * eviction was recent enough that its reentry to the cache would
4189 : * indicate that it is actively being used by the system, and that
4190 : * there is memory pressure on the system.
4191 : *
4192 : * `off` and `len` must be non-negative integers. If `len` > 0,
4193 : * the queried range is [`off`, `off` + `len` - 1]. If `len` == 0,
4194 : * we will query in the range from `off` to the end of the file.
4195 : *
4196 : * The `flags` argument is unused for now, but is included for future
4197 : * extensibility. Users should pass 0 (i.e., no flags specified).
4198 : *
4199 : * Currently, hugetlbfs is not supported.
4200 : *
4201 : * Because the status of a page can change after cachestat() checks it
4202 : * but before it returns to the application, the returned values may
4203 : * contain stale information.
4204 : *
4205 : * return values:
4206 : * zero - success
4207 : * -EFAULT - cstat or cstat_range points to an illegal address
4208 : * -EINVAL - invalid flags
4209 : * -EBADF - invalid file descriptor
4210 : * -EOPNOTSUPP - file descriptor is of a hugetlbfs file
4211 : */
4212 0 : SYSCALL_DEFINE4(cachestat, unsigned int, fd,
4213 : struct cachestat_range __user *, cstat_range,
4214 : struct cachestat __user *, cstat, unsigned int, flags)
4215 : {
4216 0 : struct fd f = fdget(fd);
4217 0 : struct address_space *mapping;
4218 0 : struct cachestat_range csr;
4219 0 : struct cachestat cs;
4220 0 : pgoff_t first_index, last_index;
4221 :
4222 0 : if (!f.file)
4223 : return -EBADF;
4224 :
4225 0 : if (copy_from_user(&csr, cstat_range,
4226 : sizeof(struct cachestat_range))) {
4227 0 : fdput(f);
4228 0 : return -EFAULT;
4229 : }
4230 :
4231 : /* hugetlbfs is not supported */
4232 0 : if (is_file_hugepages(f.file)) {
4233 0 : fdput(f);
4234 0 : return -EOPNOTSUPP;
4235 : }
4236 :
4237 0 : if (flags != 0) {
4238 0 : fdput(f);
4239 0 : return -EINVAL;
4240 : }
4241 :
4242 0 : first_index = csr.off >> PAGE_SHIFT;
4243 0 : last_index =
4244 0 : csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
4245 0 : memset(&cs, 0, sizeof(struct cachestat));
4246 0 : mapping = f.file->f_mapping;
4247 0 : filemap_cachestat(mapping, first_index, last_index, &cs);
4248 0 : fdput(f);
4249 :
4250 0 : if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))
4251 0 : return -EFAULT;
4252 :
4253 : return 0;
4254 : }
4255 : #endif /* CONFIG_CACHESTAT_SYSCALL */
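
/*
 * Editor's illustration (not part of the original source, userspace):
 * invoking cachestat(2) as documented above. Assumes kernel headers
 * that provide __NR_cachestat and the uapi structs from <linux/mman.h>.
 *
 *	#include <linux/mman.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int example_cachestat(int fd)
 *	{
 *		struct cachestat_range range = { .off = 0, .len = 0 };
 *		struct cachestat cs;
 *
 *		// len == 0: query from off to the end of the file
 *		if (syscall(__NR_cachestat, fd, &range, &cs, 0))
 *			return -1;
 *		printf("cached=%llu dirty=%llu writeback=%llu evicted=%llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback,
 *		       (unsigned long long)cs.nr_evicted);
 *		return 0;
 *	}
 */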
|