Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
13 : #include "xfs_format.h"
14 : #include "scrub/xfile.h"
15 : #include "scrub/xfarray.h"
16 : #include "scrub/scrub.h"
17 : #include "scrub/trace.h"
18 : #include <linux/shmem_fs.h>
19 :
20 : /*
21 : * Swappable Temporary Memory
22 : * ==========================
23 : *
24 : * Online checking sometimes needs to be able to stage a large amount of data
25 : * in memory. This information might not fit in the available memory and it
26 : * doesn't all need to be accessible at all times. In other words, we want an
27 : * indexed data buffer to store data that can be paged out.
28 : *
29 : * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
30 : * requirements. Therefore, the xfile mechanism uses an unlinked shmem file to
31 : * store our staging data. This file is not installed in the file descriptor
32 : * table so that user programs cannot access the data, which means that the
33 : * xfile must be freed with xfile_destroy.
34 : *
35 : * xfiles assume that the caller will handle all required concurrency
36 : * management; standard vfs locks (freezer and inode) are not taken. Reads
37 : * and writes are satisfied directly from the page cache.
38 : *
39 : * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
40 : * of a hole cause a page to be mapped into the file. If you are going to
41 : * create a sparse xfile, please be careful about reading from uninitialized
42 : * parts of the file. These pages are !Uptodate and will eventually be
43 : * reclaimed if not written, but in the short term this boosts memory
44 : * consumption.
45 : */
46 :
47 : /*
48 : * xfiles must not be exposed to userspace and require upper layers to
49 : * coordinate access to the one handle returned by the constructor, so
50 : * establish a separate lock class for xfiles to avoid confusing lockdep.
51 : */
52 : static struct lock_class_key xfile_i_mutex_key;
53 :
/*
 * Create an xfile of the given size.  The description will be used in the
 * trace output.
 *
 * Returns 0 and sets *xfilep on success; returns a negative errno (and
 * leaves *xfilep untouched) on failure.  The caller owns the returned
 * xfile and must release it with xfile_destroy.
 */
int
xfile_create(
	const char		*description,
	loff_t			isize,
	struct xfile		**xfilep)
{
	struct inode		*inode;
	struct xfile		*xf;
	int			error = -ENOMEM;	/* used by the !xf->file goto below */

	xf = kzalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
	if (!xf)
		return -ENOMEM;

	/* Unlinked shmem file; never installed in any fd table. */
	xf->file = shmem_file_setup(description, isize, 0);
	if (!xf->file)
		goto out_xfile;
	if (IS_ERR(xf->file)) {
		error = PTR_ERR(xf->file);
		goto out_xfile;
	}

	/*
	 * We want a large sparse file that we can pread, pwrite, and seek.
	 * xfile users are responsible for keeping the xfile hidden away from
	 * all other callers, so we skip timestamp updates and security checks.
	 * Make the inode only accessible by root, just in case the xfile ever
	 * escapes.
	 */
	xf->file->f_mode |= FMODE_PREAD | FMODE_PWRITE | FMODE_NOCMTIME |
			    FMODE_LSEEK;
	xf->file->f_flags |= O_RDWR | O_LARGEFILE | O_NOATIME;
	inode = file_inode(xf->file);
	inode->i_flags |= S_PRIVATE | S_NOCMTIME | S_NOATIME;
	inode->i_mode &= ~0177;
	inode->i_uid = GLOBAL_ROOT_UID;
	inode->i_gid = GLOBAL_ROOT_GID;

	/* Private lock class; see the comment above xfile_i_mutex_key. */
	lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);

	trace_xfile_create(xf);

	*xfilep = xf;
	return 0;
out_xfile:
	kfree(xf);
	return error;
}
106 :
/*
 * Evict a cache entry and release the page.  Returns 0 (also for an empty
 * slot) or the error from releasing the page.
 */
static inline int
xfile_cache_evict(
	struct xfile		*xf,
	struct xfile_cache	*entry)
{
	int			error;

	/* Empty slot: nothing to release. */
	if (!entry->xfpage.page)
		return 0;

	/*
	 * xfile_cache_fill unlocked the page after mapping it; re-lock and
	 * unmap before handing the page back via xfile_put_page.
	 */
	lock_page(entry->xfpage.page);
	kunmap(entry->kaddr);

	error = xfile_put_page(xf, &entry->xfpage);
	/* Zero the slot so it reads as empty on the next lookup. */
	memset(entry, 0, sizeof(struct xfile_cache));
	return error;
}
125 :
/*
 * Grab a page, map it into the kernel address space, and fill out the cache
 * entry.  The mapping is made with kmap (not kmap_local) because it persists
 * across calls until xfile_cache_evict tears it down.
 */
static int
xfile_cache_fill(
	struct xfile		*xf,
	loff_t			key,
	struct xfile_cache	*entry)
{
	int			error;

	error = xfile_get_page(xf, key, PAGE_SIZE, &entry->xfpage);
	if (error)
		return error;

	/*
	 * Dropping the page lock here is fine: xfile_get_page took an extra
	 * page reference, which keeps the page from being reclaimed while it
	 * sits in the cache.
	 */
	entry->kaddr = kmap(entry->xfpage.page);
	unlock_page(entry->xfpage.page);
	return 0;
}
146 :
/*
 * Return the kernel address of a cached position in the xfile.  If the cache
 * misses, the relevant page will be brought into memory, mapped, and returned.
 * If the cache is disabled, returns NULL.  Returns ERR_PTR on failure to
 * evict or fill a slot.
 */
static void *
xfile_cache_lookup(
	struct xfile		*xf,
	loff_t			pos)
{
	loff_t			key = round_down(pos, PAGE_SIZE);
	unsigned int		i;
	int			ret;

	if (!(xf->flags & XFILE_INTERNAL_CACHE))
		return NULL;

	/* Is it already in the cache? */
	for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
		if (!xf->cached[i].xfpage.page)
			continue;
		if (page_offset(xf->cached[i].xfpage.page) != key)
			continue;

		goto found;
	}

	/* Find the least-used slot here so we can evict it. */
	for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
		if (!xf->cached[i].xfpage.page)
			goto insert;
	}
	/*
	 * NOTE(review): the loop above can only fall through with
	 * i == XFILE_CACHE_ENTRIES, so this clamp always picks the last
	 * slot; combined with the MRU swap below, the last entry is the
	 * fixed eviction victim.  Confirm that is the intent.
	 */
	i = min_t(unsigned int, i, XFILE_CACHE_ENTRIES - 1);

	ret = xfile_cache_evict(xf, &xf->cached[i]);
	if (ret)
		return ERR_PTR(ret);

insert:
	ret = xfile_cache_fill(xf, key, &xf->cached[i]);
	if (ret)
		return ERR_PTR(ret);

found:
	/* Stupid MRU moves this cache entry to the front. */
	if (i != 0)
		swap(xf->cached[0], xf->cached[i]);

	return xf->cached[0].kaddr;
}
197 :
198 : /* Drop all cached xfile pages. */
199 : static void
200 671931141 : xfile_cache_drop(
201 : struct xfile *xf)
202 : {
203 671931141 : unsigned int i;
204 :
205 671931141 : if (!(xf->flags & XFILE_INTERNAL_CACHE))
206 : return;
207 :
208 0 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++)
209 0 : xfile_cache_evict(xf, &xf->cached[i]);
210 : }
211 :
212 : /* Enable the internal xfile cache. */
213 : void
214 0 : xfile_cache_enable(
215 : struct xfile *xf)
216 : {
217 0 : xf->flags |= XFILE_INTERNAL_CACHE;
218 0 : memset(xf->cached, 0, sizeof(struct xfile_cache) * XFILE_CACHE_ENTRIES);
219 0 : }
220 :
/* Disable the internal xfile cache. */
void
xfile_cache_disable(
	struct xfile		*xf)
{
	/*
	 * Evict everything while the cache flag is still set --
	 * xfile_cache_drop is a no-op once the flag is clear -- then turn
	 * the feature off.
	 */
	xfile_cache_drop(xf);
	xf->flags &= ~XFILE_INTERNAL_CACHE;
}
229 :
/* Close the file and release all resources. */
void
xfile_destroy(
	struct xfile		*xf)
{
	struct inode		*inode = file_inode(xf->file);

	trace_xfile_destroy(xf);

	/* Unmap and release any pages still held by the internal cache. */
	xfile_cache_drop(xf);

	/*
	 * Restore the filesystem's default i_rwsem lock class (undoing the
	 * private class set in xfile_create) before the final fput releases
	 * the inode.
	 */
	lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
	fput(xf->file);
	kfree(xf);
}
245 :
246 : /* Get a mapped page in the xfile, do not use internal cache. */
247 : static void *
248 22127045456 : xfile_uncached_get(
249 : struct xfile *xf,
250 : loff_t pos,
251 : struct xfile_page *xfpage)
252 : {
253 22127045456 : loff_t key = round_down(pos, PAGE_SIZE);
254 22127045456 : int error;
255 :
256 22127045456 : error = xfile_get_page(xf, key, PAGE_SIZE, xfpage);
257 22221818605 : if (error)
258 0 : return ERR_PTR(error);
259 :
260 22221818605 : return kmap_local_page(xfpage->page);
261 : }
262 :
/*
 * Release a mapped page that was obtained via xfile_uncached_get.  The
 * kmap_local mapping must be undone before the page is handed back.
 */
static int
xfile_uncached_put(
	struct xfile		*xf,
	struct xfile_page	*xfpage,
	void			*kaddr)
{
	kunmap_local(kaddr);
	return xfile_put_page(xf, xfpage);
}
273 :
/*
 * Read a memory object directly from the xfile's page cache.  Unlike regular
 * pread, we return -E2BIG and -EFBIG for reads that are too large or at too
 * high an offset, instead of truncating the read.  Otherwise, we return
 * bytes read or an error code, like regular pread.
 */
ssize_t
xfile_pread(
	struct xfile		*xf,
	void			*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	ssize_t			read = 0;
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_pread(xf, pos, count);

	/* Keep fs reclaim from recursing into us while we touch page cache. */
	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*p, *kaddr;
		unsigned int		len;
		bool			cached = true;

		/* Copy at most up to the end of the current page. */
		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		/* NULL means the internal cache is disabled. */
		kaddr = xfile_cache_lookup(xf, pos);
		if (!kaddr) {
			cached = false;
			kaddr = xfile_uncached_get(xf, pos, &xfpage);
		}
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		p = kaddr + offset_in_page(pos);
		memcpy(buf, p, len);

		/* Cached mappings stay mapped; only uncached ones are put. */
		if (!cached) {
			error = xfile_uncached_put(xf, &xfpage, kaddr);
			if (error)
				break;
		}

		count -= len;
		pos += len;
		buf += len;
		read += len;
	}
	memalloc_nofs_restore(pflags);

	/* Partial progress wins over a later error, like regular pread. */
	if (read > 0)
		return read;
	return error;
}
338 :
/*
 * Write a memory object directly to the xfile's page cache.  Unlike regular
 * pwrite, we return -E2BIG and -EFBIG for writes that are too large or at too
 * high an offset, instead of truncating the write.  Otherwise, we return
 * bytes written or an error code, like regular pwrite.
 */
ssize_t
xfile_pwrite(
	struct xfile		*xf,
	const void		*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	ssize_t			written = 0;
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_pwrite(xf, pos, count);

	/* Keep fs reclaim from recursing into us while we touch page cache. */
	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*p, *kaddr;
		unsigned int		len;
		bool			cached = true;

		/* Copy at most up to the end of the current page. */
		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		/* NULL means the internal cache is disabled. */
		kaddr = xfile_cache_lookup(xf, pos);
		if (!kaddr) {
			cached = false;
			kaddr = xfile_uncached_get(xf, pos, &xfpage);
		}
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		p = kaddr + offset_in_page(pos);
		memcpy(p, buf, len);

		/* Cached mappings stay mapped; only uncached ones are put. */
		if (!cached) {
			error = xfile_uncached_put(xf, &xfpage, kaddr);
			if (error)
				break;
		}

		written += len;
		count -= len;
		pos += len;
		buf += len;
	}
	memalloc_nofs_restore(pflags);

	/* Partial progress wins over a later error, like regular pwrite. */
	if (written > 0)
		return written;
	return error;
}
403 :
/* Discard pages backing a range of the xfile. */
void
xfile_discard(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	trace_xfile_discard(xf, pos, count);

	/*
	 * Drop the internal cache first so that no cached mapping pins a
	 * page in the range we are about to punch out.
	 */
	xfile_cache_drop(xf);
	shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
}
415 :
/*
 * Ensure that there is storage backing the given range.  Returns 0 or a
 * negative errno; uses the same -E2BIG/-EFBIG conventions as xfile_pread
 * and xfile_pwrite.
 */
int
xfile_prealloc(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_prealloc(xf, pos, count);

	/* Keep fs reclaim from recursing into us while we touch page cache. */
	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*kaddr;
		unsigned int		len;

		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		/*
		 * Getting and immediately putting the page is the whole
		 * trick: xfile_get_page zero-fills, dirties, and pushes out
		 * EOF, which allocates backing store for this chunk.
		 */
		kaddr = xfile_uncached_get(xf, pos, &xfpage);
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		error = xfile_uncached_put(xf, &xfpage, kaddr);
		if (error)
			break;

		count -= len;
		pos += len;
	}
	memalloc_nofs_restore(pflags);

	return error;
}
459 :
460 : /* Find the next written area in the xfile data for a given offset. */
461 : loff_t
462 77591638 : xfile_seek_data(
463 : struct xfile *xf,
464 : loff_t pos)
465 : {
466 77591638 : loff_t ret;
467 :
468 77591638 : ret = vfs_llseek(xf->file, pos, SEEK_DATA);
469 77591567 : trace_xfile_seek_data(xf, pos, ret);
470 77591348 : return ret;
471 : }
472 :
473 : /* Query stat information for an xfile. */
474 : int
475 >10565*10^7 : xfile_stat(
476 : struct xfile *xf,
477 : struct xfile_stat *statbuf)
478 : {
479 >10565*10^7 : struct kstat ks;
480 >10565*10^7 : int error;
481 :
482 >10565*10^7 : error = vfs_getattr_nosec(&xf->file->f_path, &ks,
483 : STATX_SIZE | STATX_BLOCKS, AT_STATX_DONT_SYNC);
484 >10646*10^7 : if (error)
485 : return error;
486 :
487 >10646*10^7 : statbuf->size = ks.size;
488 >10646*10^7 : statbuf->bytes = ks.blocks << SECTOR_SHIFT;
489 >10646*10^7 : return 0;
490 : }
491 :
/*
 * Grab the (locked) page for a memory object.  The object cannot span a page
 * boundary.  Returns 0 (and a locked page) if successful, -ENOTBLK if we
 * cannot grab the page, or the usual negative errno.
 */
int
xfile_get_page(
	struct xfile		*xf,
	loff_t			pos,
	unsigned int		len,
	struct xfile_page	*xfpage)
{
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	struct page		*page = NULL;
	void			*fsdata = NULL;
	loff_t			key = round_down(pos, PAGE_SIZE);
	unsigned int		pflags;
	int			error;

	/*
	 * NOTE(review): the analogous range check in xfile_pread/xfile_pwrite
	 * returns -EFBIG; confirm -ENOMEM is intended here.
	 */
	if (inode->i_sb->s_maxbytes - pos < len)
		return -ENOMEM;
	/* Objects may not cross a page boundary. */
	if (len > PAGE_SIZE - offset_in_page(pos))
		return -ENOTBLK;

	trace_xfile_get_page(xf, pos, len);

	pflags = memalloc_nofs_save();

	/*
	 * We call write_begin directly here to avoid all the freezer
	 * protection lock-taking that happens in the normal path.  shmem
	 * doesn't support fs freeze, but lockdep doesn't know that and will
	 * trip over that.
	 */
	error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
			&fsdata);
	if (error)
		goto out_pflags;

	/* We got the page, so make sure we push out EOF. */
	if (i_size_read(inode) < pos + len)
		i_size_write(inode, pos + len);

	/*
	 * If the page isn't up to date, fill it with zeroes before we hand it
	 * to the caller and make sure the backing store will hold on to them.
	 */
	if (!PageUptodate(page)) {
		void	*kaddr;

		kaddr = kmap_local_page(page);
		memset(kaddr, 0, PAGE_SIZE);
		kunmap_local(kaddr);
		SetPageUptodate(page);
	}

	/*
	 * Mark each page dirty so that the contents are written to some
	 * backing store when we drop this buffer, and take an extra reference
	 * to prevent the xfile page from being swapped or removed from the
	 * page cache by reclaim if the caller unlocks the page.
	 */
	set_page_dirty(page);
	get_page(page);

	xfpage->page = page;
	xfpage->fsdata = fsdata;
	xfpage->pos = key;
out_pflags:
	memalloc_nofs_restore(pflags);
	return error;
}
566 :
/*
 * Release the (locked) page for a memory object.  Returns 0 or a negative
 * errno.
 */
int
xfile_put_page(
	struct xfile		*xf,
	struct xfile_page	*xfpage)
{
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	unsigned int		pflags;
	int			ret;

	trace_xfile_put_page(xf, xfpage->pos, xfpage->page);

	/* Give back the reference that we took in xfile_get_page. */
	put_page(xfpage->page);

	pflags = memalloc_nofs_save();
	ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
			xfpage->page, xfpage->fsdata);
	memalloc_nofs_restore(pflags);
	/* Poison the descriptor so any stale reuse is obvious. */
	memset(xfpage, 0, sizeof(struct xfile_page));

	if (ret < 0)
		return ret;
	/* write_end reports bytes copied; anything short of a page is -EIO. */
	if (ret != PAGE_SIZE)
		return -EIO;
	return 0;
}
599 :
/*
 * Dump an xfile to dmesg.  Walks every written extent, hex-dumping nonzero
 * 16-byte lines; skips holes and pages that were never brought uptodate.
 * Returns 0 or a negative errno (-EINTR if a fatal signal arrives).
 */
int
xfile_dump(
	struct xfile		*xf)
{
	struct xfile_stat	sb;
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	loff_t			holepos = 0;
	loff_t			datapos;
	loff_t			ret;
	unsigned int		pflags;
	bool			all_zeroes = true;
	int			error = 0;

	error = xfile_stat(xf, &sb);
	if (error)
		return error;

	printk(KERN_ALERT "xfile ino 0x%lx isize 0x%llx dump:", inode->i_ino,
			sb.size);
	pflags = memalloc_nofs_save();

	/* Walk each written (data) extent, one page at a time. */
	while ((ret = vfs_llseek(xf->file, holepos, SEEK_DATA)) >= 0) {
		datapos = rounddown_64(ret, PAGE_SIZE);
		ret = vfs_llseek(xf->file, datapos, SEEK_HOLE);
		if (ret < 0)
			break;
		holepos = min_t(loff_t, sb.size, roundup_64(ret, PAGE_SIZE));

		while (datapos < holepos) {
			struct page	*page = NULL;
			void		*p, *kaddr;
			u64		datalen = holepos - datapos;
			unsigned int	pagepos;
			unsigned int	pagelen;

			cond_resched();

			if (fatal_signal_pending(current)) {
				error = -EINTR;
				goto out_pflags;
			}

			pagelen = min_t(u64, datalen, PAGE_SIZE);

			page = shmem_read_mapping_page_gfp(mapping,
					datapos >> PAGE_SHIFT, __GFP_NOWARN);
			if (IS_ERR(page)) {
				error = PTR_ERR(page);
				if (error == -EIO)
					printk(KERN_ALERT "%.8llx: poisoned",
							datapos);
				else if (error != -ENOMEM)
					goto out_pflags;

				/* Poisoned or -ENOMEM: skip to next page. */
				goto next_pgoff;
			}

			/* A !Uptodate page holds no written data; skip it. */
			if (!PageUptodate(page))
				goto next_page;

			kaddr = kmap_local_page(page);
			p = kaddr;

			for (pagepos = 0; pagepos < pagelen; pagepos += 16) {
				char prefix[16];
				unsigned int linelen;

				/*
				 * NOTE(review): this clamps to the page
				 * length rather than the bytes remaining
				 * (pagelen - pagepos), so a trailing partial
				 * line would dump a few bytes past the data
				 * length.  Confirm pagelen is always a
				 * multiple of 16 here.
				 */
				linelen = min_t(unsigned int, pagelen, 16);

				/* Skip all-zero lines to keep the log short. */
				if (!memchr_inv(p + pagepos, 0, linelen))
					continue;

				snprintf(prefix, 16, "%.8llx: ",
						datapos + pagepos);

				all_zeroes = false;
				print_hex_dump(KERN_ALERT, prefix,
						DUMP_PREFIX_NONE, 16, 1,
						p + pagepos, linelen, true);
			}
			kunmap_local(kaddr);
next_page:
			put_page(page);
next_pgoff:
			datapos += PAGE_SIZE;
		}
	}
	if (all_zeroes)
		printk(KERN_ALERT "<all zeroes>");
	/* -ENXIO from SEEK_DATA just means "no more data"; not an error. */
	if (ret != -ENXIO)
		error = ret;
out_pflags:
	memalloc_nofs_restore(pflags);
	return error;
}
|