// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "scrub/xfile.h"
#include "scrub/xfarray.h"
#include "scrub/scrub.h"
#include "scrub/trace.h"
#include <linux/shmem_fs.h>

/*
 * Swappable Temporary Memory
 * ==========================
 *
 * Online checking sometimes needs to be able to stage a large amount of data
 * in memory. This information might not fit in the available memory and it
 * doesn't all need to be accessible at all times. In other words, we want an
 * indexed data buffer to store data that can be paged out.
 *
 * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
 * requirements. Therefore, the xfile mechanism uses an unlinked shmem file to
 * store our staging data. This file is not installed in the file descriptor
 * table so that user programs cannot access the data, which means that the
 * xfile must be freed with xfile_destroy.
 *
 * xfiles assume that the caller will handle all required concurrency
 * management; standard vfs locks (freezer and inode) are not taken. Reads
 * and writes are satisfied directly from the page cache.
 *
 * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
 * of a hole cause a page to be mapped into the file. If you are going to
 * create a sparse xfile, please be careful about reading from uninitialized
 * parts of the file. These pages are !Uptodate and will eventually be
 * reclaimed if not written, but in the short term this boosts memory
 * consumption.
 */
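
/*
 * Illustrative sketch of the xfile lifecycle described above. This helper is
 * an editorial example, not part of the scrub code: an xfile is created, one
 * record is staged with a positional write, read back, and then the file is
 * freed with xfile_destroy, since no file descriptor ever exists for it.
 */
static inline int
xfile_lifecycle_sketch(void)
{
	struct xfile	*xf;
	u64		rec = 0x58465342;	/* arbitrary staging datum */
	u64		out = 0;
	ssize_t		copied;
	int		error;

	error = xfile_create("example staging file", sizeof(rec), &xf);
	if (error)
		return error;

	copied = xfile_pwrite(xf, &rec, sizeof(rec), 0);
	if (copied != sizeof(rec)) {
		error = copied < 0 ? copied : -EIO;
		goto out_destroy;
	}

	copied = xfile_pread(xf, &out, sizeof(out), 0);
	if (copied != sizeof(out))
		error = copied < 0 ? copied : -EIO;

out_destroy:
	/* Drops the last reference to the unlinked shmem file. */
	xfile_destroy(xf);
	return error;
}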

/*
 * xfiles must not be exposed to userspace and require upper layers to
 * coordinate access to the one handle returned by the constructor, so
 * establish a separate lock class for xfiles to avoid confusing lockdep.
 */
static struct lock_class_key xfile_i_mutex_key;

/*
 * Create an xfile of the given size. The description will be used in the
 * trace output.
 */
int
xfile_create(
	const char		*description,
	loff_t			isize,
	struct xfile		**xfilep)
{
	struct inode		*inode;
	struct xfile		*xf;
	int			error = -ENOMEM;

	xf = kzalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
	if (!xf)
		return -ENOMEM;

	xf->file = shmem_file_setup(description, isize, 0);
	if (!xf->file)
		goto out_xfile;
	if (IS_ERR(xf->file)) {
		error = PTR_ERR(xf->file);
		goto out_xfile;
	}

	/*
	 * We want a large sparse file that we can pread, pwrite, and seek.
	 * xfile users are responsible for keeping the xfile hidden away from
	 * all other callers, so we skip timestamp updates and security checks.
	 * Make the inode only accessible by root, just in case the xfile ever
	 * escapes.
	 */
	xf->file->f_mode |= FMODE_PREAD | FMODE_PWRITE | FMODE_NOCMTIME |
			    FMODE_LSEEK;
	xf->file->f_flags |= O_RDWR | O_LARGEFILE | O_NOATIME;
	inode = file_inode(xf->file);
	inode->i_flags |= S_PRIVATE | S_NOCMTIME | S_NOATIME;
	inode->i_mode &= ~0177;
	inode->i_uid = GLOBAL_ROOT_UID;
	inode->i_gid = GLOBAL_ROOT_GID;

	lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);

	trace_xfile_create(xf);

	*xfilep = xf;
	return 0;
out_xfile:
	kfree(xf);
	return error;
}

/* Evict a cache entry and release the page. */
static inline int
xfile_cache_evict(
	struct xfile		*xf,
	struct xfile_cache	*entry)
{
	int			error;

	if (!entry->xfpage.page)
		return 0;

	lock_page(entry->xfpage.page);
	kunmap(entry->kaddr);

	error = xfile_put_page(xf, &entry->xfpage);
	memset(entry, 0, sizeof(struct xfile_cache));
	return error;
}

/*
 * Grab a page, map it into the kernel address space, and fill out the cache
 * entry.
 */
static int
xfile_cache_fill(
	struct xfile		*xf,
	loff_t			key,
	struct xfile_cache	*entry)
{
	int			error;

	error = xfile_get_page(xf, key, PAGE_SIZE, &entry->xfpage);
	if (error)
		return error;

	entry->kaddr = kmap(entry->xfpage.page);
	unlock_page(entry->xfpage.page);
	return 0;
}

/*
 * Return the kernel address of a cached position in the xfile. If the cache
 * misses, the relevant page will be brought into memory, mapped, and returned.
 * If the cache is disabled, returns NULL.
 */
static void *
xfile_cache_lookup(
	struct xfile		*xf,
	loff_t			pos)
{
	loff_t			key = round_down(pos, PAGE_SIZE);
	unsigned int		i;
	int			ret;

	if (!(xf->flags & XFILE_INTERNAL_CACHE))
		return NULL;

	/* Is it already in the cache? */
	for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
		if (!xf->cached[i].xfpage.page)
			continue;
		if (page_offset(xf->cached[i].xfpage.page) != key)
			continue;

		goto found;
	}

	/* Find the least-used slot here so we can evict it. */
	for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
		if (!xf->cached[i].xfpage.page)
			goto insert;
	}
	i = min_t(unsigned int, i, XFILE_CACHE_ENTRIES - 1);

	ret = xfile_cache_evict(xf, &xf->cached[i]);
	if (ret)
		return ERR_PTR(ret);

insert:
	ret = xfile_cache_fill(xf, key, &xf->cached[i]);
	if (ret)
		return ERR_PTR(ret);

found:
	/* Stupid MRU moves this cache entry to the front. */
	if (i != 0)
		swap(xf->cached[0], xf->cached[i]);

	return xf->cached[0].kaddr;
}

/* Drop all cached xfile pages. */
static void
xfile_cache_drop(
	struct xfile		*xf)
{
	unsigned int		i;

	if (!(xf->flags & XFILE_INTERNAL_CACHE))
		return;

	for (i = 0; i < XFILE_CACHE_ENTRIES; i++)
		xfile_cache_evict(xf, &xf->cached[i]);
}

/* Enable the internal xfile cache. */
void
xfile_cache_enable(
	struct xfile		*xf)
{
	xf->flags |= XFILE_INTERNAL_CACHE;
	memset(xf->cached, 0, sizeof(struct xfile_cache) * XFILE_CACHE_ENTRIES);
}

/* Disable the internal xfile cache. */
void
xfile_cache_disable(
	struct xfile		*xf)
{
	xfile_cache_drop(xf);
	xf->flags &= ~XFILE_INTERNAL_CACHE;
}
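
/*
 * Sketch of bracketing a reread-heavy access with the internal cache. This
 * helper is an editorial example, not part of the scrub code. While the
 * cache is enabled, up to XFILE_CACHE_ENTRIES pages stay kmapped between
 * calls, so repeated reads of the same pages skip the get/map/unmap cycle;
 * disabling the cache unmaps and releases those pages again.
 */
static inline ssize_t
xfile_cached_pread_sketch(
	struct xfile		*xf,
	void			*buf,
	size_t			count,
	loff_t			pos)
{
	ssize_t			ret;

	xfile_cache_enable(xf);
	ret = xfile_pread(xf, buf, count, pos);
	xfile_cache_disable(xf);
	return ret;
}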

/* Close the file and release all resources. */
void
xfile_destroy(
	struct xfile		*xf)
{
	struct inode		*inode = file_inode(xf->file);

	trace_xfile_destroy(xf);

	xfile_cache_drop(xf);

	lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
	fput(xf->file);
	kfree(xf);
}

/* Get a mapped page in the xfile, do not use internal cache. */
static void *
xfile_uncached_get(
	struct xfile		*xf,
	loff_t			pos,
	struct xfile_page	*xfpage)
{
	loff_t			key = round_down(pos, PAGE_SIZE);
	int			error;

	error = xfile_get_page(xf, key, PAGE_SIZE, xfpage);
	if (error)
		return ERR_PTR(error);

	return kmap_local_page(xfpage->page);
}

/* Release a mapped page that was obtained via xfile_uncached_get. */
static int
xfile_uncached_put(
	struct xfile		*xf,
	struct xfile_page	*xfpage,
	void			*kaddr)
{
	kunmap_local(kaddr);
	return xfile_put_page(xf, xfpage);
}

/*
 * Read a memory object directly from the xfile's page cache. Unlike regular
 * pread, we return -E2BIG and -EFBIG for reads that are too large or at too
 * high an offset, instead of truncating the read. Otherwise, we return
 * bytes read or an error code, like regular pread.
 */
ssize_t
xfile_pread(
	struct xfile		*xf,
	void			*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	ssize_t			read = 0;
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_pread(xf, pos, count);

	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*p, *kaddr;
		unsigned int		len;
		bool			cached = true;

		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		kaddr = xfile_cache_lookup(xf, pos);
		if (!kaddr) {
			cached = false;
			kaddr = xfile_uncached_get(xf, pos, &xfpage);
		}
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		p = kaddr + offset_in_page(pos);
		memcpy(buf, p, len);

		if (!cached) {
			error = xfile_uncached_put(xf, &xfpage, kaddr);
			if (error)
				break;
		}

		count -= len;
		pos += len;
		buf += len;
		read += len;
	}
	memalloc_nofs_restore(pflags);

	if (read > 0)
		return read;
	return error;
}

/*
 * Write a memory object directly to the xfile's page cache. Unlike regular
 * pwrite, we return -E2BIG and -EFBIG for writes that are too large or at too
 * high an offset, instead of truncating the write. Otherwise, we return
 * bytes written or an error code, like regular pwrite.
 */
ssize_t
xfile_pwrite(
	struct xfile		*xf,
	const void		*buf,
	size_t			count,
	loff_t			pos)
{
	struct inode		*inode = file_inode(xf->file);
	ssize_t			written = 0;
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_pwrite(xf, pos, count);

	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*p, *kaddr;
		unsigned int		len;
		bool			cached = true;

		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		kaddr = xfile_cache_lookup(xf, pos);
		if (!kaddr) {
			cached = false;
			kaddr = xfile_uncached_get(xf, pos, &xfpage);
		}
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		p = kaddr + offset_in_page(pos);
		memcpy(p, buf, len);

		if (!cached) {
			error = xfile_uncached_put(xf, &xfpage, kaddr);
			if (error)
				break;
		}

		written += len;
		count -= len;
		pos += len;
		buf += len;
	}
	memalloc_nofs_restore(pflags);

	if (written > 0)
		return written;
	return error;
}
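
/*
 * Sketch of a typed record accessor layered on xfile_pread. This helper is
 * an editorial example, not part of the scrub code: an array of fixed-size
 * records maps record index idx to byte offset recsize * idx. The xfarray
 * wrappers (scrub/xfarray.h) provide the full-featured version of this idea.
 */
static inline int
xfile_load_rec_sketch(
	struct xfile		*xf,
	void			*rec,
	size_t			recsize,
	u64			idx)
{
	ssize_t			copied;

	copied = xfile_pread(xf, rec, recsize, recsize * idx);
	if (copied < 0)
		return copied;
	/* A short read means the record was never fully staged. */
	return copied == recsize ? 0 : -ENODATA;
}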

/* Discard pages backing a range of the xfile. */
void
xfile_discard(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	trace_xfile_discard(xf, pos, count);
	xfile_cache_drop(xf);
	shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
}

/* Ensure that there is storage backing the given range. */
int
xfile_prealloc(
	struct xfile		*xf,
	loff_t			pos,
	u64			count)
{
	struct inode		*inode = file_inode(xf->file);
	unsigned int		pflags;
	int			error = 0;

	if (count > MAX_RW_COUNT)
		return -E2BIG;
	if (inode->i_sb->s_maxbytes - pos < count)
		return -EFBIG;

	trace_xfile_prealloc(xf, pos, count);

	pflags = memalloc_nofs_save();
	while (count > 0) {
		struct xfile_page	xfpage;
		void			*kaddr;
		unsigned int		len;

		len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));

		kaddr = xfile_uncached_get(xf, pos, &xfpage);
		if (IS_ERR(kaddr)) {
			error = PTR_ERR(kaddr);
			break;
		}

		error = xfile_uncached_put(xf, &xfpage, kaddr);
		if (error)
			break;

		count -= len;
		pos += len;
	}
	memalloc_nofs_restore(pflags);

	return error;
}

/* Find the next written area in the xfile data for a given offset. */
loff_t
xfile_seek_data(
	struct xfile		*xf,
	loff_t			pos)
{
	loff_t			ret;

	ret = vfs_llseek(xf->file, pos, SEEK_DATA);
	trace_xfile_seek_data(xf, pos, ret);
	return ret;
}
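
/*
 * Sketch of walking the written pages of a sparse xfile with
 * xfile_seek_data. This helper and its visit() callback are editorial
 * examples, not part of the scrub code. As with SEEK_DATA on a regular
 * file, -ENXIO means there is no more data past the given offset; other
 * negative returns are real errors.
 */
static inline int
xfile_walk_data_sketch(
	struct xfile		*xf,
	loff_t			isize,
	void			(*visit)(loff_t pos))
{
	loff_t			pos = 0;

	while ((pos = xfile_seek_data(xf, pos)) >= 0) {
		visit(pos);
		/* Advance to the next page boundary before seeking again. */
		pos = round_up(pos + 1, PAGE_SIZE);
		if (pos >= isize)
			return 0;
	}
	return pos == -ENXIO ? 0 : (int)pos;
}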

/* Query stat information for an xfile. */
int
xfile_stat(
	struct xfile		*xf,
	struct xfile_stat	*statbuf)
{
	struct kstat		ks;
	int			error;

	error = vfs_getattr_nosec(&xf->file->f_path, &ks,
			STATX_SIZE | STATX_BLOCKS, AT_STATX_DONT_SYNC);
	if (error)
		return error;

	statbuf->size = ks.size;
	statbuf->bytes = ks.blocks << SECTOR_SHIFT;
	return 0;
}
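
/*
 * Sketch of using xfile_stat to gauge how much memory an xfile actually
 * pins. This helper is an editorial example, not part of the scrub code:
 * statbuf.bytes reports the blocks shmem has allocated, which can be far
 * less than statbuf.size for a sparse file.
 */
static inline unsigned long long
xfile_bytes_sketch(
	struct xfile		*xf)
{
	struct xfile_stat	statbuf;

	if (xfile_stat(xf, &statbuf) != 0)
		return 0ULL;
	return statbuf.bytes;
}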

/*
 * Grab the (locked) page for a memory object. The object cannot span a page
 * boundary. Returns 0 (and a locked page) if successful, -ENOTBLK if we
 * cannot grab the page, or the usual negative errno.
 */
int
xfile_get_page(
	struct xfile		*xf,
	loff_t			pos,
	unsigned int		len,
	struct xfile_page	*xfpage)
{
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	struct page		*page = NULL;
	void			*fsdata = NULL;
	loff_t			key = round_down(pos, PAGE_SIZE);
	unsigned int		pflags;
	int			error;

	if (inode->i_sb->s_maxbytes - pos < len)
		return -ENOMEM;
	if (len > PAGE_SIZE - offset_in_page(pos))
		return -ENOTBLK;

	trace_xfile_get_page(xf, pos, len);

	pflags = memalloc_nofs_save();

	/*
	 * We call write_begin directly here to avoid all the freezer
	 * protection lock-taking that happens in the normal path. shmem
	 * doesn't support fs freeze, but lockdep doesn't know that and will
	 * trip over that.
	 */
	error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
			&fsdata);
	if (error)
		goto out_pflags;

	/* We got the page, so make sure we push out EOF. */
	if (i_size_read(inode) < pos + len)
		i_size_write(inode, pos + len);

	/*
	 * If the page isn't up to date, fill it with zeroes before we hand it
	 * to the caller and make sure the backing store will hold on to them.
	 */
	if (!PageUptodate(page)) {
		void	*kaddr;

		kaddr = kmap_local_page(page);
		memset(kaddr, 0, PAGE_SIZE);
		kunmap_local(kaddr);
		SetPageUptodate(page);
	}

	/*
	 * Mark each page dirty so that the contents are written to some
	 * backing store when we drop this buffer, and take an extra reference
	 * to prevent the xfile page from being swapped or removed from the
	 * page cache by reclaim if the caller unlocks the page.
	 */
	set_page_dirty(page);
	get_page(page);

	xfpage->page = page;
	xfpage->fsdata = fsdata;
	xfpage->pos = key;
out_pflags:
	memalloc_nofs_restore(pflags);
	return error;
}

/*
 * Release the (locked) page for a memory object. Returns 0 or a negative
 * errno.
 */
int
xfile_put_page(
	struct xfile		*xf,
	struct xfile_page	*xfpage)
{
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	const struct address_space_operations *aops = mapping->a_ops;
	unsigned int		pflags;
	int			ret;

	trace_xfile_put_page(xf, xfpage->pos, xfpage->page);

	/* Give back the reference that we took in xfile_get_page. */
	put_page(xfpage->page);

	pflags = memalloc_nofs_save();
	ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
			xfpage->page, xfpage->fsdata);
	memalloc_nofs_restore(pflags);
	memset(xfpage, 0, sizeof(struct xfile_page));

	if (ret < 0)
		return ret;
	if (ret != PAGE_SIZE)
		return -EIO;
	return 0;
}
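
/*
 * Sketch of the direct page access pattern built on xfile_get_page and
 * xfile_put_page. This helper is an editorial example, not part of the
 * scrub code: grab the locked page for an object that does not span a page
 * boundary, map it, operate on it in place, then unmap and release it. The
 * fill-with-0xFF operation is arbitrary.
 */
static inline int
xfile_poke_sketch(
	struct xfile		*xf,
	loff_t			pos,
	unsigned int		len)
{
	struct xfile_page	xfpage;
	void			*kaddr;
	int			error;

	error = xfile_get_page(xf, pos, len, &xfpage);
	if (error)
		return error;

	kaddr = kmap_local_page(xfpage.page);
	memset(kaddr + offset_in_page(pos), 0xFF, len);
	kunmap_local(kaddr);

	/* Marks the page dirty via write_end and drops our reference. */
	return xfile_put_page(xf, &xfpage);
}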

/* Dump an xfile to dmesg. */
int
xfile_dump(
	struct xfile		*xf)
{
	struct xfile_stat	sb;
	struct inode		*inode = file_inode(xf->file);
	struct address_space	*mapping = inode->i_mapping;
	loff_t			holepos = 0;
	loff_t			datapos;
	loff_t			ret;
	unsigned int		pflags;
	bool			all_zeroes = true;
	int			error = 0;

	error = xfile_stat(xf, &sb);
	if (error)
		return error;

	printk(KERN_ALERT "xfile ino 0x%lx isize 0x%llx dump:", inode->i_ino,
			sb.size);
	pflags = memalloc_nofs_save();

	while ((ret = vfs_llseek(xf->file, holepos, SEEK_DATA)) >= 0) {
		datapos = rounddown_64(ret, PAGE_SIZE);
		ret = vfs_llseek(xf->file, datapos, SEEK_HOLE);
		if (ret < 0)
			break;
		holepos = min_t(loff_t, sb.size, roundup_64(ret, PAGE_SIZE));

		while (datapos < holepos) {
			struct page	*page = NULL;
			void		*p, *kaddr;
			u64		datalen = holepos - datapos;
			unsigned int	pagepos;
			unsigned int	pagelen;

			cond_resched();

			if (fatal_signal_pending(current)) {
				error = -EINTR;
				goto out_pflags;
			}

			pagelen = min_t(u64, datalen, PAGE_SIZE);

			page = shmem_read_mapping_page_gfp(mapping,
					datapos >> PAGE_SHIFT, __GFP_NOWARN);
			if (IS_ERR(page)) {
				error = PTR_ERR(page);
				if (error == -EIO)
					printk(KERN_ALERT "%.8llx: poisoned",
							datapos);
				else if (error != -ENOMEM)
					goto out_pflags;

				goto next_pgoff;
			}

			if (!PageUptodate(page))
				goto next_page;

			kaddr = kmap_local_page(page);
			p = kaddr;

			for (pagepos = 0; pagepos < pagelen; pagepos += 16) {
				char		prefix[16];
				unsigned int	linelen;

				linelen = min_t(unsigned int, pagelen, 16);

				if (!memchr_inv(p + pagepos, 0, linelen))
					continue;

				snprintf(prefix, 16, "%.8llx: ",
						datapos + pagepos);

				all_zeroes = false;
				print_hex_dump(KERN_ALERT, prefix,
						DUMP_PREFIX_NONE, 16, 1,
						p + pagepos, linelen, true);
			}
			kunmap_local(kaddr);
next_page:
			put_page(page);
next_pgoff:
			datapos += PAGE_SIZE;
		}
	}
	if (all_zeroes)
		printk(KERN_ALERT "<all zeroes>");
	if (ret != -ENXIO)
		error = ret;
out_pflags:
	memalloc_nofs_restore(pflags);
	return error;
}