Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
14 : #include "scrub/xfile.h"
15 : #include "scrub/xfarray.h"
16 : #include "scrub/scrub.h"
17 : #include "scrub/trace.h"
18 : #include <linux/shmem_fs.h>
19 :
20 : /*
21 : * Swappable Temporary Memory
22 : * ==========================
23 : *
24 : * Online checking sometimes needs to be able to stage a large amount of data
25 : * in memory. This information might not fit in the available memory and it
26 : * doesn't all need to be accessible at all times. In other words, we want an
27 : * indexed data buffer to store data that can be paged out.
28 : *
29 : * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
30 : * requirements. Therefore, the xfile mechanism uses an unlinked shmem file to
31 : * store our staging data. This file is not installed in the file descriptor
32 : * table so that user programs cannot access the data, which means that the
33 : * xfile must be freed with xfile_destroy.
34 : *
35 : * xfiles assume that the caller will handle all required concurrency
36 : * management; standard vfs locks (freezer and inode) are not taken. Reads
37 : * and writes are satisfied directly from the page cache.
38 : *
39 : * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
40 : * of a hole cause a page to be mapped into the file. If you are going to
41 : * create a sparse xfile, please be careful about reading from uninitialized
42 : * parts of the file. These pages are !Uptodate and will eventually be
43 : * reclaimed if not written, but in the short term this boosts memory
44 : * consumption.
45 : */
46 :
47 : /*
48 : * xfiles must not be exposed to userspace and require upper layers to
49 : * coordinate access to the one handle returned by the constructor, so
50 : * establish a separate lock class for xfiles to avoid confusing lockdep.
51 : */
52 : static struct lock_class_key xfile_i_mutex_key;
53 :
54 : /*
55 : * Create an xfile of the given size. The description will be used in the
56 : * trace output.
57 : */
58 : int
59 202551064 : xfile_create(
60 : const char *description,
61 : loff_t isize,
62 : struct xfile **xfilep)
63 : {
64 202551064 : struct inode *inode;
65 202551064 : struct xfile *xf;
66 202551064 : int error = -ENOMEM;
67 :
68 202551064 : xf = kzalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
69 202546108 : if (!xf)
70 : return -ENOMEM;
71 :
72 202546108 : xf->file = shmem_file_setup(description, isize, 0);
73 202651319 : if (!xf->file)
74 0 : goto out_xfile;
75 202651319 : if (IS_ERR(xf->file)) {
76 0 : error = PTR_ERR(xf->file);
77 0 : goto out_xfile;
78 : }
79 :
80 : /*
81 : * We want a large sparse file that we can pread, pwrite, and seek.
82 : * xfile users are responsible for keeping the xfile hidden away from
83 : * all other callers, so we skip timestamp updates and security checks.
84 : * Make the inode only accessible by root, just in case the xfile ever
85 : * escapes.
86 : */
87 202651319 : xf->file->f_mode |= FMODE_PREAD | FMODE_PWRITE | FMODE_NOCMTIME |
88 : FMODE_LSEEK;
89 202651319 : xf->file->f_flags |= O_RDWR | O_LARGEFILE | O_NOATIME;
90 202651319 : inode = file_inode(xf->file);
91 202651319 : inode->i_flags |= S_PRIVATE | S_NOCMTIME | S_NOATIME;
92 202651319 : inode->i_mode &= ~0177;
93 202651319 : inode->i_uid = GLOBAL_ROOT_UID;
94 202651319 : inode->i_gid = GLOBAL_ROOT_GID;
95 :
96 202651319 : lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);
97 :
98 202651319 : trace_xfile_create(xf);
99 :
100 202655908 : *xfilep = xf;
101 202655908 : return 0;
102 0 : out_xfile:
103 0 : kfree(xf);
104 0 : return error;
105 : }
106 :
107 : /* Evict a cache entry and release the page. */
108 : static inline int
109 0 : xfile_cache_evict(
110 : struct xfile *xf,
111 : struct xfile_cache *entry)
112 : {
113 0 : int error;
114 :
115 0 : if (!entry->xfpage.page)
116 : return 0;
117 :
118 0 : lock_page(entry->xfpage.page);
119 0 : kunmap(entry->kaddr);
120 :
121 0 : error = xfile_put_page(xf, &entry->xfpage);
122 0 : memset(entry, 0, sizeof(struct xfile_cache));
123 0 : return error;
124 : }
125 :
126 : /*
127 : * Grab a page, map it into the kernel address space, and fill out the cache
128 : * entry.
129 : */
130 : static int
131 0 : xfile_cache_fill(
132 : struct xfile *xf,
133 : loff_t key,
134 : struct xfile_cache *entry)
135 : {
136 0 : int error;
137 :
138 0 : error = xfile_get_page(xf, key, PAGE_SIZE, &entry->xfpage);
139 0 : if (error)
140 : return error;
141 :
142 0 : entry->kaddr = kmap(entry->xfpage.page);
143 0 : unlock_page(entry->xfpage.page);
144 0 : return 0;
145 : }
146 :
147 : /*
148 : * Return the kernel address of a cached position in the xfile. If the cache
149 : * misses, the relevant page will be brought into memory, mapped, and returned.
150 : * If the cache is disabled, returns NULL.
151 : */
152 : static void *
153 26163495237 : xfile_cache_lookup(
154 : struct xfile *xf,
155 : loff_t pos)
156 : {
157 26163495237 : loff_t key = round_down(pos, PAGE_SIZE);
158 26163495237 : unsigned int i;
159 26163495237 : int ret;
160 :
161 26163495237 : if (!(xf->flags & XFILE_INTERNAL_CACHE))
162 : return NULL;
163 :
164 : /* Is it already in the cache? */
165 0 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
166 0 : if (!xf->cached[i].xfpage.page)
167 0 : continue;
168 0 : if (page_offset(xf->cached[i].xfpage.page) != key)
169 0 : continue;
170 :
171 0 : goto found;
172 : }
173 :
174 : /* Not in the cache; use an empty slot, or evict the least recently used. */
175 0 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
176 0 : if (!xf->cached[i].xfpage.page)
177 0 : goto insert;
178 : }
179 0 : i = min_t(unsigned int, i, XFILE_CACHE_ENTRIES - 1);
180 :
181 0 : ret = xfile_cache_evict(xf, &xf->cached[i]);
182 0 : if (ret)
183 0 : return ERR_PTR(ret);
184 :
185 0 : insert:
186 0 : ret = xfile_cache_fill(xf, key, &xf->cached[i]);
187 0 : if (ret)
188 0 : return ERR_PTR(ret);
189 :
190 0 : found:
191 : /* Stupid MRU moves this cache entry to the front. */
192 0 : if (i != 0)
193 0 : swap(xf->cached[0], xf->cached[i]);
194 :
195 0 : return xf->cached[0].kaddr;
196 : }
197 :
198 : /* Drop all cached xfile pages. */
199 : static void
200 384187190 : xfile_cache_drop(
201 : struct xfile *xf)
202 : {
203 384187190 : unsigned int i;
204 :
205 384187190 : if (!(xf->flags & XFILE_INTERNAL_CACHE))
206 : return;
207 :
208 1419 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++)
209 1419 : xfile_cache_evict(xf, &xf->cached[i]);
210 : }
211 :
212 : /* Enable the internal xfile cache. */
213 : void
214 0 : xfile_cache_enable(
215 : struct xfile *xf)
216 : {
217 0 : xf->flags |= XFILE_INTERNAL_CACHE;
218 0 : memset(xf->cached, 0, sizeof(struct xfile_cache) * XFILE_CACHE_ENTRIES);
219 0 : }
220 :
221 : /* Disable the internal xfile cache. */
222 : void
223 80708 : xfile_cache_disable(
224 : struct xfile *xf)
225 : {
226 80708 : xfile_cache_drop(xf);
227 80705 : xf->flags &= ~XFILE_INTERNAL_CACHE;
228 80705 : }
229 :
230 : /* Close the file and release all resources. */
231 : void
232 202680273 : xfile_destroy(
233 : struct xfile *xf)
234 : {
235 202680273 : struct inode *inode = file_inode(xf->file);
236 :
237 202680273 : trace_xfile_destroy(xf);
238 :
239 202683571 : xfile_cache_drop(xf);
240 :
241 202684382 : lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
242 202684382 : fput(xf->file);
243 202706161 : kfree(xf);
244 202710621 : }
245 :
246 : /* Get a mapped page in the xfile, bypassing the internal cache. */
247 : static void *
248 26163114885 : xfile_uncached_get(
249 : struct xfile *xf,
250 : loff_t pos,
251 : struct xfile_page *xfpage)
252 : {
253 26163114885 : loff_t key = round_down(pos, PAGE_SIZE);
254 26163114885 : int error;
255 :
256 26163114885 : error = xfile_get_page(xf, key, PAGE_SIZE, xfpage);
257 26193704897 : if (error)
258 0 : return ERR_PTR(error);
259 :
260 26193704897 : return kmap_local_page(xfpage->page);
261 : }
262 :
263 : /* Release a mapped page that was obtained via xfile_uncached_get. */
264 : static int
265 : xfile_uncached_put(
266 : struct xfile *xf,
267 : struct xfile_page *xfpage,
268 : void *kaddr)
269 : {
270 26192181934 : kunmap_local(kaddr);
271 26192181934 : return xfile_put_page(xf, xfpage);
272 : }
273 :
274 : /*
275 : * Read a memory object directly from the xfile's page cache. Unlike regular
276 : * pread, we return -E2BIG and -EFBIG for reads that are too large or at too
277 : * high an offset, instead of truncating the read. Otherwise, we return
278 : * bytes read or an error code, like regular pread.
279 : */
280 : ssize_t
281 21179227666 : xfile_pread(
282 : struct xfile *xf,
283 : void *buf,
284 : size_t count,
285 : loff_t pos)
286 : {
287 21179227666 : struct inode *inode = file_inode(xf->file);
288 21179227666 : ssize_t read = 0;
289 21179227666 : unsigned int pflags;
290 21179227666 : int error = 0;
291 :
292 21179227666 : if (count > MAX_RW_COUNT)
293 : return -E2BIG;
294 21179227666 : if (inode->i_sb->s_maxbytes - pos < count)
295 : return -EFBIG;
296 :
297 21179227666 : trace_xfile_pread(xf, pos, count);
298 :
299 21179231645 : pflags = memalloc_nofs_save();
300 42361978782 : while (count > 0) {
301 21179356853 : struct xfile_page xfpage;
302 21179356853 : void *p, *kaddr;
303 21179356853 : unsigned int len;
304 21179356853 : bool cached = true;
305 :
306 21179356853 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
307 :
308 21179356853 : kaddr = xfile_cache_lookup(xf, pos);
309 21178990733 : if (!kaddr) {
310 21179276744 : cached = false;
311 21179276744 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
312 : }
313 21182133355 : if (IS_ERR(kaddr)) {
314 0 : error = PTR_ERR(kaddr);
315 0 : break;
316 : }
317 :
318 21182133355 : p = kaddr + offset_in_page(pos);
319 42364266710 : memcpy(buf, p, len);
320 :
321 21182133355 : if (!cached) {
322 21182132891 : error = xfile_uncached_put(xf, &xfpage, kaddr);
323 21182746673 : if (error)
324 : break;
325 : }
326 :
327 21182747137 : count -= len;
328 21182747137 : pos += len;
329 21182747137 : buf += len;
330 21182747137 : read += len;
331 : }
332 21182621929 : memalloc_nofs_restore(pflags);
333 :
334 21182621929 : if (read > 0)
335 : return read;
336 8738 : return error;
337 : }
338 :
339 : /*
340 : * Write a memory object directly to the xfile's page cache. Unlike regular
341 : * pwrite, we return -E2BIG and -EFBIG for writes that are too large or at too
342 : * high an offset, instead of truncating the write. Otherwise, we return
343 : * bytes written or an error code, like regular pwrite.
344 : */
345 : ssize_t
346 4986705267 : xfile_pwrite(
347 : struct xfile *xf,
348 : const void *buf,
349 : size_t count,
350 : loff_t pos)
351 : {
352 4986705267 : struct inode *inode = file_inode(xf->file);
353 4986705267 : ssize_t written = 0;
354 4986705267 : unsigned int pflags;
355 4986705267 : int error = 0;
356 :
357 4986705267 : if (count > MAX_RW_COUNT)
358 : return -E2BIG;
359 4986705267 : if (inode->i_sb->s_maxbytes - pos < count)
360 : return -EFBIG;
361 :
362 4986705267 : trace_xfile_pwrite(xf, pos, count);
363 :
364 4983277808 : pflags = memalloc_nofs_save();
365 9997811098 : while (count > 0) {
366 4983240919 : struct xfile_page xfpage;
367 4983240919 : void *p, *kaddr;
368 4983240919 : unsigned int len;
369 4983240919 : bool cached = true;
370 :
371 4983240919 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
372 :
373 4983240919 : kaddr = xfile_cache_lookup(xf, pos);
374 4987675328 : if (!kaddr) {
375 4987425141 : cached = false;
376 4987425141 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
377 : }
378 5010066788 : if (IS_ERR(kaddr)) {
379 0 : error = PTR_ERR(kaddr);
380 0 : break;
381 : }
382 :
383 5010066788 : p = kaddr + offset_in_page(pos);
384 10020133576 : memcpy(p, buf, len);
385 :
386 5010066788 : if (!cached) {
387 5010037852 : error = xfile_uncached_put(xf, &xfpage, kaddr);
388 5014504354 : if (error)
389 : break;
390 : }
391 :
392 5014533290 : written += len;
393 5014533290 : count -= len;
394 5014533290 : pos += len;
395 5014533290 : buf += len;
396 : }
397 5014570179 : memalloc_nofs_restore(pflags);
398 :
399 5014570179 : if (written > 0)
400 : return written;
401 8738 : return error;
402 : }
403 :
404 : /* Discard pages backing a range of the xfile. */
405 : void
406 181481307 : xfile_discard(
407 : struct xfile *xf,
408 : loff_t pos,
409 : u64 count)
410 : {
411 181481307 : trace_xfile_discard(xf, pos, count);
412 181481797 : xfile_cache_drop(xf);
413 181484611 : shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
414 181513852 : }
415 :
416 : /* Ensure that there is storage backing the given range. */
417 : int
418 11191 : xfile_prealloc(
419 : struct xfile *xf,
420 : loff_t pos,
421 : u64 count)
422 : {
423 11191 : struct inode *inode = file_inode(xf->file);
424 11191 : unsigned int pflags;
425 11191 : int error = 0;
426 :
427 11191 : if (count > MAX_RW_COUNT)
428 : return -E2BIG;
429 11191 : if (inode->i_sb->s_maxbytes - pos < count)
430 : return -EFBIG;
431 :
432 11191 : trace_xfile_prealloc(xf, pos, count);
433 :
434 11191 : pflags = memalloc_nofs_save();
435 22382 : while (count > 0) {
436 11191 : struct xfile_page xfpage;
437 11191 : void *kaddr;
438 11191 : unsigned int len;
439 :
440 11191 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
441 :
442 11191 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
443 11191 : if (IS_ERR(kaddr)) {
444 0 : error = PTR_ERR(kaddr);
445 0 : break;
446 : }
447 :
448 11191 : error = xfile_uncached_put(xf, &xfpage, kaddr);
449 11191 : if (error)
450 : break;
451 :
452 11191 : count -= len;
453 11191 : pos += len;
454 : }
455 11191 : memalloc_nofs_restore(pflags);
456 :
457 11191 : return error;
458 : }
459 :
460 : /* Find the next written area in the xfile data for a given offset. */
461 : loff_t
462 9278261 : xfile_seek_data(
463 : struct xfile *xf,
464 : loff_t pos)
465 : {
466 9278261 : loff_t ret;
467 :
468 9278261 : ret = vfs_llseek(xf->file, pos, SEEK_DATA);
469 9278257 : trace_xfile_seek_data(xf, pos, ret);
470 9278256 : return ret;
471 : }
472 :
473 : /* Query stat information for an xfile. */
474 : int
475 7543232870 : xfile_stat(
476 : struct xfile *xf,
477 : struct xfile_stat *statbuf)
478 : {
479 7543232870 : struct kstat ks;
480 7543232870 : int error;
481 :
482 7543232870 : error = vfs_getattr_nosec(&xf->file->f_path, &ks,
483 : STATX_SIZE | STATX_BLOCKS, AT_STATX_DONT_SYNC);
484 7543287504 : if (error)
485 : return error;
486 :
487 7543287504 : statbuf->size = ks.size;
488 7543287504 : statbuf->bytes = ks.blocks << SECTOR_SHIFT;
489 7543287504 : return 0;
490 : }
491 :
492 : /*
493 : * Grab the (locked) page for a memory object. The object cannot span a page
494 : * boundary. Returns 0 (and a locked page) if successful, -ENOTBLK if we
495 : * cannot grab the page, or the usual negative errno.
496 : */
497 : int
498 26223884943 : xfile_get_page(
499 : struct xfile *xf,
500 : loff_t pos,
501 : unsigned int len,
502 : struct xfile_page *xfpage)
503 : {
504 26223884943 : struct inode *inode = file_inode(xf->file);
505 26223884943 : struct address_space *mapping = inode->i_mapping;
506 26223884943 : const struct address_space_operations *aops = mapping->a_ops;
507 26223884943 : struct page *page = NULL;
508 26223884943 : void *fsdata = NULL;
509 26223884943 : loff_t key = round_down(pos, PAGE_SIZE);
510 26223884943 : unsigned int pflags;
511 26223884943 : int error;
512 :
513 26223884943 : if (inode->i_sb->s_maxbytes - pos < len)
514 : return -ENOMEM;
515 26223884943 : if (len > PAGE_SIZE - offset_in_page(pos))
516 : return -ENOTBLK;
517 :
518 26223884943 : trace_xfile_get_page(xf, pos, len);
519 :
520 26223966578 : pflags = memalloc_nofs_save();
521 :
522 : /*
523 : * We call write_begin directly here to avoid all the freezer
524 : * protection lock-taking that happens in the normal path. shmem
525 : * doesn't support fs freeze, but lockdep doesn't know that and will
526 : * trip over that.
527 : */
528 26223966578 : error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
529 : &fsdata);
530 26248720096 : if (error)
531 0 : goto out_pflags;
532 :
533 : /* We got the page, so make sure we push out EOF. */
534 26248720096 : if (i_size_read(inode) < pos + len)
535 46663128 : i_size_write(inode, pos + len);
536 :
537 : /*
538 : * If the page isn't up to date, fill it with zeroes before we hand it
539 : * to the caller and make sure the backing store will hold on to them.
540 : */
541 26248720096 : if (!PageUptodate(page)) {
542 54716228 : void *kaddr;
543 :
544 54716228 : kaddr = kmap_local_page(page);
545 54716228 : memset(kaddr, 0, PAGE_SIZE);
546 54716228 : kunmap_local(kaddr);
547 54716228 : SetPageUptodate(page);
548 : }
549 :
550 : /*
551 : * Mark the page dirty so that the contents are written to some
552 : * backing store when we drop this buffer, and take an extra reference
553 : * to prevent the xfile page from being swapped or removed from the
554 : * page cache by reclaim if the caller unlocks the page.
555 : */
556 26250550221 : set_page_dirty(page);
557 26249873776 : get_page(page);
558 :
559 26248980854 : xfpage->page = page;
560 26248980854 : xfpage->fsdata = fsdata;
561 26248980854 : xfpage->pos = key;
562 26248980854 : out_pflags:
563 26248980854 : memalloc_nofs_restore(pflags);
564 26248980854 : return error;
565 : }
566 :
567 : /*
568 : * Release the (locked) page for a memory object. Returns 0 or a negative
569 : * errno.
570 : */
571 : int
572 26248925856 : xfile_put_page(
573 : struct xfile *xf,
574 : struct xfile_page *xfpage)
575 : {
576 26248925856 : struct inode *inode = file_inode(xf->file);
577 26248925856 : struct address_space *mapping = inode->i_mapping;
578 26248925856 : const struct address_space_operations *aops = mapping->a_ops;
579 26248925856 : unsigned int pflags;
580 26248925856 : int ret;
581 :
582 26248925856 : trace_xfile_put_page(xf, xfpage->pos, xfpage->page);
583 :
584 : /* Give back the reference that we took in xfile_get_page. */
585 26249763433 : put_page(xfpage->page);
586 :
587 26251050625 : pflags = memalloc_nofs_save();
588 26251050625 : ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
589 : xfpage->page, xfpage->fsdata);
590 26253582896 : memalloc_nofs_restore(pflags);
591 26253582896 : memset(xfpage, 0, sizeof(struct xfile_page));
592 :
593 26253582896 : if (ret < 0)
594 : return ret;
595 26253582896 : if (ret != PAGE_SIZE)
596 0 : return -EIO;
597 : return 0;
598 : }
599 :
600 : /* Dump an xfile to dmesg. */
601 : int
602 0 : xfile_dump(
603 : struct xfile *xf)
604 : {
605 0 : struct xfile_stat sb;
606 0 : struct inode *inode = file_inode(xf->file);
607 0 : struct address_space *mapping = inode->i_mapping;
608 0 : loff_t holepos = 0;
609 0 : loff_t datapos;
610 0 : loff_t ret;
611 0 : unsigned int pflags;
612 0 : bool all_zeroes = true;
613 0 : int error = 0;
614 :
615 0 : error = xfile_stat(xf, &sb);
616 0 : if (error)
617 : return error;
618 :
619 0 : printk(KERN_ALERT "xfile ino 0x%lx isize 0x%llx dump:", inode->i_ino,
620 : sb.size);
621 0 : pflags = memalloc_nofs_save();
622 :
623 0 : while ((ret = vfs_llseek(xf->file, holepos, SEEK_DATA)) >= 0) {
624 0 : datapos = rounddown_64(ret, PAGE_SIZE);
625 0 : ret = vfs_llseek(xf->file, datapos, SEEK_HOLE);
626 0 : if (ret < 0)
627 : break;
628 0 : holepos = min_t(loff_t, sb.size, roundup_64(ret, PAGE_SIZE));
629 :
630 0 : while (datapos < holepos) {
631 0 : struct page *page = NULL;
632 0 : void *p, *kaddr;
633 0 : u64 datalen = holepos - datapos;
634 0 : unsigned int pagepos;
635 0 : unsigned int pagelen;
636 :
637 0 : cond_resched();
638 :
639 0 : if (fatal_signal_pending(current)) {
640 0 : error = -EINTR;
641 0 : goto out_pflags;
642 : }
643 :
644 0 : pagelen = min_t(u64, datalen, PAGE_SIZE);
645 :
646 0 : page = shmem_read_mapping_page_gfp(mapping,
647 0 : datapos >> PAGE_SHIFT, __GFP_NOWARN);
648 0 : if (IS_ERR(page)) {
649 0 : error = PTR_ERR(page);
650 0 : if (error == -EIO)
651 0 : printk(KERN_ALERT "%.8llx: poisoned",
652 : datapos);
653 0 : else if (error != -ENOMEM)
654 0 : goto out_pflags;
655 :
656 0 : goto next_pgoff;
657 : }
658 :
659 0 : if (!PageUptodate(page))
660 0 : goto next_page;
661 :
662 0 : kaddr = kmap_local_page(page);
663 0 : p = kaddr;
664 :
665 0 : for (pagepos = 0; pagepos < pagelen; pagepos += 16) {
666 0 : char prefix[16];
667 0 : unsigned int linelen;
668 :
669 0 : linelen = min_t(unsigned int, pagelen, 16);
670 :
671 0 : if (!memchr_inv(p + pagepos, 0, linelen))
672 0 : continue;
673 :
674 0 : snprintf(prefix, 16, "%.8llx: ",
675 : datapos + pagepos);
676 :
677 0 : all_zeroes = false;
678 0 : print_hex_dump(KERN_ALERT, prefix,
679 : DUMP_PREFIX_NONE, 16, 1,
680 : p + pagepos, linelen, true);
681 : }
682 0 : kunmap_local(kaddr);
683 0 : next_page:
684 0 : put_page(page);
685 0 : next_pgoff:
686 0 : datapos += PAGE_SIZE;
687 : }
688 : }
689 0 : if (all_zeroes)
690 0 : printk(KERN_ALERT "<all zeroes>");
691 0 : if (ret != -ENXIO)
692 0 : error = ret;
693 0 : out_pflags:
694 0 : memalloc_nofs_restore(pflags);
695 0 : return error;
696 : }