Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2018-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_log_format.h"
11 : #include "xfs_trans_resv.h"
12 : #include "xfs_mount.h"
14 : #include "scrub/xfile.h"
15 : #include "scrub/xfarray.h"
16 : #include "scrub/scrub.h"
17 : #include "scrub/trace.h"
18 : #include <linux/shmem_fs.h>
19 :
20 : /*
21 : * Swappable Temporary Memory
22 : * ==========================
23 : *
24 : * Online checking sometimes needs to be able to stage a large amount of data
25 : * in memory. This information might not fit in the available memory and it
26 : * doesn't all need to be accessible at all times. In other words, we want an
27 : * indexed data buffer to store data that can be paged out.
28 : *
29 : * When CONFIG_TMPFS=y, shmemfs is enough of a filesystem to meet those
30 : * requirements. Therefore, the xfile mechanism uses an unlinked shmem file to
31 : * store our staging data. This file is not installed in the file descriptor
32 : * table so that user programs cannot access the data, which means that the
33 : * xfile must be freed with xfile_destroy.
34 : *
35 : * xfiles assume that the caller will handle all required concurrency
36 : * management; standard vfs locks (freezer and inode) are not taken. Reads
37 : * and writes are satisfied directly from the page cache.
38 : *
39 : * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
40 : * of a hole cause a page to be mapped into the file. If you are going to
41 : * create a sparse xfile, please be careful about reading from uninitialized
42 : * parts of the file. These pages are !Uptodate and will eventually be
43 : * reclaimed if not written, but in the short term this boosts memory
44 : * consumption.
45 : */
46 :
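/*
 * A minimal lifecycle sketch, assuming only the xfile API defined in this
 * file and scrub/xfile.h: create an unlinked staging file, store and
 * reload one value, then free it.  The value and its offset are
 * hypothetical.
 */
static inline int xfile_lifecycle_sketch(void)
{
	struct xfile	*xf;
	u64		val = 42, readback = 0;
	ssize_t		ret;
	int		error;

	error = xfile_create("sketch staging data", 0, &xf);
	if (error)
		return error;

	/* Both calls return bytes copied or a negative errno. */
	ret = xfile_pwrite(xf, &val, sizeof(val), 0);
	if (ret == sizeof(val))
		ret = xfile_pread(xf, &readback, sizeof(readback), 0);

	xfile_destroy(xf);
	return ret < 0 ? ret : 0;
}
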
47 : /*
48 : * xfiles must not be exposed to userspace and require upper layers to
49 : * coordinate access to the one handle returned by the constructor, so
50 : * establish a separate lock class for xfiles to avoid confusing lockdep.
51 : */
52 : static struct lock_class_key xfile_i_mutex_key;
53 :
54 : /*
55 : * Create an xfile of the given size. The description will be used in the
56 : * trace output.
57 : */
58 : int
59 256552241 : xfile_create(
60 : const char *description,
61 : loff_t isize,
62 : struct xfile **xfilep)
63 : {
64 256552241 : struct inode *inode;
65 256552241 : struct xfile *xf;
66 256552241 : int error = -ENOMEM;
67 :
68 256552241 : xf = kzalloc(sizeof(struct xfile), XCHK_GFP_FLAGS);
69 256427193 : if (!xf)
70 : return -ENOMEM;
71 :
72 256427193 : xf->file = shmem_file_setup(description, isize, 0);
73 256711961 : if (!xf->file)
74 0 : goto out_xfile;
75 256711961 : if (IS_ERR(xf->file)) {
76 0 : error = PTR_ERR(xf->file);
77 0 : goto out_xfile;
78 : }
79 :
80 : /*
81 : * We want a large sparse file that we can pread, pwrite, and seek.
82 : * xfile users are responsible for keeping the xfile hidden away from
83 : * all other callers, so we skip timestamp updates and security checks.
84 : * Make the inode only accessible by root, just in case the xfile ever
85 : * escapes.
86 : */
87 256711961 : xf->file->f_mode |= FMODE_PREAD | FMODE_PWRITE | FMODE_NOCMTIME |
88 : FMODE_LSEEK;
89 256711961 : xf->file->f_flags |= O_RDWR | O_LARGEFILE | O_NOATIME;
90 256711961 : inode = file_inode(xf->file);
91 256711961 : inode->i_flags |= S_PRIVATE | S_NOCMTIME | S_NOATIME;
92 256711961 : inode->i_mode &= ~0177;
93 256711961 : inode->i_uid = GLOBAL_ROOT_UID;
94 256711961 : inode->i_gid = GLOBAL_ROOT_GID;
95 :
96 256711961 : lockdep_set_class(&inode->i_rwsem, &xfile_i_mutex_key);
97 :
98 256711961 : trace_xfile_create(xf);
99 :
100 256710482 : *xfilep = xf;
101 256710482 : return 0;
102 0 : out_xfile:
103 0 : kfree(xf);
104 0 : return error;
105 : }
106 :
107 : /* Evict a cache entry and release the page. */
108 : static inline int
109 0 : xfile_cache_evict(
110 : struct xfile *xf,
111 : struct xfile_cache *entry)
112 : {
113 0 : int error;
114 :
115 0 : if (!entry->xfpage.page)
116 : return 0;
117 :
118 0 : lock_page(entry->xfpage.page);
119 0 : kunmap(entry->kaddr);
120 :
121 0 : error = xfile_put_page(xf, &entry->xfpage);
122 0 : memset(entry, 0, sizeof(struct xfile_cache));
123 0 : return error;
124 : }
125 :
126 : /*
127 : * Grab a page, map it into the kernel address space, and fill out the cache
128 : * entry.
129 : */
130 : static int
131 0 : xfile_cache_fill(
132 : struct xfile *xf,
133 : loff_t key,
134 : struct xfile_cache *entry)
135 : {
136 0 : int error;
137 :
138 0 : error = xfile_get_page(xf, key, PAGE_SIZE, &entry->xfpage);
139 0 : if (error)
140 : return error;
141 :
142 0 : entry->kaddr = kmap(entry->xfpage.page);
143 0 : unlock_page(entry->xfpage.page);
144 0 : return 0;
145 : }
146 :
147 : /*
148 : * Return the kernel address of a cached position in the xfile. If the cache
149 : * misses, the relevant page will be brought into memory, mapped, and returned.
150 : * If the cache is disabled, returns NULL.
151 : */
152 : static void *
153 19208780274 : xfile_cache_lookup(
154 : struct xfile *xf,
155 : loff_t pos)
156 : {
157 19208780274 : loff_t key = round_down(pos, PAGE_SIZE);
158 19208780274 : unsigned int i;
159 19208780274 : int ret;
160 :
161 19208780274 : if (!(xf->flags & XFILE_INTERNAL_CACHE))
162 : return NULL;
163 :
164 : /* Is it already in the cache? */
165 0 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
166 0 : if (!xf->cached[i].xfpage.page)
167 0 : continue;
168 0 : if (page_offset(xf->cached[i].xfpage.page) != key)
169 0 : continue;
170 :
171 0 : goto found;
172 : }
173 :
174 : /* Find an empty slot; failing that, evict the least recently used entry. */
175 0 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++) {
176 0 : if (!xf->cached[i].xfpage.page)
177 0 : goto insert;
178 : }
179 0 : i = min_t(unsigned int, i, XFILE_CACHE_ENTRIES - 1);
180 :
181 0 : ret = xfile_cache_evict(xf, &xf->cached[i]);
182 0 : if (ret)
183 0 : return ERR_PTR(ret);
184 :
185 0 : insert:
186 0 : ret = xfile_cache_fill(xf, key, &xf->cached[i]);
187 0 : if (ret)
188 0 : return ERR_PTR(ret);
189 :
190 0 : found:
191 : /* Stupid MRU moves this cache entry to the front. */
192 0 : if (i != 0)
193 0 : swap(xf->cached[0], xf->cached[i]);
194 :
195 0 : return xf->cached[0].kaddr;
196 : }
197 :
198 : /* Drop all cached xfile pages. */
199 : static void
200 486482205 : xfile_cache_drop(
201 : struct xfile *xf)
202 : {
203 486482205 : unsigned int i;
204 :
205 486482205 : if (!(xf->flags & XFILE_INTERNAL_CACHE))
206 : return;
207 :
208 280 : for (i = 0; i < XFILE_CACHE_ENTRIES; i++)
209 280 : xfile_cache_evict(xf, &xf->cached[i]);
210 : }
211 :
212 : /* Enable the internal xfile cache. */
213 : void
214 0 : xfile_cache_enable(
215 : struct xfile *xf)
216 : {
217 0 : xf->flags |= XFILE_INTERNAL_CACHE;
218 0 : memset(xf->cached, 0, sizeof(struct xfile_cache) * XFILE_CACHE_ENTRIES);
219 0 : }
220 :
221 : /* Disable the internal xfile cache. */
222 : void
223 21353 : xfile_cache_disable(
224 : struct xfile *xf)
225 : {
226 21353 : xfile_cache_drop(xf);
227 21352 : xf->flags &= ~XFILE_INTERNAL_CACHE;
228 21352 : }
229 :
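/*
 * A sketch of a cached access phase, assuming a hypothetical caller that
 * rereads a few hot records: every read below lands in the same page, so
 * after the first miss the internal MRU cache serves the rest without
 * another page lookup.  Disabling the cache afterwards unmaps the page.
 */
static inline int xfile_hot_loop_sketch(struct xfile *xf)
{
	u32	rec;
	loff_t	pos;
	ssize_t	ret = 0;

	xfile_cache_enable(xf);
	for (pos = 0; pos < 64 * sizeof(rec); pos += sizeof(rec)) {
		ret = xfile_pread(xf, &rec, sizeof(rec), pos);
		if (ret < 0)
			break;
	}
	xfile_cache_disable(xf);
	return ret < 0 ? ret : 0;
}
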
230 : /* Close the file and release all resources. */
231 : void
232 256726398 : xfile_destroy(
233 : struct xfile *xf)
234 : {
235 256726398 : struct inode *inode = file_inode(xf->file);
236 :
237 256726398 : trace_xfile_destroy(xf);
238 :
239 256730258 : xfile_cache_drop(xf);
240 :
241 256728021 : lockdep_set_class(&inode->i_rwsem, &inode->i_sb->s_type->i_mutex_key);
242 256728021 : fput(xf->file);
243 256757150 : kfree(xf);
244 256762798 : }
245 :
246 : /* Get a mapped page in the xfile, bypassing the internal cache. */
247 : static void *
248 19206846692 : xfile_uncached_get(
249 : struct xfile *xf,
250 : loff_t pos,
251 : struct xfile_page *xfpage)
252 : {
253 19206846692 : loff_t key = round_down(pos, PAGE_SIZE);
254 19206846692 : int error;
255 :
256 19206846692 : error = xfile_get_page(xf, key, PAGE_SIZE, xfpage);
257 19238518889 : if (error)
258 0 : return ERR_PTR(error);
259 :
260 19238518889 : return kmap_local_page(xfpage->page);
261 : }
262 :
263 : /* Release a mapped page that was obtained via xfile_uncached_get. */
264 : static int
265 : xfile_uncached_put(
266 : struct xfile *xf,
267 : struct xfile_page *xfpage,
268 : void *kaddr)
269 : {
270 19238301961 : kunmap_local(kaddr);
271 19238301961 : return xfile_put_page(xf, xfpage);
272 : }
273 :
274 : /*
275 : * Read a memory object directly from the xfile's page cache. Unlike regular
276 : * pread, we return -E2BIG and -EFBIG for reads that are too large or at too
277 : * high an offset, instead of truncating the read. Otherwise, we return
278 : * bytes read or an error code, like regular pread.
279 : */
280 : ssize_t
281 13973588330 : xfile_pread(
282 : struct xfile *xf,
283 : void *buf,
284 : size_t count,
285 : loff_t pos)
286 : {
287 13973588330 : struct inode *inode = file_inode(xf->file);
288 13973588330 : ssize_t read = 0;
289 13973588330 : unsigned int pflags;
290 13973588330 : int error = 0;
291 :
292 13973588330 : if (count > MAX_RW_COUNT)
293 : return -E2BIG;
294 13973588330 : if (inode->i_sb->s_maxbytes - pos < count)
295 : return -EFBIG;
296 :
297 13973588330 : trace_xfile_pread(xf, pos, count);
298 :
299 13973557065 : pflags = memalloc_nofs_save();
300 27949583576 : while (count > 0) {
301 13973512563 : struct xfile_page xfpage;
302 13973512563 : void *p, *kaddr;
303 13973512563 : unsigned int len;
304 13973512563 : bool cached = true;
305 :
306 13973512563 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
307 :
308 13973512563 : kaddr = xfile_cache_lookup(xf, pos);
309 13972498755 : if (!kaddr) {
310 13973602483 : cached = false;
311 13973602483 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
312 : }
313 13974600774 : if (IS_ERR(kaddr)) {
314 0 : error = PTR_ERR(kaddr);
315 0 : break;
316 : }
317 :
318 13974600774 : p = kaddr + offset_in_page(pos);
319 27949201548 : memcpy(buf, p, len);
320 :
321 13974600774 : if (!cached) {
322 13974631438 : error = xfile_uncached_put(xf, &xfpage, kaddr);
323 13976057175 : if (error)
324 : break;
325 : }
326 :
327 13976026511 : count -= len;
328 13976026511 : pos += len;
329 13976026511 : buf += len;
330 13976026511 : read += len;
331 : }
332 13976071013 : memalloc_nofs_restore(pflags);
333 :
334 13976071013 : if (read > 0)
335 : return read;
336 9229 : return error;
337 : }
338 :
339 : /*
340 : * Write a memory object directly to the xfile's page cache. Unlike regular
341 : * pwrite, we return -E2BIG and -EFBIG for writes that are too large or at too
342 : * high an offset, instead of truncating the write. Otherwise, we return
343 : * bytes written or an error code, like regular pwrite.
344 : */
345 : ssize_t
346 5237216545 : xfile_pwrite(
347 : struct xfile *xf,
348 : const void *buf,
349 : size_t count,
350 : loff_t pos)
351 : {
352 5237216545 : struct inode *inode = file_inode(xf->file);
353 5237216545 : ssize_t written = 0;
354 5237216545 : unsigned int pflags;
355 5237216545 : int error = 0;
356 :
357 5237216545 : if (count > MAX_RW_COUNT)
358 : return -E2BIG;
359 5237216545 : if (inode->i_sb->s_maxbytes - pos < count)
360 : return -EFBIG;
361 :
362 5237216545 : trace_xfile_pwrite(xf, pos, count);
363 :
364 5237434388 : pflags = memalloc_nofs_save();
365 10505564727 : while (count > 0) {
366 5237325370 : struct xfile_page xfpage;
367 5237325370 : void *p, *kaddr;
368 5237325370 : unsigned int len;
369 5237325370 : bool cached = true;
370 :
371 5237325370 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
372 :
373 5237325370 : kaddr = xfile_cache_lookup(xf, pos);
374 5243308856 : if (!kaddr) {
375 5241735777 : cached = false;
376 5241735777 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
377 : }
378 5263699837 : if (IS_ERR(kaddr)) {
379 0 : error = PTR_ERR(kaddr);
380 0 : break;
381 : }
382 :
383 5263699837 : p = kaddr + offset_in_page(pos);
384 10527399674 : memcpy(p, buf, len);
385 :
386 5263699837 : if (!cached) {
387 5263665780 : error = xfile_uncached_put(xf, &xfpage, kaddr);
388 5268096282 : if (error)
389 : break;
390 : }
391 :
392 5268130339 : written += len;
393 5268130339 : count -= len;
394 5268130339 : pos += len;
395 5268130339 : buf += len;
396 : }
397 5268239357 : memalloc_nofs_restore(pflags);
398 :
399 5268239357 : if (written > 0)
400 : return written;
401 9229 : return error;
402 : }
403 :
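/*
 * A sketch of the fixed-size record pattern that these helpers support
 * (xfarray layers an array abstraction much like this over an xfile);
 * the record type and index math are hypothetical.  pread and pwrite
 * copy across page boundaries internally, so records need no special
 * alignment.
 */
struct xfile_sketch_rec {
	u64	key;
	u64	value;
};

static inline int xfile_sketch_store(struct xfile *xf, u64 idx,
		const struct xfile_sketch_rec *rec)
{
	ssize_t	ret;

	ret = xfile_pwrite(xf, rec, sizeof(*rec), idx * sizeof(*rec));
	if (ret < 0)
		return ret;
	return ret == sizeof(*rec) ? 0 : -EIO;
}
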
404 : /* Discard pages backing a range of the xfile. */
405 : void
406 229803500 : xfile_discard(
407 : struct xfile *xf,
408 : loff_t pos,
409 : u64 count)
410 : {
411 229803500 : trace_xfile_discard(xf, pos, count);
412 229807246 : xfile_cache_drop(xf);
413 229803725 : shmem_truncate_range(file_inode(xf->file), pos, pos + count - 1);
414 229848646 : }
415 :
416 : /* Ensure that there is storage backing the given range. */
417 : int
418 4743 : xfile_prealloc(
419 : struct xfile *xf,
420 : loff_t pos,
421 : u64 count)
422 : {
423 4743 : struct inode *inode = file_inode(xf->file);
424 4743 : unsigned int pflags;
425 4743 : int error = 0;
426 :
427 4743 : if (count > MAX_RW_COUNT)
428 : return -E2BIG;
429 4743 : if (inode->i_sb->s_maxbytes - pos < count)
430 : return -EFBIG;
431 :
432 4743 : trace_xfile_prealloc(xf, pos, count);
433 :
434 4743 : pflags = memalloc_nofs_save();
435 9486 : while (count > 0) {
436 4743 : struct xfile_page xfpage;
437 4743 : void *kaddr;
438 4743 : unsigned int len;
439 :
440 4743 : len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
441 :
442 4743 : kaddr = xfile_uncached_get(xf, pos, &xfpage);
443 4743 : if (IS_ERR(kaddr)) {
444 0 : error = PTR_ERR(kaddr);
445 0 : break;
446 : }
447 :
448 4743 : error = xfile_uncached_put(xf, &xfpage, kaddr);
449 4743 : if (error)
450 : break;
451 :
452 4743 : count -= len;
453 4743 : pos += len;
454 : }
455 4743 : memalloc_nofs_restore(pflags);
456 :
457 4743 : return error;
458 : }
459 :
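/*
 * A sketch of the intended prealloc pattern, assuming a hypothetical
 * caller whose later phase cannot tolerate allocation failure: back the
 * record range with pages now, while -ENOMEM is still easy to unwind.
 */
static inline int xfile_sketch_setup(struct xfile *xf, u64 nr_recs)
{
	return xfile_prealloc(xf, 0,
			nr_recs * sizeof(struct xfile_sketch_rec));
}
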
460 : /* Find the next written area in the xfile data for a given offset. */
461 : loff_t
462 5645910 : xfile_seek_data(
463 : struct xfile *xf,
464 : loff_t pos)
465 : {
466 5645910 : loff_t ret;
467 :
468 5645910 : ret = vfs_llseek(xf->file, pos, SEEK_DATA);
469 5645949 : trace_xfile_seek_data(xf, pos, ret);
470 5645951 : return ret;
471 : }
472 :
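/*
 * A sketch of walking only the populated parts of a sparse xfile.
 * xfile_seek_data returns -ENXIO once pos passes the last written byte,
 * which is the same termination condition xfile_dump relies on below.
 */
static inline loff_t xfile_sketch_count_pages(struct xfile *xf)
{
	loff_t	pos = 0;
	loff_t	pages = 0;

	while ((pos = xfile_seek_data(xf, pos)) >= 0) {
		pages++;
		pos = round_down(pos, PAGE_SIZE) + PAGE_SIZE;
	}
	return pos == -ENXIO ? pages : pos;
}
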
473 : /* Query stat information for an xfile. */
474 : int
475 4228362556 : xfile_stat(
476 : struct xfile *xf,
477 : struct xfile_stat *statbuf)
478 : {
479 4228362556 : struct kstat ks;
480 4228362556 : int error;
481 :
482 4228362556 : error = vfs_getattr_nosec(&xf->file->f_path, &ks,
483 : STATX_SIZE | STATX_BLOCKS, AT_STATX_DONT_SYNC);
484 4228156471 : if (error)
485 : return error;
486 :
487 4228156471 : statbuf->size = ks.size;
488 4228156471 : statbuf->bytes = ks.blocks << SECTOR_SHIFT;
489 4228156471 : return 0;
490 : }
491 :
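/*
 * A sketch of typical xfile_stat use, assuming a hypothetical caller
 * tracking memory overhead: ->bytes counts the space actually backed by
 * pages, which for a sparse xfile can be far less than ->size.
 */
static inline unsigned long long xfile_sketch_bytes(struct xfile *xf)
{
	struct xfile_stat	xs;

	if (xfile_stat(xf, &xs))
		return 0;
	return xs.bytes;
}
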
492 : /*
493 : * Grab the (locked) page for a memory object. The object cannot span a page
494 : * boundary. Returns 0 (and a locked page) if successful, -ENOTBLK if the
495 : * object would cross a page boundary, or the usual negative errno.
496 : */
497 : int
498 19207361509 : xfile_get_page(
499 : struct xfile *xf,
500 : loff_t pos,
501 : unsigned int len,
502 : struct xfile_page *xfpage)
503 : {
504 19207361509 : struct inode *inode = file_inode(xf->file);
505 19207361509 : struct address_space *mapping = inode->i_mapping;
506 19207361509 : const struct address_space_operations *aops = mapping->a_ops;
507 19207361509 : struct page *page = NULL;
508 19207361509 : void *fsdata = NULL;
509 19207361509 : loff_t key = round_down(pos, PAGE_SIZE);
510 19207361509 : unsigned int pflags;
511 19207361509 : int error;
512 :
513 19207361509 : if (inode->i_sb->s_maxbytes - pos < len)
514 : return -ENOMEM;
515 19207361509 : if (len > PAGE_SIZE - offset_in_page(pos))
516 : return -ENOTBLK;
517 :
518 19207361509 : trace_xfile_get_page(xf, pos, len);
519 :
520 19207816147 : pflags = memalloc_nofs_save();
521 :
522 : /*
523 : * We call write_begin directly here to avoid all the freezer
524 : * protection lock-taking that happens in the normal path. shmem
525 : * doesn't support fs freeze, but lockdep doesn't know that and will
526 : * trip over it.
527 : */
528 19207816147 : error = aops->write_begin(NULL, mapping, key, PAGE_SIZE, &page,
529 : &fsdata);
530 19236546825 : if (error)
531 0 : goto out_pflags;
532 :
533 : /* We got the page, so make sure we push out EOF. */
534 19236546825 : if (i_size_read(inode) < pos + len)
535 55030206 : i_size_write(inode, pos + len);
536 :
537 : /*
538 : * If the page isn't up to date, fill it with zeroes before we hand it
539 : * to the caller and make sure the backing store will hold on to them.
540 : */
541 19236546825 : if (!PageUptodate(page)) {
542 59890059 : void *kaddr;
543 :
544 59890059 : kaddr = kmap_local_page(page);
545 59890059 : memset(kaddr, 0, PAGE_SIZE);
546 59890059 : kunmap_local(kaddr);
547 59890059 : SetPageUptodate(page);
548 : }
549 :
550 : /*
551 : * Mark the page dirty so that its contents are written to some
552 : * backing store when we drop this buffer, and take an extra reference
553 : * to prevent the xfile page from being swapped or removed from the
554 : * page cache by reclaim if the caller unlocks the page.
555 : */
556 19240186804 : set_page_dirty(page);
557 19239632370 : get_page(page);
558 :
559 19238912896 : xfpage->page = page;
560 19238912896 : xfpage->fsdata = fsdata;
561 19238912896 : xfpage->pos = key;
562 19238912896 : out_pflags:
563 19238912896 : memalloc_nofs_restore(pflags);
564 19238912896 : return error;
565 : }
566 :
567 : /*
568 : * Release the (locked) page for a memory object. Returns 0 or a negative
569 : * errno.
570 : */
571 : int
572 19239733784 : xfile_put_page(
573 : struct xfile *xf,
574 : struct xfile_page *xfpage)
575 : {
576 19239733784 : struct inode *inode = file_inode(xf->file);
577 19239733784 : struct address_space *mapping = inode->i_mapping;
578 19239733784 : const struct address_space_operations *aops = mapping->a_ops;
579 19239733784 : unsigned int pflags;
580 19239733784 : int ret;
581 :
582 19239733784 : trace_xfile_put_page(xf, xfpage->pos, xfpage->page);
583 :
584 : /* Give back the reference that we took in xfile_get_page. */
585 19240476520 : put_page(xfpage->page);
586 :
587 19240550285 : pflags = memalloc_nofs_save();
588 19240550285 : ret = aops->write_end(NULL, mapping, xfpage->pos, PAGE_SIZE, PAGE_SIZE,
589 : xfpage->page, xfpage->fsdata);
590 19244309839 : memalloc_nofs_restore(pflags);
591 19244309839 : memset(xfpage, 0, sizeof(struct xfile_page));
592 :
593 19244309839 : if (ret < 0)
594 : return ret;
595 19244309839 : if (ret != PAGE_SIZE)
596 0 : return -EIO;
597 : return 0;
598 : }
599 :
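/*
 * A sketch of direct in-place access, mirroring xfile_uncached_get and
 * xfile_uncached_put above; the update callback is hypothetical.  The
 * object must fit within a single page or xfile_get_page returns
 * -ENOTBLK.
 */
static inline int xfile_sketch_update(struct xfile *xf, loff_t pos,
		unsigned int len, void (*update)(void *obj))
{
	struct xfile_page	xfpage;
	void			*kaddr;
	int			error;

	error = xfile_get_page(xf, pos, len, &xfpage);
	if (error)
		return error;

	kaddr = kmap_local_page(xfpage.page);
	update(kaddr + offset_in_page(pos));
	kunmap_local(kaddr);
	return xfile_put_page(xf, &xfpage);
}
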
600 : /* Dump an xfile to dmesg. */
601 : int
602 0 : xfile_dump(
603 : struct xfile *xf)
604 : {
605 0 : struct xfile_stat sb;
606 0 : struct inode *inode = file_inode(xf->file);
607 0 : struct address_space *mapping = inode->i_mapping;
608 0 : loff_t holepos = 0;
609 0 : loff_t datapos;
610 0 : loff_t ret;
611 0 : unsigned int pflags;
612 0 : bool all_zeroes = true;
613 0 : int error = 0;
614 :
615 0 : error = xfile_stat(xf, &sb);
616 0 : if (error)
617 : return error;
618 :
619 0 : printk(KERN_ALERT "xfile ino 0x%lx isize 0x%llx dump:", inode->i_ino,
620 : sb.size);
621 0 : pflags = memalloc_nofs_save();
622 :
623 0 : while ((ret = vfs_llseek(xf->file, holepos, SEEK_DATA)) >= 0) {
624 0 : datapos = rounddown_64(ret, PAGE_SIZE);
625 0 : ret = vfs_llseek(xf->file, datapos, SEEK_HOLE);
626 0 : if (ret < 0)
627 : break;
628 0 : holepos = min_t(loff_t, sb.size, roundup_64(ret, PAGE_SIZE));
629 :
630 0 : while (datapos < holepos) {
631 0 : struct page *page = NULL;
632 0 : void *p, *kaddr;
633 0 : u64 datalen = holepos - datapos;
634 0 : unsigned int pagepos;
635 0 : unsigned int pagelen;
636 :
637 0 : cond_resched();
638 :
639 0 : if (fatal_signal_pending(current)) {
640 0 : error = -EINTR;
641 0 : goto out_pflags;
642 : }
643 :
644 0 : pagelen = min_t(u64, datalen, PAGE_SIZE);
645 :
646 0 : page = shmem_read_mapping_page_gfp(mapping,
647 0 : datapos >> PAGE_SHIFT, __GFP_NOWARN);
648 0 : if (IS_ERR(page)) {
649 0 : error = PTR_ERR(page);
650 0 : if (error == -EIO)
651 0 : printk(KERN_ALERT "%.8llx: poisoned",
652 : datapos);
653 0 : else if (error != -ENOMEM)
654 0 : goto out_pflags;
655 :
656 0 : goto next_pgoff;
657 : }
658 :
659 0 : if (!PageUptodate(page))
660 0 : goto next_page;
661 :
662 0 : kaddr = kmap_local_page(page);
663 0 : p = kaddr;
664 :
665 0 : for (pagepos = 0; pagepos < pagelen; pagepos += 16) {
666 0 : char prefix[16];
667 0 : unsigned int linelen;
668 :
669 0 : linelen = min_t(unsigned int, pagelen - pagepos, 16);
670 :
671 0 : if (!memchr_inv(p + pagepos, 0, linelen))
672 0 : continue;
673 :
674 0 : snprintf(prefix, 16, "%.8llx: ",
675 : datapos + pagepos);
676 :
677 0 : all_zeroes = false;
678 0 : print_hex_dump(KERN_ALERT, prefix,
679 : DUMP_PREFIX_NONE, 16, 1,
680 : p + pagepos, linelen, true);
681 : }
682 0 : kunmap_local(kaddr);
683 0 : next_page:
684 0 : put_page(page);
685 0 : next_pgoff:
686 0 : datapos += PAGE_SIZE;
687 : }
688 : }
689 0 : if (all_zeroes)
690 0 : printk(KERN_ALERT "<all zeroes>");
691 0 : if (ret != -ENXIO)
692 0 : error = ret;
693 0 : out_pflags:
694 0 : memalloc_nofs_restore(pflags);
695 0 : return error;
696 : }