Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Copyright (C) 2011, 2012 STRATO. All rights reserved.
4 : */
5 :
6 : #include <linux/blkdev.h>
7 : #include <linux/ratelimit.h>
8 : #include <linux/sched/mm.h>
9 : #include <crypto/hash.h>
10 : #include "ctree.h"
11 : #include "discard.h"
12 : #include "volumes.h"
13 : #include "disk-io.h"
14 : #include "ordered-data.h"
15 : #include "transaction.h"
16 : #include "backref.h"
17 : #include "extent_io.h"
18 : #include "dev-replace.h"
19 : #include "check-integrity.h"
20 : #include "raid56.h"
21 : #include "block-group.h"
22 : #include "zoned.h"
23 : #include "fs.h"
24 : #include "accessors.h"
25 : #include "file-item.h"
26 : #include "scrub.h"
27 :
28 : /*
29 : * This is only the first step towards a full-featured scrub. It reads all
30 : * extents and super blocks and verifies the checksums. In case a bad checksum
31 : * is found or the extent cannot be read, good data will be written back if
32 : * any can be found.
33 : *
34 : * Future enhancements:
35 : * - In case an unrepairable extent is encountered, track which files are
36 : * affected and report them
37 : * - track and record media errors, throw out bad devices
38 : * - add a mode to also read unallocated space
39 : */
40 :
41 : struct scrub_ctx;
42 :
43 : /*
44 : * The following value only influences performance.
45 : *
46 : * This determines the batch size for stripes submitted in one go.
47 : */
48 : #define SCRUB_STRIPES_PER_SCTX 8 /* That would be 8 64K stripes per device. */
49 :
50 : /*
51 : * The following value times PAGE_SIZE needs to be large enough to match the
52 : * largest node/leaf/sector size that shall be supported.
53 : */
54 : #define SCRUB_MAX_SECTORS_PER_BLOCK (BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
55 :
56 : /* Represents one sector and the info needed to verify its content. */
57 : struct scrub_sector_verification {
58 : bool is_metadata;
59 :
60 : union {
61 : /*
62 : * Csum pointer for data csum verification. Should point to a
63 : * sector csum inside scrub_stripe::csums.
64 : *
65 : * NULL if this data sector has no csum.
66 : */
67 : u8 *csum;
68 :
69 : /*
70 : * Extra info for metadata verification. All sectors inside a
71 : * tree block share the same generation.
72 : */
73 : u64 generation;
74 : };
75 : };
76 :
77 : enum scrub_stripe_flags {
78 : /* Set when @mirror_num, @dev, @physical and @logical are set. */
79 : SCRUB_STRIPE_FLAG_INITIALIZED,
80 :
81 : /* Set when the read-repair is finished. */
82 : SCRUB_STRIPE_FLAG_REPAIR_DONE,
83 :
84 : /*
85 : * Set for data stripes if the scrub is triggered from a P/Q stripe.
86 : * During such scrub, we should not report errors in data stripes, nor
87 : * update the accounting.
88 : */
89 : SCRUB_STRIPE_FLAG_NO_REPORT,
90 : };
91 :
92 : #define SCRUB_STRIPE_PAGES (BTRFS_STRIPE_LEN / PAGE_SIZE)
93 :
94 : /*
95 : * Represent one contiguous range with a length of BTRFS_STRIPE_LEN.
96 : */
97 : struct scrub_stripe {
98 : struct scrub_ctx *sctx;
99 : struct btrfs_block_group *bg;
100 :
101 : struct page *pages[SCRUB_STRIPE_PAGES];
102 : struct scrub_sector_verification *sectors;
103 :
104 : struct btrfs_device *dev;
105 : u64 logical;
106 : u64 physical;
107 :
108 : u16 mirror_num;
109 :
110 : /* Should be BTRFS_STRIPE_LEN / sectorsize. */
111 : u16 nr_sectors;
112 :
113 : /*
114 : * How many data/meta extents are in this stripe. Only for scrub status
115 : * reporting purposes.
116 : */
117 : u16 nr_data_extents;
118 : u16 nr_meta_extents;
119 :
120 : atomic_t pending_io;
121 : wait_queue_head_t io_wait;
122 : wait_queue_head_t repair_wait;
123 :
124 : /*
125 : * Indicate the state of the stripe. Bits are defined in the
126 : * scrub_stripe_flags enum.
127 : */
128 : unsigned long state;
129 :
130 : /* Indicate which sectors are covered by extent items. */
131 : unsigned long extent_sector_bitmap;
132 :
133 : /*
134 : * The errors hit during the initial read of the stripe.
135 : *
136 : * Used for error reporting and repair.
137 : *
138 : * The init_nr_* counters record the number of errors hit; they are
139 : * only used by error reporting.
140 : */
141 : unsigned long init_error_bitmap;
142 : unsigned int init_nr_io_errors;
143 : unsigned int init_nr_csum_errors;
144 : unsigned int init_nr_meta_errors;
145 :
146 : /*
147 : * The following error bitmaps are all for the current status.
148 : * Every time we submit a new read, these bitmaps may be updated.
149 : *
150 : * error_bitmap = io_error_bitmap | csum_error_bitmap | meta_error_bitmap;
151 : *
152 : * IO and csum errors can happen for both metadata and data.
153 : */
154 : unsigned long error_bitmap;
155 : unsigned long io_error_bitmap;
156 : unsigned long csum_error_bitmap;
157 : unsigned long meta_error_bitmap;
158 :
159 : /* For writeback (repair or replace) error reporting. */
160 : unsigned long write_error_bitmap;
161 :
162 : /* Writeback can be concurrent, thus we need to protect the bitmap. */
163 : spinlock_t write_error_lock;
164 :
165 : /*
166 : * Checksum for the whole stripe if this stripe is inside a data block
167 : * group.
168 : */
169 : u8 *csums;
170 :
171 : struct work_struct work;
172 : };
173 :
174 : struct scrub_ctx {
175 : struct scrub_stripe stripes[SCRUB_STRIPES_PER_SCTX];
176 : struct scrub_stripe *raid56_data_stripes;
177 : struct btrfs_fs_info *fs_info;
178 : int first_free;
179 : int cur_stripe;
180 : atomic_t cancel_req;
181 : int readonly;
182 : int sectors_per_bio;
183 :
184 : /* State of IO submission throttling affecting the associated device */
185 : ktime_t throttle_deadline;
186 : u64 throttle_sent;
187 :
188 : int is_dev_replace;
189 : u64 write_pointer;
190 :
191 : struct mutex wr_lock;
192 : struct btrfs_device *wr_tgtdev;
193 :
194 : /*
195 : * statistics
196 : */
197 : struct btrfs_scrub_progress stat;
198 : spinlock_t stat_lock;
199 :
200 : /*
201 : * Use a ref counter to avoid use-after-free issues. Scrub workers
202 : * decrement bios_in_flight and workers_pending and then do a wakeup
203 : * on the list_wait wait queue. We must ensure the main scrub task
204 : * doesn't free the scrub context before or while the workers are
205 : * doing the wakeup() call.
206 : */
207 : refcount_t refs;
208 : };
209 :
210 : struct scrub_warning {
211 : struct btrfs_path *path;
212 : u64 extent_item_size;
213 : const char *errstr;
214 : u64 physical;
215 : u64 logical;
216 : struct btrfs_device *dev;
217 : };
218 :
219 0 : static void release_scrub_stripe(struct scrub_stripe *stripe)
220 : {
221 0 : if (!stripe)
222 : return;
223 :
224 0 : for (int i = 0; i < SCRUB_STRIPE_PAGES; i++) {
225 0 : if (stripe->pages[i])
226 0 : __free_page(stripe->pages[i]);
227 0 : stripe->pages[i] = NULL;
228 : }
229 0 : kfree(stripe->sectors);
230 0 : kfree(stripe->csums);
231 0 : stripe->sectors = NULL;
232 0 : stripe->csums = NULL;
233 0 : stripe->sctx = NULL;
234 0 : stripe->state = 0;
235 : }
236 :
237 0 : static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
238 : struct scrub_stripe *stripe)
239 : {
240 0 : int ret;
241 :
242 0 : memset(stripe, 0, sizeof(*stripe));
243 :
244 0 : stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
245 0 : stripe->state = 0;
246 :
247 0 : init_waitqueue_head(&stripe->io_wait);
248 0 : init_waitqueue_head(&stripe->repair_wait);
249 0 : atomic_set(&stripe->pending_io, 0);
250 0 : spin_lock_init(&stripe->write_error_lock);
251 :
252 0 : ret = btrfs_alloc_page_array(SCRUB_STRIPE_PAGES, stripe->pages);
253 0 : if (ret < 0)
254 0 : goto error;
255 :
256 0 : stripe->sectors = kcalloc(stripe->nr_sectors,
257 : sizeof(struct scrub_sector_verification),
258 : GFP_KERNEL);
259 0 : if (!stripe->sectors)
260 0 : goto error;
261 :
262 0 : stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
263 0 : fs_info->csum_size, GFP_KERNEL);
264 0 : if (!stripe->csums)
265 0 : goto error;
266 : return 0;
267 0 : error:
268 0 : release_scrub_stripe(stripe);
269 0 : return -ENOMEM;
270 : }
271 :
272 0 : static void wait_scrub_stripe_io(struct scrub_stripe *stripe)
273 : {
274 0 : wait_event(stripe->io_wait, atomic_read(&stripe->pending_io) == 0);
275 0 : }
276 :
277 : static void scrub_put_ctx(struct scrub_ctx *sctx);
278 :
279 0 : static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
280 : {
281 0 : while (atomic_read(&fs_info->scrub_pause_req)) {
282 0 : mutex_unlock(&fs_info->scrub_lock);
283 0 : wait_event(fs_info->scrub_pause_wait,
284 : atomic_read(&fs_info->scrub_pause_req) == 0);
285 0 : mutex_lock(&fs_info->scrub_lock);
286 : }
287 0 : }
288 :
289 0 : static void scrub_pause_on(struct btrfs_fs_info *fs_info)
290 : {
291 0 : atomic_inc(&fs_info->scrubs_paused);
292 0 : wake_up(&fs_info->scrub_pause_wait);
293 0 : }
294 :
295 0 : static void scrub_pause_off(struct btrfs_fs_info *fs_info)
296 : {
297 0 : mutex_lock(&fs_info->scrub_lock);
298 0 : __scrub_blocked_if_needed(fs_info);
299 0 : atomic_dec(&fs_info->scrubs_paused);
300 0 : mutex_unlock(&fs_info->scrub_lock);
301 :
302 0 : wake_up(&fs_info->scrub_pause_wait);
303 0 : }
304 :
305 : static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
306 : {
307 0 : scrub_pause_on(fs_info);
308 0 : scrub_pause_off(fs_info);
309 0 : }
310 :
311 0 : static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
312 : {
313 0 : int i;
314 :
315 0 : if (!sctx)
316 : return;
317 :
318 0 : for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++)
319 0 : release_scrub_stripe(&sctx->stripes[i]);
320 :
321 0 : kfree(sctx);
322 : }
323 :
324 0 : static void scrub_put_ctx(struct scrub_ctx *sctx)
325 : {
326 0 : if (refcount_dec_and_test(&sctx->refs))
327 0 : scrub_free_ctx(sctx);
328 0 : }
329 :
330 0 : static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
331 : struct btrfs_fs_info *fs_info, int is_dev_replace)
332 : {
333 0 : struct scrub_ctx *sctx;
334 0 : int i;
335 :
336 0 : sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
337 0 : if (!sctx)
338 0 : goto nomem;
339 0 : refcount_set(&sctx->refs, 1);
340 0 : sctx->is_dev_replace = is_dev_replace;
341 0 : sctx->fs_info = fs_info;
342 0 : for (i = 0; i < SCRUB_STRIPES_PER_SCTX; i++) {
343 0 : int ret;
344 :
345 0 : ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
346 0 : if (ret < 0)
347 0 : goto nomem;
348 0 : sctx->stripes[i].sctx = sctx;
349 : }
350 0 : sctx->first_free = 0;
351 0 : atomic_set(&sctx->cancel_req, 0);
352 :
353 0 : spin_lock_init(&sctx->stat_lock);
354 0 : sctx->throttle_deadline = 0;
355 :
356 0 : mutex_init(&sctx->wr_lock);
357 0 : if (is_dev_replace) {
358 0 : WARN_ON(!fs_info->dev_replace.tgtdev);
359 0 : sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
360 : }
361 :
362 : return sctx;
363 :
364 0 : nomem:
365 0 : scrub_free_ctx(sctx);
366 0 : return ERR_PTR(-ENOMEM);
367 : }
368 :
369 0 : static int scrub_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
370 : u64 root, void *warn_ctx)
371 : {
372 0 : u32 nlink;
373 0 : int ret;
374 0 : int i;
375 0 : unsigned nofs_flag;
376 0 : struct extent_buffer *eb;
377 0 : struct btrfs_inode_item *inode_item;
378 0 : struct scrub_warning *swarn = warn_ctx;
379 0 : struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
380 0 : struct inode_fs_paths *ipath = NULL;
381 0 : struct btrfs_root *local_root;
382 0 : struct btrfs_key key;
383 :
384 0 : local_root = btrfs_get_fs_root(fs_info, root, true);
385 0 : if (IS_ERR(local_root)) {
386 0 : ret = PTR_ERR(local_root);
387 0 : goto err;
388 : }
389 :
390 : /*
391 : * this makes the path point to (inum INODE_ITEM ioff)
392 : */
393 0 : key.objectid = inum;
394 0 : key.type = BTRFS_INODE_ITEM_KEY;
395 0 : key.offset = 0;
396 :
397 0 : ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
398 0 : if (ret) {
399 0 : btrfs_put_root(local_root);
400 0 : btrfs_release_path(swarn->path);
401 0 : goto err;
402 : }
403 :
404 0 : eb = swarn->path->nodes[0];
405 0 : inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
406 : struct btrfs_inode_item);
407 0 : nlink = btrfs_inode_nlink(eb, inode_item);
408 0 : btrfs_release_path(swarn->path);
409 :
410 : /*
411 : * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub
412 : * uses GFP_NOFS in this context, so we keep it consistent but it does
413 : * not seem to be strictly necessary.
414 : */
415 0 : nofs_flag = memalloc_nofs_save();
416 0 : ipath = init_ipath(4096, local_root, swarn->path);
417 0 : memalloc_nofs_restore(nofs_flag);
418 0 : if (IS_ERR(ipath)) {
419 0 : btrfs_put_root(local_root);
420 0 : ret = PTR_ERR(ipath);
421 0 : ipath = NULL;
422 0 : goto err;
423 : }
424 0 : ret = paths_from_inode(inum, ipath);
425 :
426 0 : if (ret < 0)
427 0 : goto err;
428 :
429 : /*
430 : * We deliberately ignore the fact that ipath might have been too
431 : * small to hold all of the paths here.
432 : */
433 0 : for (i = 0; i < ipath->fspath->elem_cnt; ++i)
434 0 : btrfs_warn_in_rcu(fs_info,
435 : "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu, length %u, links %u (path: %s)",
436 : swarn->errstr, swarn->logical,
437 : btrfs_dev_name(swarn->dev),
438 : swarn->physical,
439 : root, inum, offset,
440 : fs_info->sectorsize, nlink,
441 : (char *)(unsigned long)ipath->fspath->val[i]);
442 :
443 0 : btrfs_put_root(local_root);
444 0 : free_ipath(ipath);
445 0 : return 0;
446 :
447 0 : err:
448 0 : btrfs_warn_in_rcu(fs_info,
449 : "%s at logical %llu on dev %s, physical %llu, root %llu, inode %llu, offset %llu: path resolving failed with ret=%d",
450 : swarn->errstr, swarn->logical,
451 : btrfs_dev_name(swarn->dev),
452 : swarn->physical,
453 : root, inum, offset, ret);
454 :
455 0 : free_ipath(ipath);
456 0 : return 0;
457 : }
458 :
459 0 : static void scrub_print_common_warning(const char *errstr, struct btrfs_device *dev,
460 : bool is_super, u64 logical, u64 physical)
461 : {
462 0 : struct btrfs_fs_info *fs_info = dev->fs_info;
463 0 : struct btrfs_path *path;
464 0 : struct btrfs_key found_key;
465 0 : struct extent_buffer *eb;
466 0 : struct btrfs_extent_item *ei;
467 0 : struct scrub_warning swarn;
468 0 : u64 flags = 0;
469 0 : u32 item_size;
470 0 : int ret;
471 :
472 : /* Super block error, no need to search extent tree. */
473 0 : if (is_super) {
474 0 : btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
475 : errstr, btrfs_dev_name(dev), physical);
476 0 : return;
477 : }
478 0 : path = btrfs_alloc_path();
479 0 : if (!path)
480 : return;
481 :
482 0 : swarn.physical = physical;
483 0 : swarn.logical = logical;
484 0 : swarn.errstr = errstr;
485 0 : swarn.dev = NULL;
486 :
487 0 : ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
488 : &flags);
489 0 : if (ret < 0)
490 0 : goto out;
491 :
492 0 : swarn.extent_item_size = found_key.offset;
493 :
494 0 : eb = path->nodes[0];
495 0 : ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
496 0 : item_size = btrfs_item_size(eb, path->slots[0]);
497 :
498 0 : if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
499 0 : unsigned long ptr = 0;
500 0 : u8 ref_level;
501 0 : u64 ref_root;
502 :
503 0 : while (true) {
504 0 : ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
505 : item_size, &ref_root,
506 : &ref_level);
507 0 : if (ret < 0) {
508 0 : btrfs_warn(fs_info,
509 : "failed to resolve tree backref for logical %llu: %d",
510 : swarn.logical, ret);
511 0 : break;
512 : }
513 0 : if (ret > 0)
514 : break;
515 0 : btrfs_warn_in_rcu(fs_info,
516 : "%s at logical %llu on dev %s, physical %llu: metadata %s (level %d) in tree %llu",
517 : errstr, swarn.logical, btrfs_dev_name(dev),
518 : swarn.physical, (ref_level ? "node" : "leaf"),
519 : ref_level, ref_root);
520 : }
521 0 : btrfs_release_path(path);
522 : } else {
523 0 : struct btrfs_backref_walk_ctx ctx = { 0 };
524 :
525 0 : btrfs_release_path(path);
526 :
527 0 : ctx.bytenr = found_key.objectid;
528 0 : ctx.extent_item_pos = swarn.logical - found_key.objectid;
529 0 : ctx.fs_info = fs_info;
530 :
531 0 : swarn.path = path;
532 0 : swarn.dev = dev;
533 :
534 0 : iterate_extent_inodes(&ctx, true, scrub_print_warning_inode, &swarn);
535 : }
536 :
537 0 : out:
538 0 : btrfs_free_path(path);
539 : }
540 :
541 0 : static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
542 : {
543 0 : int ret = 0;
544 0 : u64 length;
545 :
546 0 : if (!btrfs_is_zoned(sctx->fs_info))
547 : return 0;
548 :
549 0 : if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
550 : return 0;
551 :
552 0 : if (sctx->write_pointer < physical) {
553 0 : length = physical - sctx->write_pointer;
554 :
555 0 : ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
556 : sctx->write_pointer, length);
557 0 : if (!ret)
558 0 : sctx->write_pointer = physical;
559 : }
560 : return ret;
561 : }
562 :
563 : static struct page *scrub_stripe_get_page(struct scrub_stripe *stripe, int sector_nr)
564 : {
565 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
566 0 : int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
567 :
568 0 : return stripe->pages[page_index];
569 : }
570 :
571 : static unsigned int scrub_stripe_get_page_offset(struct scrub_stripe *stripe,
572 : int sector_nr)
573 : {
574 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
575 :
576 0 : return offset_in_page(sector_nr << fs_info->sectorsize_bits);
577 : }
578 :
579 0 : static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr)
580 : {
581 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
582 0 : const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
583 0 : const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
584 0 : const struct page *first_page = scrub_stripe_get_page(stripe, sector_nr);
585 0 : const unsigned int first_off = scrub_stripe_get_page_offset(stripe, sector_nr);
586 0 : SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
587 0 : u8 on_disk_csum[BTRFS_CSUM_SIZE];
588 0 : u8 calculated_csum[BTRFS_CSUM_SIZE];
589 0 : struct btrfs_header *header;
590 :
591 : /*
592 : * Here we don't have a good way to attach the pages (and subpages)
593 : * to a dummy extent buffer, thus we have to directly grab the members
594 : * from pages.
595 : */
596 0 : header = (struct btrfs_header *)(page_address(first_page) + first_off);
597 0 : memcpy(on_disk_csum, header->csum, fs_info->csum_size);
598 :
599 0 : if (logical != btrfs_stack_header_bytenr(header)) {
600 0 : bitmap_set(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
601 0 : bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
602 0 : btrfs_warn_rl(fs_info,
603 : "tree block %llu mirror %u has bad bytenr, has %llu want %llu",
604 : logical, stripe->mirror_num,
605 : btrfs_stack_header_bytenr(header), logical);
606 0 : return;
607 : }
608 0 : if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) {
609 0 : bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
610 0 : bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
611 0 : btrfs_warn_rl(fs_info,
612 : "tree block %llu mirror %u has bad fsid, has %pU want %pU",
613 : logical, stripe->mirror_num,
614 : header->fsid, fs_info->fs_devices->fsid);
615 0 : return;
616 : }
617 0 : if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
618 : BTRFS_UUID_SIZE) != 0) {
619 0 : bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
620 0 : bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
621 0 : btrfs_warn_rl(fs_info,
622 : "tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
623 : logical, stripe->mirror_num,
624 : header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
625 0 : return;
626 : }
627 :
628 : /* Now check tree block csum. */
629 0 : shash->tfm = fs_info->csum_shash;
630 0 : crypto_shash_init(shash);
631 0 : crypto_shash_update(shash, page_address(first_page) + first_off +
632 0 : BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
633 :
634 0 : for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
635 0 : struct page *page = scrub_stripe_get_page(stripe, i);
636 0 : unsigned int page_off = scrub_stripe_get_page_offset(stripe, i);
637 :
638 0 : crypto_shash_update(shash, page_address(page) + page_off,
639 : fs_info->sectorsize);
640 : }
641 :
642 0 : crypto_shash_final(shash, calculated_csum);
643 0 : if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
644 0 : bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
645 0 : bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
646 0 : btrfs_warn_rl(fs_info,
647 : "tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
648 : logical, stripe->mirror_num,
649 : CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
650 : CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
651 0 : return;
652 : }
653 0 : if (stripe->sectors[sector_nr].generation !=
654 : btrfs_stack_header_generation(header)) {
655 0 : bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
656 0 : bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
657 0 : btrfs_warn_rl(fs_info,
658 : "tree block %llu mirror %u has bad generation, has %llu want %llu",
659 : logical, stripe->mirror_num,
660 : btrfs_stack_header_generation(header),
661 : stripe->sectors[sector_nr].generation);
662 0 : return;
663 : }
664 0 : bitmap_clear(&stripe->error_bitmap, sector_nr, sectors_per_tree);
665 0 : bitmap_clear(&stripe->csum_error_bitmap, sector_nr, sectors_per_tree);
666 0 : bitmap_clear(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
667 : }
668 :
669 0 : static void scrub_verify_one_sector(struct scrub_stripe *stripe, int sector_nr)
670 : {
671 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
672 0 : struct scrub_sector_verification *sector = &stripe->sectors[sector_nr];
673 0 : const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
674 0 : struct page *page = scrub_stripe_get_page(stripe, sector_nr);
675 0 : unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
676 0 : u8 csum_buf[BTRFS_CSUM_SIZE];
677 0 : int ret;
678 :
679 0 : ASSERT(sector_nr >= 0 && sector_nr < stripe->nr_sectors);
680 :
681 : /* Sector not utilized, skip it. */
682 0 : if (!test_bit(sector_nr, &stripe->extent_sector_bitmap))
683 0 : return;
684 :
685 : /* IO error, no need to check. */
686 0 : if (test_bit(sector_nr, &stripe->io_error_bitmap))
687 : return;
688 :
689 : /* Metadata, verify the full tree block. */
690 0 : if (sector->is_metadata) {
691 : /*
692 : * Check if the tree block crosses the stripe boundary. If it
693 : * crosses the boundary, we cannot verify it but only give a
694 : * warning.
695 : *
696 : * This can only happen on a very old filesystem where chunks
697 : * are not ensured to be stripe aligned.
698 : */
699 0 : if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
700 0 : btrfs_warn_rl(fs_info,
701 : "tree block at %llu crosses stripe boundary %llu",
702 : stripe->logical +
703 : (sector_nr << fs_info->sectorsize_bits),
704 : stripe->logical);
705 0 : return;
706 : }
707 0 : scrub_verify_one_metadata(stripe, sector_nr);
708 0 : return;
709 : }
710 :
711 : /*
712 : * Data is easier, we just verify the data csum (if we have it). For
713 : * cases without csum, we have no other choice but to trust it.
714 : */
715 0 : if (!sector->csum) {
716 0 : clear_bit(sector_nr, &stripe->error_bitmap);
717 0 : return;
718 : }
719 :
720 0 : ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
721 0 : if (ret < 0) {
722 0 : set_bit(sector_nr, &stripe->csum_error_bitmap);
723 0 : set_bit(sector_nr, &stripe->error_bitmap);
724 : } else {
725 0 : clear_bit(sector_nr, &stripe->csum_error_bitmap);
726 0 : clear_bit(sector_nr, &stripe->error_bitmap);
727 : }
728 : }
729 :
730 : /* Verify specified sectors of a stripe. */
731 0 : static void scrub_verify_one_stripe(struct scrub_stripe *stripe, unsigned long bitmap)
732 : {
733 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
734 0 : const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
735 0 : int sector_nr;
736 :
737 0 : for_each_set_bit(sector_nr, &bitmap, stripe->nr_sectors) {
738 0 : scrub_verify_one_sector(stripe, sector_nr);
739 0 : if (stripe->sectors[sector_nr].is_metadata)
740 0 : sector_nr += sectors_per_tree - 1;
741 : }
742 0 : }
743 :
744 0 : static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
745 : {
746 0 : int i;
747 :
748 0 : for (i = 0; i < stripe->nr_sectors; i++) {
749 0 : if (scrub_stripe_get_page(stripe, i) == first_bvec->bv_page &&
750 0 : scrub_stripe_get_page_offset(stripe, i) == first_bvec->bv_offset)
751 : break;
752 : }
753 0 : ASSERT(i < stripe->nr_sectors);
754 0 : return i;
755 : }
756 :
757 : /*
758 : * Repair read is different from the regular read:
759 : *
760 : * - Only reads the failed sectors
761 : * - May have extra blocksize limits
762 : */
763 0 : static void scrub_repair_read_endio(struct btrfs_bio *bbio)
764 : {
765 0 : struct scrub_stripe *stripe = bbio->private;
766 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
767 0 : struct bio_vec *bvec;
768 0 : int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
769 0 : u32 bio_size = 0;
770 0 : int i;
771 :
772 0 : ASSERT(sector_nr < stripe->nr_sectors);
773 :
774 0 : bio_for_each_bvec_all(bvec, &bbio->bio, i)
775 0 : bio_size += bvec->bv_len;
776 :
777 0 : if (bbio->bio.bi_status) {
778 0 : bitmap_set(&stripe->io_error_bitmap, sector_nr,
779 0 : bio_size >> fs_info->sectorsize_bits);
780 0 : bitmap_set(&stripe->error_bitmap, sector_nr,
781 0 : bio_size >> fs_info->sectorsize_bits);
782 : } else {
783 0 : bitmap_clear(&stripe->io_error_bitmap, sector_nr,
784 0 : bio_size >> fs_info->sectorsize_bits);
785 : }
786 0 : bio_put(&bbio->bio);
787 0 : if (atomic_dec_and_test(&stripe->pending_io))
788 0 : wake_up(&stripe->io_wait);
789 0 : }
790 :
791 : static int calc_next_mirror(int mirror, int num_copies)
792 : {
793 0 : ASSERT(mirror <= num_copies);
794 0 : return (mirror + 1 > num_copies) ? 1 : mirror + 1;
795 : }
796 :
797 0 : static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
798 : int mirror, int blocksize, bool wait)
799 : {
800 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
801 0 : struct btrfs_bio *bbio = NULL;
802 0 : const unsigned long old_error_bitmap = stripe->error_bitmap;
803 0 : int i;
804 :
805 0 : ASSERT(stripe->mirror_num >= 1);
806 0 : ASSERT(atomic_read(&stripe->pending_io) == 0);
807 :
808 0 : for_each_set_bit(i, &old_error_bitmap, stripe->nr_sectors) {
809 0 : struct page *page;
810 0 : int pgoff;
811 0 : int ret;
812 :
813 0 : page = scrub_stripe_get_page(stripe, i);
814 0 : pgoff = scrub_stripe_get_page_offset(stripe, i);
815 :
816 : /* The current sector cannot be merged, submit the bio. */
817 0 : if (bbio && ((i > 0 && !test_bit(i - 1, &stripe->error_bitmap)) ||
818 0 : bbio->bio.bi_iter.bi_size >= blocksize)) {
819 0 : ASSERT(bbio->bio.bi_iter.bi_size);
820 0 : atomic_inc(&stripe->pending_io);
821 0 : btrfs_submit_bio(bbio, mirror);
822 0 : if (wait)
823 0 : wait_scrub_stripe_io(stripe);
824 : bbio = NULL;
825 : }
826 :
827 0 : if (!bbio) {
828 0 : bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_READ,
829 : fs_info, scrub_repair_read_endio, stripe);
830 0 : bbio->bio.bi_iter.bi_sector = (stripe->logical +
831 0 : (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
832 : }
833 :
834 0 : ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
835 0 : ASSERT(ret == fs_info->sectorsize);
836 : }
837 0 : if (bbio) {
838 0 : ASSERT(bbio->bio.bi_iter.bi_size);
839 0 : atomic_inc(&stripe->pending_io);
840 0 : btrfs_submit_bio(bbio, mirror);
841 0 : if (wait)
842 0 : wait_scrub_stripe_io(stripe);
843 : }
844 0 : }
845 :
846 0 : static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
847 : struct scrub_stripe *stripe)
848 : {
849 0 : static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
850 : DEFAULT_RATELIMIT_BURST);
851 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
852 0 : struct btrfs_device *dev = NULL;
853 0 : u64 physical = 0;
854 0 : int nr_data_sectors = 0;
855 0 : int nr_meta_sectors = 0;
856 0 : int nr_nodatacsum_sectors = 0;
857 0 : int nr_repaired_sectors = 0;
858 0 : int sector_nr;
859 :
860 0 : if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state))
861 : return;
862 :
863 : /*
864 : * Init the info needed for error reporting.
865 : *
866 : * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio(),
867 : * which doesn't need dev/physical, error reporting still needs dev and physical.
868 : */
869 0 : if (!bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors)) {
870 0 : u64 mapped_len = fs_info->sectorsize;
871 0 : struct btrfs_io_context *bioc = NULL;
872 0 : int stripe_index = stripe->mirror_num - 1;
873 0 : int ret;
874 :
875 : /* For scrub, our mirror_num should always start at 1. */
876 0 : ASSERT(stripe->mirror_num >= 1);
877 0 : ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
878 : stripe->logical, &mapped_len, &bioc,
879 : NULL, NULL, 1);
880 : /*
881 : * If we failed, dev will be NULL, and later detailed reports
882 : * will just be skipped.
883 : */
884 0 : if (ret < 0)
885 0 : goto skip;
886 0 : physical = bioc->stripes[stripe_index].physical;
887 0 : dev = bioc->stripes[stripe_index].dev;
888 0 : btrfs_put_bioc(bioc);
889 : }
890 :
891 0 : skip:
892 0 : for_each_set_bit(sector_nr, &stripe->extent_sector_bitmap, stripe->nr_sectors) {
893 0 : bool repaired = false;
894 :
895 0 : if (stripe->sectors[sector_nr].is_metadata) {
896 0 : nr_meta_sectors++;
897 : } else {
898 0 : nr_data_sectors++;
899 0 : if (!stripe->sectors[sector_nr].csum)
900 0 : nr_nodatacsum_sectors++;
901 : }
902 :
903 0 : if (test_bit(sector_nr, &stripe->init_error_bitmap) &&
904 0 : !test_bit(sector_nr, &stripe->error_bitmap)) {
905 0 : nr_repaired_sectors++;
906 0 : repaired = true;
907 : }
908 :
909 : /* Good sector from the beginning, nothing needs to be done. */
910 0 : if (!test_bit(sector_nr, &stripe->init_error_bitmap))
911 0 : continue;
912 :
913 : /*
914 : * Report errors for the corrupted sectors. If a sector was
915 : * repaired, just output a message saying so.
916 : */
917 0 : if (repaired) {
918 0 : if (dev) {
919 0 : btrfs_err_rl_in_rcu(fs_info,
920 : "fixed up error at logical %llu on dev %s physical %llu",
921 : stripe->logical, btrfs_dev_name(dev),
922 : physical);
923 : } else {
924 0 : btrfs_err_rl_in_rcu(fs_info,
925 : "fixed up error at logical %llu on mirror %u",
926 : stripe->logical, stripe->mirror_num);
927 : }
928 0 : continue;
929 : }
930 :
931 : /* The remaining are all for unrepaired. */
932 0 : if (dev) {
933 0 : btrfs_err_rl_in_rcu(fs_info,
934 : "unable to fixup (regular) error at logical %llu on dev %s physical %llu",
935 : stripe->logical, btrfs_dev_name(dev),
936 : physical);
937 : } else {
938 0 : btrfs_err_rl_in_rcu(fs_info,
939 : "unable to fixup (regular) error at logical %llu on mirror %u",
940 : stripe->logical, stripe->mirror_num);
941 : }
942 :
943 0 : if (test_bit(sector_nr, &stripe->io_error_bitmap))
944 0 : if (__ratelimit(&rs) && dev)
945 0 : scrub_print_common_warning("i/o error", dev, false,
946 : stripe->logical, physical);
947 0 : if (test_bit(sector_nr, &stripe->csum_error_bitmap))
948 0 : if (__ratelimit(&rs) && dev)
949 0 : scrub_print_common_warning("checksum error", dev, false,
950 : stripe->logical, physical);
951 0 : if (test_bit(sector_nr, &stripe->meta_error_bitmap))
952 0 : if (__ratelimit(&rs) && dev)
953 0 : scrub_print_common_warning("header error", dev, false,
954 : stripe->logical, physical);
955 : }
956 :
957 0 : spin_lock(&sctx->stat_lock);
958 0 : sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
959 0 : sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
960 0 : sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
961 0 : sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
962 0 : sctx->stat.no_csum += nr_nodatacsum_sectors;
963 0 : sctx->stat.read_errors += stripe->init_nr_io_errors;
964 0 : sctx->stat.csum_errors += stripe->init_nr_csum_errors;
965 0 : sctx->stat.verify_errors += stripe->init_nr_meta_errors;
966 0 : sctx->stat.uncorrectable_errors +=
967 0 : bitmap_weight(&stripe->error_bitmap, stripe->nr_sectors);
968 0 : sctx->stat.corrected_errors += nr_repaired_sectors;
969 0 : spin_unlock(&sctx->stat_lock);
970 : }
971 :
972 : /*
973 : * The main entrance for all read related scrub work, including:
974 : *
975 : * - Wait for the initial read to finish
976 : * - Verify and locate any bad sectors
977 : * - Go through the remaining mirrors and try to read in as large a
978 : * blocksize as possible
979 : * - Go through all mirrors (including the failed mirror) sector-by-sector
980 : *
981 : * Writeback does not happen here, it needs extra synchronization.
982 : */
983 0 : static void scrub_stripe_read_repair_worker(struct work_struct *work)
984 : {
985 0 : struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work);
986 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
987 0 : int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
988 : stripe->bg->length);
989 0 : int mirror;
990 0 : int i;
991 :
992 0 : ASSERT(stripe->mirror_num > 0);
993 :
994 0 : wait_scrub_stripe_io(stripe);
995 0 : scrub_verify_one_stripe(stripe, stripe->extent_sector_bitmap);
996 : /* Save the initial failed bitmap for later repair and reporting. */
997 0 : stripe->init_error_bitmap = stripe->error_bitmap;
998 0 : stripe->init_nr_io_errors = bitmap_weight(&stripe->io_error_bitmap,
999 0 : stripe->nr_sectors);
1000 0 : stripe->init_nr_csum_errors = bitmap_weight(&stripe->csum_error_bitmap,
1001 0 : stripe->nr_sectors);
1002 0 : stripe->init_nr_meta_errors = bitmap_weight(&stripe->meta_error_bitmap,
1003 0 : stripe->nr_sectors);
1004 :
1005 0 : if (bitmap_empty(&stripe->init_error_bitmap, stripe->nr_sectors))
1006 0 : goto out;
1007 :
1008 : /*
1009 : * Try all remaining mirrors.
1010 : *
1011 : * Here we still try to read as large a block as possible, as this is
1012 : * faster and we have extra safety nets to rely on.
1013 : */
1014 0 : for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
1015 0 : mirror != stripe->mirror_num;
1016 0 : mirror = calc_next_mirror(mirror, num_copies)) {
1017 0 : const unsigned long old_error_bitmap = stripe->error_bitmap;
1018 :
1019 0 : scrub_stripe_submit_repair_read(stripe, mirror,
1020 : BTRFS_STRIPE_LEN, false);
1021 0 : wait_scrub_stripe_io(stripe);
1022 0 : scrub_verify_one_stripe(stripe, old_error_bitmap);
1023 0 : if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1024 0 : goto out;
1025 : }
1026 :
1027 : /*
1028 : * Last safety net, try re-checking all mirrors, including the failed
1029 : * one, sector-by-sector.
1030 : *
1031 : * If one sector fails the drive's internal csum, the whole read
1032 : * containing the offending sector would be marked as an error.
1033 : * Thus here we read sector-by-sector.
1034 : *
1035 : * This can be slow, thus we only try it as the last resort.
1036 : */
1037 :
1038 : for (i = 0, mirror = stripe->mirror_num;
1039 0 : i < num_copies;
1040 0 : i++, mirror = calc_next_mirror(mirror, num_copies)) {
1041 0 : const unsigned long old_error_bitmap = stripe->error_bitmap;
1042 :
1043 0 : scrub_stripe_submit_repair_read(stripe, mirror,
1044 0 : fs_info->sectorsize, true);
1045 0 : wait_scrub_stripe_io(stripe);
1046 0 : scrub_verify_one_stripe(stripe, old_error_bitmap);
1047 0 : if (bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
1048 0 : goto out;
1049 : }
1050 0 : out:
1051 0 : scrub_stripe_report_errors(stripe->sctx, stripe);
1052 0 : set_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state);
1053 0 : wake_up(&stripe->repair_wait);
1054 0 : }
1055 :
1056 0 : static void scrub_read_endio(struct btrfs_bio *bbio)
1057 : {
1058 0 : struct scrub_stripe *stripe = bbio->private;
1059 :
1060 0 : if (bbio->bio.bi_status) {
1061 0 : bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
1062 0 : bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
1063 : } else {
1064 0 : bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
1065 : }
1066 0 : bio_put(&bbio->bio);
1067 0 : if (atomic_dec_and_test(&stripe->pending_io)) {
1068 0 : wake_up(&stripe->io_wait);
1069 0 : INIT_WORK(&stripe->work, scrub_stripe_read_repair_worker);
1070 0 : queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1071 : }
1072 0 : }
1073 :
1074 0 : static void scrub_write_endio(struct btrfs_bio *bbio)
1075 : {
1076 0 : struct scrub_stripe *stripe = bbio->private;
1077 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1078 0 : struct bio_vec *bvec;
1079 0 : int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
1080 0 : u32 bio_size = 0;
1081 0 : int i;
1082 :
1083 0 : bio_for_each_bvec_all(bvec, &bbio->bio, i)
1084 0 : bio_size += bvec->bv_len;
1085 :
1086 0 : if (bbio->bio.bi_status) {
1087 0 : unsigned long flags;
1088 :
1089 0 : spin_lock_irqsave(&stripe->write_error_lock, flags);
1090 0 : bitmap_set(&stripe->write_error_bitmap, sector_nr,
1091 0 : bio_size >> fs_info->sectorsize_bits);
1092 0 : spin_unlock_irqrestore(&stripe->write_error_lock, flags);
1093 : }
1094 0 : bio_put(&bbio->bio);
1095 :
1096 0 : if (atomic_dec_and_test(&stripe->pending_io))
1097 0 : wake_up(&stripe->io_wait);
1098 0 : }
1099 :
1100 0 : static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1101 : struct scrub_stripe *stripe,
1102 : struct btrfs_bio *bbio, bool dev_replace)
1103 : {
1104 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1105 0 : u32 bio_len = bbio->bio.bi_iter.bi_size;
1106 0 : u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
1107 0 : stripe->logical;
1108 :
1109 0 : fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1110 0 : atomic_inc(&stripe->pending_io);
1111 0 : btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
1112 0 : if (!btrfs_is_zoned(fs_info))
1113 : return;
1114 : /*
1115 : * For zoned writeback, queue depth must be 1, thus we must wait for
1116 : * the write to finish before submitting the next one.
1117 : */
1118 0 : wait_scrub_stripe_io(stripe);
1119 :
1120 : /*
1121 : * Also update the write pointer if the write finished
1122 : * successfully.
1123 : */
1124 0 : if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1125 : &stripe->write_error_bitmap))
1126 0 : sctx->write_pointer += bio_len;
1127 : }
1128 :
1129 : /*
1130 : * Submit the write bio(s) for the sectors specified by @write_bitmap.
1131 : *
1132 : * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
1133 : *
1134 : * - Only needs logical bytenr and mirror_num
1135 : * Just like the scrub read path
1136 : *
1137 : * - Would only result in writes to the specified mirror
1138 : * Unlike the regular writeback path, which would write back to all stripes
1139 : *
1140 : * - Handles dev-replace and read-repair writeback differently
1141 : */
1142 0 : static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1143 : unsigned long write_bitmap, bool dev_replace)
1144 : {
1145 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1146 0 : struct btrfs_bio *bbio = NULL;
1147 0 : int sector_nr;
1148 :
1149 0 : for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
1150 0 : struct page *page = scrub_stripe_get_page(stripe, sector_nr);
1151 0 : unsigned int pgoff = scrub_stripe_get_page_offset(stripe, sector_nr);
1152 0 : int ret;
1153 :
1154 : /* We should only writeback sectors covered by an extent. */
1155 0 : ASSERT(test_bit(sector_nr, &stripe->extent_sector_bitmap));
1156 :
1157 : /* Cannot merge with previous sector, submit the current one. */
1158 0 : if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
1159 0 : scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1160 0 : bbio = NULL;
1161 : }
1162 0 : if (!bbio) {
1163 0 : bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
1164 : fs_info, scrub_write_endio, stripe);
1165 0 : bbio->bio.bi_iter.bi_sector = (stripe->logical +
1166 0 : (sector_nr << fs_info->sectorsize_bits)) >>
1167 : SECTOR_SHIFT;
1168 : }
1169 0 : ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1170 0 : ASSERT(ret == fs_info->sectorsize);
1171 : }
1172 0 : if (bbio)
1173 0 : scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1174 0 : }
1175 :
1176 : /*
1177 : * Throttling of IO submission, bandwidth-limit based; the timeslice is 1
1178 : * second. The limit can be set via /sys/fs/btrfs/<UUID>/devinfo/<devid>/scrub_speed_max.
1179 : */
1180 0 : static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1181 : unsigned int bio_size)
1182 : {
1183 0 : const int time_slice = 1000;
1184 0 : s64 delta;
1185 0 : ktime_t now;
1186 0 : u32 div;
1187 0 : u64 bwlimit;
1188 :
1189 0 : bwlimit = READ_ONCE(device->scrub_speed_max);
1190 0 : if (bwlimit == 0)
1191 : return;
1192 :
1193 : /*
1194 : * The slice is divided into intervals when the IO is submitted, adjusted
1195 : * by bwlimit, with a maximum of 64 intervals.
1196 : */
1197 0 : div = max_t(u32, 1, (u32)(bwlimit / (16 * 1024 * 1024)));
1198 0 : div = min_t(u32, 64, div);
1199 :
1200 : /* Start new epoch, set deadline */
1201 0 : now = ktime_get();
1202 0 : if (sctx->throttle_deadline == 0) {
1203 0 : sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1204 0 : sctx->throttle_sent = 0;
1205 : }
1206 :
1207 : /* Still in the time to send? */
1208 0 : if (ktime_before(now, sctx->throttle_deadline)) {
1209 : /* If current bio is within the limit, send it */
1210 0 : sctx->throttle_sent += bio_size;
1211 0 : if (sctx->throttle_sent <= div_u64(bwlimit, div))
1212 : return;
1213 :
1214 : /* We're over the limit, sleep until the rest of the slice */
1215 0 : delta = ktime_ms_delta(sctx->throttle_deadline, now);
1216 : } else {
1217 : /* New request after deadline, start new epoch */
1218 : delta = 0;
1219 : }
1220 :
1221 0 : if (delta) {
1222 0 : long timeout;
1223 :
1224 0 : timeout = div_u64(delta * HZ, 1000);
1225 0 : schedule_timeout_interruptible(timeout);
1226 : }
1227 :
1228 : /* Next call will start the deadline period */
1229 0 : sctx->throttle_deadline = 0;
1230 : }
1231 :
1232 : /*
1233 : * Given a physical address, this will calculate its
1234 : * logical offset. If this is a parity stripe, it will return
1235 : * the leftmost data stripe's logical offset.
1236 : *
1237 : * Return 0 if it is a data stripe, and 1 if it is a parity stripe.
1238 : */
1239 0 : static int get_raid56_logic_offset(u64 physical, int num,
1240 : struct map_lookup *map, u64 *offset,
1241 : u64 *stripe_start)
1242 : {
1243 0 : int i;
1244 0 : int j = 0;
1245 0 : u64 last_offset;
1246 0 : const int data_stripes = nr_data_stripes(map);
1247 :
1248 0 : last_offset = (physical - map->stripes[num].physical) * data_stripes;
1249 0 : if (stripe_start)
1250 0 : *stripe_start = last_offset;
1251 :
1252 0 : *offset = last_offset;
1253 0 : for (i = 0; i < data_stripes; i++) {
1254 0 : u32 stripe_nr;
1255 0 : u32 stripe_index;
1256 0 : u32 rot;
1257 :
1258 0 : *offset = last_offset + btrfs_stripe_nr_to_offset(i);
1259 :
1260 0 : stripe_nr = (u32)(*offset >> BTRFS_STRIPE_LEN_SHIFT) / data_stripes;
1261 :
1262 : /* Work out the disk rotation on this stripe-set */
1263 0 : rot = stripe_nr % map->num_stripes;
1264 0 : stripe_nr /= map->num_stripes;
1265 : /* calculate which stripe this data locates */
1266 0 : rot += i;
1267 0 : stripe_index = rot % map->num_stripes;
1268 0 : if (stripe_index == num)
1269 : return 0;
1270 0 : if (stripe_index < num)
1271 0 : j++;
1272 : }
1273 0 : *offset = last_offset + btrfs_stripe_nr_to_offset(j);
1274 0 : return 1;
1275 : }
1276 :
1277 : /*
1278 : * Return 0 if the extent item range covers any byte of the range.
1279 : * Return <0 if the extent item is before @search_start.
1280 : * Return >0 if the extent item is after @search_start + @search_len.
1281 : */
1282 0 : static int compare_extent_item_range(struct btrfs_path *path,
1283 : u64 search_start, u64 search_len)
1284 : {
1285 0 : struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1286 0 : u64 len;
1287 0 : struct btrfs_key key;
1288 :
1289 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1290 0 : ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
1291 : key.type == BTRFS_METADATA_ITEM_KEY);
1292 0 : if (key.type == BTRFS_METADATA_ITEM_KEY)
1293 0 : len = fs_info->nodesize;
1294 : else
1295 0 : len = key.offset;
1296 :
1297 0 : if (key.objectid + len <= search_start)
1298 : return -1;
1299 0 : if (key.objectid >= search_start + search_len)
1300 0 : return 1;
1301 : return 0;
1302 : }
1303 :
1304 : /*
1305 : * Locate one extent item which covers any byte in range
1306 : * [@search_start, @search_start + @search_len)
1307 : *
1308 : * If the path is not initialized, we will initialize the search by doing
1309 : * a btrfs_search_slot().
1310 : * If the path is already initialized, we will use the path as the initial
1311 : * slot, to avoid duplicated btrfs_search_slot() calls.
1312 : *
1313 : * NOTE: If an extent item starts before @search_start, we will still
1314 : * return the extent item. This is for data extents crossing the stripe boundary.
1315 : *
1316 : * Return 0 if we found such extent item, and @path will point to the extent item.
1317 : * Return >0 if no such extent item can be found, and @path will be released.
1318 : * Return <0 if hit fatal error, and @path will be released.
1319 : */
1320 0 : static int find_first_extent_item(struct btrfs_root *extent_root,
1321 : struct btrfs_path *path,
1322 : u64 search_start, u64 search_len)
1323 : {
1324 0 : struct btrfs_fs_info *fs_info = extent_root->fs_info;
1325 0 : struct btrfs_key key;
1326 0 : int ret;
1327 :
1328 : /* Continue using the existing path */
1329 0 : if (path->nodes[0])
1330 0 : goto search_forward;
1331 :
1332 0 : if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1333 0 : key.type = BTRFS_METADATA_ITEM_KEY;
1334 : else
1335 0 : key.type = BTRFS_EXTENT_ITEM_KEY;
1336 0 : key.objectid = search_start;
1337 0 : key.offset = (u64)-1;
1338 :
1339 0 : ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1340 0 : if (ret < 0)
1341 : return ret;
1342 :
1343 0 : ASSERT(ret > 0);
1344 : /*
1345 : * Here we intentionally pass 0 as @min_objectid, as there could be
1346 : * an extent item starting before @search_start.
1347 : */
1348 0 : ret = btrfs_previous_extent_item(extent_root, path, 0);
1349 0 : if (ret < 0)
1350 : return ret;
1351 : /*
1352 : * No matter whether we have found an extent item, the next loop will
1353 : * properly do every check on the key.
1354 : */
1355 0 : search_forward:
1356 0 : while (true) {
1357 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1358 0 : if (key.objectid >= search_start + search_len)
1359 : break;
1360 0 : if (key.type != BTRFS_METADATA_ITEM_KEY &&
1361 : key.type != BTRFS_EXTENT_ITEM_KEY)
1362 0 : goto next;
1363 :
1364 0 : ret = compare_extent_item_range(path, search_start, search_len);
1365 0 : if (ret == 0)
1366 : return ret;
1367 0 : if (ret > 0)
1368 : break;
1369 0 : next:
1370 0 : path->slots[0]++;
1371 0 : if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
1372 0 : ret = btrfs_next_leaf(extent_root, path);
1373 0 : if (ret) {
1374 : /* Either no more item or fatal error */
1375 0 : btrfs_release_path(path);
1376 0 : return ret;
1377 : }
1378 : }
1379 : }
1380 0 : btrfs_release_path(path);
1381 0 : return 1;
1382 : }
1383 :
1384 0 : static void get_extent_info(struct btrfs_path *path, u64 *extent_start_ret,
1385 : u64 *size_ret, u64 *flags_ret, u64 *generation_ret)
1386 : {
1387 0 : struct btrfs_key key;
1388 0 : struct btrfs_extent_item *ei;
1389 :
1390 0 : btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1391 0 : ASSERT(key.type == BTRFS_METADATA_ITEM_KEY ||
1392 : key.type == BTRFS_EXTENT_ITEM_KEY);
1393 0 : *extent_start_ret = key.objectid;
1394 0 : if (key.type == BTRFS_METADATA_ITEM_KEY)
1395 0 : *size_ret = path->nodes[0]->fs_info->nodesize;
1396 : else
1397 0 : *size_ret = key.offset;
1398 0 : ei = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_extent_item);
1399 0 : *flags_ret = btrfs_extent_flags(path->nodes[0], ei);
1400 0 : *generation_ret = btrfs_extent_generation(path->nodes[0], ei);
1401 0 : }
1402 :
1403 0 : static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1404 : u64 physical, u64 physical_end)
1405 : {
1406 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1407 0 : int ret = 0;
1408 :
1409 0 : if (!btrfs_is_zoned(fs_info))
1410 : return 0;
1411 :
1412 0 : mutex_lock(&sctx->wr_lock);
1413 0 : if (sctx->write_pointer < physical_end) {
1414 0 : ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1415 : physical,
1416 : sctx->write_pointer);
1417 0 : if (ret)
1418 0 : btrfs_err(fs_info,
1419 : "zoned: failed to recover write pointer");
1420 : }
1421 0 : mutex_unlock(&sctx->wr_lock);
1422 0 : btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1423 :
1424 0 : return ret;
1425 : }
1426 :
1427 0 : static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1428 : struct scrub_stripe *stripe,
1429 : u64 extent_start, u64 extent_len,
1430 : u64 extent_flags, u64 extent_gen)
1431 : {
1432 0 : for (u64 cur_logical = max(stripe->logical, extent_start);
1433 0 : cur_logical < min(stripe->logical + BTRFS_STRIPE_LEN,
1434 : extent_start + extent_len);
1435 0 : cur_logical += fs_info->sectorsize) {
1436 0 : const int nr_sector = (cur_logical - stripe->logical) >>
1437 0 : fs_info->sectorsize_bits;
1438 0 : struct scrub_sector_verification *sector =
1439 0 : &stripe->sectors[nr_sector];
1440 :
1441 0 : set_bit(nr_sector, &stripe->extent_sector_bitmap);
1442 0 : if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1443 0 : sector->is_metadata = true;
1444 0 : sector->generation = extent_gen;
1445 : }
1446 : }
1447 0 : }
1448 :
1449 : static void scrub_stripe_reset_bitmaps(struct scrub_stripe *stripe)
1450 : {
1451 0 : stripe->extent_sector_bitmap = 0;
1452 0 : stripe->init_error_bitmap = 0;
1453 0 : stripe->init_nr_io_errors = 0;
1454 0 : stripe->init_nr_csum_errors = 0;
1455 0 : stripe->init_nr_meta_errors = 0;
1456 0 : stripe->error_bitmap = 0;
1457 0 : stripe->io_error_bitmap = 0;
1458 0 : stripe->csum_error_bitmap = 0;
1459 0 : stripe->meta_error_bitmap = 0;
1460 : }
1461 :
1462 : /*
1463 : * Locate one stripe which has at least one extent in its range.
1464 : *
1465 : * Return 0 if such a stripe is found, and store its info into @stripe.
1466 : * Return >0 if there is no such stripe in the specified range.
1467 : * Return <0 for error.
1468 : */
1469 0 : static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
1470 : struct btrfs_device *dev, u64 physical,
1471 : int mirror_num, u64 logical_start,
1472 : u32 logical_len,
1473 : struct scrub_stripe *stripe)
1474 : {
1475 0 : struct btrfs_fs_info *fs_info = bg->fs_info;
1476 0 : struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1477 0 : struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1478 0 : const u64 logical_end = logical_start + logical_len;
1479 0 : struct btrfs_path path = { 0 };
1480 0 : u64 cur_logical = logical_start;
1481 0 : u64 stripe_end;
1482 0 : u64 extent_start;
1483 0 : u64 extent_len;
1484 0 : u64 extent_flags;
1485 0 : u64 extent_gen;
1486 0 : int ret;
1487 :
1488 0 : memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
1489 : stripe->nr_sectors);
1490 0 : scrub_stripe_reset_bitmaps(stripe);
1491 :
1492 : /* The range must be inside the bg. */
1493 0 : ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1494 :
1495 0 : path.search_commit_root = 1;
1496 0 : path.skip_locking = 1;
1497 :
1498 0 : ret = find_first_extent_item(extent_root, &path, logical_start, logical_len);
1499 : /* Either error or not found. */
1500 0 : if (ret)
1501 0 : goto out;
1502 0 : get_extent_info(&path, &extent_start, &extent_len, &extent_flags, &extent_gen);
1503 0 : if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1504 0 : stripe->nr_meta_extents++;
1505 0 : if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1506 0 : stripe->nr_data_extents++;
1507 0 : cur_logical = max(extent_start, cur_logical);
1508 :
1509 : /*
1510 : * Round down to stripe boundary.
1511 : *
1512 : * The extra calculation against bg->start is to handle block groups
1513 : * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
1514 : */
1515 0 : stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
1516 : bg->start;
1517 0 : stripe->physical = physical + stripe->logical - logical_start;
1518 0 : stripe->dev = dev;
1519 0 : stripe->bg = bg;
1520 0 : stripe->mirror_num = mirror_num;
1521 0 : stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;
1522 :
1523 : /* Fill the first extent info into stripe->sectors[] array. */
1524 0 : fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1525 : extent_flags, extent_gen);
1526 0 : cur_logical = extent_start + extent_len;
1527 :
1528 : /* Fill the extent info for the remaining sectors. */
1529 0 : while (cur_logical <= stripe_end) {
1530 0 : ret = find_first_extent_item(extent_root, &path, cur_logical,
1531 0 : stripe_end - cur_logical + 1);
1532 0 : if (ret < 0)
1533 0 : goto out;
1534 0 : if (ret > 0) {
1535 : ret = 0;
1536 : break;
1537 : }
1538 0 : get_extent_info(&path, &extent_start, &extent_len,
1539 : &extent_flags, &extent_gen);
1540 0 : if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1541 0 : stripe->nr_meta_extents++;
1542 0 : if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
1543 0 : stripe->nr_data_extents++;
1544 0 : fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1545 : extent_flags, extent_gen);
1546 0 : cur_logical = extent_start + extent_len;
1547 : }
1548 :
1549 : /* Now fill the data csum. */
1550 0 : if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
1551 0 : int sector_nr;
1552 0 : unsigned long csum_bitmap = 0;
1553 :
1554 : /* Csum space should have already been allocated. */
1555 0 : ASSERT(stripe->csums);
1556 :
1557 : /*
1558 : * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
1559 : * should contain at most 16 sectors.
1560 : */
1561 0 : ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1562 :
1563 0 : ret = btrfs_lookup_csums_bitmap(csum_root, stripe->logical,
1564 : stripe_end, stripe->csums,
1565 : &csum_bitmap, true);
1566 0 : if (ret < 0)
1567 0 : goto out;
1568 : if (ret > 0)
1569 : ret = 0;
1570 :
1571 0 : for_each_set_bit(sector_nr, &csum_bitmap, stripe->nr_sectors) {
1572 0 : stripe->sectors[sector_nr].csum = stripe->csums +
1573 0 : sector_nr * fs_info->csum_size;
1574 : }
1575 : }
1576 0 : set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1577 0 : out:
1578 0 : btrfs_release_path(&path);
1579 0 : return ret;
1580 : }
1581 :
1582 0 : static void scrub_reset_stripe(struct scrub_stripe *stripe)
1583 : {
1584 0 : scrub_stripe_reset_bitmaps(stripe);
1585 :
1586 0 : stripe->nr_meta_extents = 0;
1587 0 : stripe->nr_data_extents = 0;
1588 0 : stripe->state = 0;
1589 :
1590 0 : for (int i = 0; i < stripe->nr_sectors; i++) {
1591 0 : stripe->sectors[i].is_metadata = false;
1592 0 : stripe->sectors[i].csum = NULL;
1593 0 : stripe->sectors[i].generation = 0;
1594 : }
1595 0 : }
1596 :
1597 0 : static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1598 : struct scrub_stripe *stripe)
1599 : {
1600 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1601 0 : struct btrfs_bio *bbio;
1602 0 : int mirror = stripe->mirror_num;
1603 :
1604 0 : ASSERT(stripe->bg);
1605 0 : ASSERT(stripe->mirror_num > 0);
1606 0 : ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
1607 :
1608 0 : bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1609 : scrub_read_endio, stripe);
1610 :
1611 : /* Read the whole stripe. */
1612 0 : bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
1613 0 : for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
1614 0 : int ret;
1615 :
1616 0 : ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
1617 : /* We should have allocated enough bio vectors. */
1618 0 : ASSERT(ret == PAGE_SIZE);
1619 : }
1620 0 : atomic_inc(&stripe->pending_io);
1621 :
1622 : /*
1623 : * For dev-replace, if either the user asks to avoid the source dev
1624 : * or the device is missing, we try the next mirror instead.
1625 : */
1626 0 : if (sctx->is_dev_replace &&
1627 0 : (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1628 0 : BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
1629 0 : !stripe->dev->bdev)) {
1630 0 : int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1631 : stripe->bg->length);
1632 :
1633 0 : mirror = calc_next_mirror(mirror, num_copies);
1634 : }
1635 0 : btrfs_submit_bio(bbio, mirror);
1636 0 : }
1637 :
1638 0 : static bool stripe_has_metadata_error(struct scrub_stripe *stripe)
1639 : {
1640 0 : int i;
1641 :
1642 0 : for_each_set_bit(i, &stripe->error_bitmap, stripe->nr_sectors) {
1643 0 : if (stripe->sectors[i].is_metadata) {
1644 0 : struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1645 :
1646 0 : btrfs_err(fs_info,
1647 : "stripe %llu has unrepaired metadata sector at %llu",
1648 : stripe->logical,
1649 : stripe->logical + (i << fs_info->sectorsize_bits));
1650 0 : return true;
1651 : }
1652 : }
1653 : return false;
1654 : }
1655 :
1656 0 : static int flush_scrub_stripes(struct scrub_ctx *sctx)
1657 : {
1658 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1659 0 : struct scrub_stripe *stripe;
1660 0 : const int nr_stripes = sctx->cur_stripe;
1661 0 : int ret = 0;
1662 :
1663 0 : if (!nr_stripes)
1664 : return 0;
1665 :
1666 0 : ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1667 :
1668 0 : scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1669 0 : btrfs_stripe_nr_to_offset(nr_stripes));
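: /* Kick off the initial read for every queued stripe. */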
1670 0 : for (int i = 0; i < nr_stripes; i++) {
1671 0 : stripe = &sctx->stripes[i];
1672 0 : scrub_submit_initial_read(sctx, stripe);
1673 : }
1674 :
1675 0 : for (int i = 0; i < nr_stripes; i++) {
1676 0 : stripe = &sctx->stripes[i];
1677 :
1678 0 : wait_event(stripe->repair_wait,
1679 : test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1680 : }
1681 :
1682 : /*
1683 : * Submit the repaired sectors. For the zoned case we cannot repair
1684 : * in-place, so instead queue the bg to be relocated.
1685 : */
1686 0 : if (btrfs_is_zoned(fs_info)) {
1687 0 : for (int i = 0; i < nr_stripes; i++) {
1688 0 : stripe = &sctx->stripes[i];
1689 :
1690 0 : if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors)) {
1691 0 : btrfs_repair_one_zone(fs_info,
1692 0 : sctx->stripes[0].bg->start);
1693 0 : break;
1694 : }
1695 : }
1696 0 : } else if (!sctx->readonly) {
1697 0 : for (int i = 0; i < nr_stripes; i++) {
1698 0 : unsigned long repaired;
1699 :
1700 0 : stripe = &sctx->stripes[i];
1701 :
1702 0 : bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1703 0 : &stripe->error_bitmap, stripe->nr_sectors);
1704 0 : scrub_write_sectors(sctx, stripe, repaired, false);
1705 : }
1706 : }
1707 :
1708 : /* Submit for dev-replace. */
1709 0 : if (sctx->is_dev_replace) {
1710 : /*
1711 : * For dev-replace, if we know there is something wrong with
1712 : * metadata, we should immedately abort.
1713 : */
1714 0 : for (int i = 0; i < nr_stripes; i++) {
1715 0 : if (stripe_has_metadata_error(&sctx->stripes[i])) {
1716 0 : ret = -EIO;
1717 0 : goto out;
1718 : }
1719 : }
1720 0 : for (int i = 0; i < nr_stripes; i++) {
1721 0 : unsigned long good;
1722 :
1723 0 : stripe = &sctx->stripes[i];
1724 :
1725 0 : ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1726 :
1727 0 : bitmap_andnot(&good, &stripe->extent_sector_bitmap,
1728 0 : &stripe->error_bitmap, stripe->nr_sectors);
1729 0 : scrub_write_sectors(sctx, stripe, good, true);
1730 : }
1731 : }
1732 :
1733 : /* Wait for the above writebacks to finish. */
1734 0 : for (int i = 0; i < nr_stripes; i++) {
1735 0 : stripe = &sctx->stripes[i];
1736 :
1737 0 : wait_scrub_stripe_io(stripe);
1738 0 : scrub_reset_stripe(stripe);
1739 : }
1740 0 : out:
1741 0 : sctx->cur_stripe = 0;
1742 0 : return ret;
1743 : }
1744 :
1745 0 : static void raid56_scrub_wait_endio(struct bio *bio)
1746 : {
1747 0 : complete(bio->bi_private);
1748 0 : }
1749 :
1750 0 : static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1751 : struct btrfs_device *dev, int mirror_num,
1752 : u64 logical, u32 length, u64 physical)
1753 : {
1754 0 : struct scrub_stripe *stripe;
1755 0 : int ret;
1756 :
1757 : /* No available slot, submit all stripes and wait for them. */
1758 0 : if (sctx->cur_stripe >= SCRUB_STRIPES_PER_SCTX) {
1759 0 : ret = flush_scrub_stripes(sctx);
1760 0 : if (ret < 0)
1761 : return ret;
1762 : }
1763 :
1764 0 : stripe = &sctx->stripes[sctx->cur_stripe];
1765 :
1766 : /* We can queue one stripe using the remaining slot. */
1767 0 : scrub_reset_stripe(stripe);
1768 0 : ret = scrub_find_fill_first_stripe(bg, dev, physical, mirror_num,
1769 : logical, length, stripe);
1770 : /* Either > 0 for no more extents, or < 0 for error. */
1771 0 : if (ret)
1772 : return ret;
1773 0 : sctx->cur_stripe++;
1774 0 : return 0;
1775 : }
1776 :
1777 0 : static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1778 : struct btrfs_device *scrub_dev,
1779 : struct btrfs_block_group *bg,
1780 : struct map_lookup *map,
1781 : u64 full_stripe_start)
1782 : {
1783 0 : DECLARE_COMPLETION_ONSTACK(io_done);
1784 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1785 0 : struct btrfs_raid_bio *rbio;
1786 0 : struct btrfs_io_context *bioc = NULL;
1787 0 : struct bio *bio;
1788 0 : struct scrub_stripe *stripe;
1789 0 : bool all_empty = true;
1790 0 : const int data_stripes = nr_data_stripes(map);
1791 0 : unsigned long extent_bitmap = 0;
1792 0 : u64 length = btrfs_stripe_nr_to_offset(data_stripes);
1793 0 : int ret;
1794 :
1795 0 : ASSERT(sctx->raid56_data_stripes);
1796 :
1797 0 : for (int i = 0; i < data_stripes; i++) {
1798 0 : int stripe_index;
1799 0 : int rot;
1800 0 : u64 physical;
1801 :
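: /*
: * Map data stripe @i of this full stripe to its on-disk
: * location. @rot is the full stripe number inside the chunk;
: * RAID56 rotates the stripes by one device per full stripe,
: * so e.g. with rot == 1, data stripe 0 sits on map stripe 1.
: */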
1802 0 : stripe = &sctx->raid56_data_stripes[i];
1803 0 : rot = div_u64(full_stripe_start - bg->start,
1804 0 : data_stripes) >> BTRFS_STRIPE_LEN_SHIFT;
1805 0 : stripe_index = (i + rot) % map->num_stripes;
1806 0 : physical = map->stripes[stripe_index].physical +
1807 : btrfs_stripe_nr_to_offset(rot);
1808 :
1809 0 : scrub_reset_stripe(stripe);
1810 0 : set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
1811 0 : ret = scrub_find_fill_first_stripe(bg,
1812 : map->stripes[stripe_index].dev, physical, 1,
1813 : full_stripe_start + btrfs_stripe_nr_to_offset(i),
1814 : BTRFS_STRIPE_LEN, stripe);
1815 0 : if (ret < 0)
1816 0 : goto out;
1817 : /*
1818 : * No extent in this data stripe; we still need to manually mark
1819 : * it initialized to make later read submission happy.
1820 : */
1821 0 : if (ret > 0) {
1822 0 : stripe->logical = full_stripe_start +
1823 : btrfs_stripe_nr_to_offset(i);
1824 0 : stripe->dev = map->stripes[stripe_index].dev;
1825 0 : stripe->mirror_num = 1;
1826 0 : set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
1827 : }
1828 : }
1829 :
1830 : /* Check if all data stripes are empty. */
1831 0 : for (int i = 0; i < data_stripes; i++) {
1832 0 : stripe = &sctx->raid56_data_stripes[i];
1833 0 : if (!bitmap_empty(&stripe->extent_sector_bitmap, stripe->nr_sectors)) {
1834 : all_empty = false;
1835 : break;
1836 : }
1837 : }
1838 0 : if (all_empty) {
1839 0 : ret = 0;
1840 0 : goto out;
1841 : }
1842 :
1843 0 : for (int i = 0; i < data_stripes; i++) {
1844 0 : stripe = &sctx->raid56_data_stripes[i];
1845 0 : scrub_submit_initial_read(sctx, stripe);
1846 : }
1847 0 : for (int i = 0; i < data_stripes; i++) {
1848 0 : stripe = &sctx->raid56_data_stripes[i];
1849 :
1850 0 : wait_event(stripe->repair_wait,
1851 : test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
1852 : }
1853 : /* For now, no zoned support for RAID56. */
1854 : ASSERT(!btrfs_is_zoned(sctx->fs_info));
1855 :
1856 : /* Writeback for the repaired sectors. */
1857 0 : for (int i = 0; i < data_stripes; i++) {
1858 0 : unsigned long repaired;
1859 :
1860 0 : stripe = &sctx->raid56_data_stripes[i];
1861 :
1862 0 : bitmap_andnot(&repaired, &stripe->init_error_bitmap,
1863 0 : &stripe->error_bitmap, stripe->nr_sectors);
1864 0 : scrub_write_sectors(sctx, stripe, repaired, false);
1865 : }
1866 :
1867 : /* Wait for the above writebacks to finish. */
1868 0 : for (int i = 0; i < data_stripes; i++) {
1869 0 : stripe = &sctx->raid56_data_stripes[i];
1870 :
1871 0 : wait_scrub_stripe_io(stripe);
1872 : }
1873 :
1874 : /*
1875 : * Now all data stripes are properly verified. Check if we have any
1876 : * unrepaired sectors; if so, abort immediately, or we could further
1877 : * corrupt the P/Q stripes.
1878 : *
1879 : * During the loop, also populate extent_bitmap.
1880 : */
1881 0 : for (int i = 0; i < data_stripes; i++) {
1882 0 : unsigned long error;
1883 :
1884 0 : stripe = &sctx->raid56_data_stripes[i];
1885 :
1886 : /*
1887 : * We should only check the errors where there is an extent, as we
1888 : * may hit an empty data stripe whose device is missing.
1889 : */
1890 0 : bitmap_and(&error, &stripe->error_bitmap,
1891 0 : &stripe->extent_sector_bitmap, stripe->nr_sectors);
1892 0 : if (!bitmap_empty(&error, stripe->nr_sectors)) {
1893 0 : btrfs_err(fs_info,
1894 : "unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
1895 : full_stripe_start, i, stripe->nr_sectors,
1896 : &error);
1897 0 : ret = -EIO;
1898 0 : goto out;
1899 : }
1900 0 : bitmap_or(&extent_bitmap, &extent_bitmap,
1901 0 : &stripe->extent_sector_bitmap, stripe->nr_sectors);
1902 : }
1903 :
1904 : /* Now we can check and regenerate the P/Q stripe. */
1905 0 : bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
1906 0 : bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
1907 0 : bio->bi_private = &io_done;
1908 0 : bio->bi_end_io = raid56_scrub_wait_endio;
1909 :
1910 0 : btrfs_bio_counter_inc_blocked(fs_info);
1911 0 : ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1912 : &length, &bioc, NULL, NULL, 1);
1913 0 : if (ret < 0) {
1914 0 : btrfs_put_bioc(bioc);
1915 0 : btrfs_bio_counter_dec(fs_info);
1916 0 : goto out;
1917 : }
1918 0 : rbio = raid56_parity_alloc_scrub_rbio(bio, bioc, scrub_dev, &extent_bitmap,
1919 0 : BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1920 0 : btrfs_put_bioc(bioc);
1921 0 : if (!rbio) {
1922 0 : ret = -ENOMEM;
1923 0 : btrfs_bio_counter_dec(fs_info);
1924 0 : goto out;
1925 : }
1926 : /* Use the recovered stripes as cache to avoid reading them from disk again. */
1927 0 : for (int i = 0; i < data_stripes; i++) {
1928 0 : stripe = &sctx->raid56_data_stripes[i];
1929 :
1930 0 : raid56_parity_cache_data_pages(rbio, stripe->pages,
1931 0 : full_stripe_start + (i << BTRFS_STRIPE_LEN_SHIFT));
1932 : }
1933 0 : raid56_parity_submit_scrub_rbio(rbio);
1934 0 : wait_for_completion_io(&io_done);
1935 0 : ret = blk_status_to_errno(bio->bi_status);
1936 0 : bio_put(bio);
1937 0 : btrfs_bio_counter_dec(fs_info);
1938 :
1939 0 : out:
1940 0 : return ret;
1941 : }
1942 :
1943 : /*
1944 : * Scrub one range which can only have a simple mirror based profile.
1945 : * (Including all ranges in SINGLE/DUP/RAID1/RAID1C*, and each stripe in
1946 : * RAID0/RAID10.)
1947 : *
1948 : * Since we may need to handle a subset of a block group, we need the
1949 : * @logical_start and @logical_length parameters.
1950 : */
1951 0 : static int scrub_simple_mirror(struct scrub_ctx *sctx,
1952 : struct btrfs_block_group *bg,
1953 : struct map_lookup *map,
1954 : u64 logical_start, u64 logical_length,
1955 : struct btrfs_device *device,
1956 : u64 physical, int mirror_num)
1957 : {
1958 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
1959 0 : const u64 logical_end = logical_start + logical_length;
1960 : /* An artificial limit, inherited from the old scrub behavior. */
1961 0 : struct btrfs_path path = { 0 };
1962 0 : u64 cur_logical = logical_start;
1963 0 : int ret;
1964 :
1965 : /* The range must be inside the bg */
1966 0 : ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);
1967 :
1968 0 : path.search_commit_root = 1;
1969 0 : path.skip_locking = 1;
1970 : /* Go through each extent items inside the logical range */
1971 0 : while (cur_logical < logical_end) {
1972 0 : u64 cur_physical = physical + cur_logical - logical_start;
1973 :
1974 : /* Canceled? */
1975 0 : if (atomic_read(&fs_info->scrub_cancel_req) ||
1976 : atomic_read(&sctx->cancel_req)) {
1977 : ret = -ECANCELED;
1978 : break;
1979 : }
1980 : /* Paused? */
1981 0 : if (atomic_read(&fs_info->scrub_pause_req)) {
1982 : /* Push queued extents */
1983 0 : scrub_blocked_if_needed(fs_info);
1984 : }
1985 : /* Block group removed? */
1986 0 : spin_lock(&bg->lock);
1987 0 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags)) {
1988 0 : spin_unlock(&bg->lock);
1989 0 : ret = 0;
1990 0 : break;
1991 : }
1992 0 : spin_unlock(&bg->lock);
1993 :
1994 0 : ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
1995 : cur_logical, logical_end - cur_logical,
1996 : cur_physical);
1997 0 : if (ret > 0) {
1998 : /* No more extents, just update the accounting */
1999 0 : sctx->stat.last_physical = physical + logical_length;
2000 0 : ret = 0;
2001 0 : break;
2002 : }
2003 0 : if (ret < 0)
2004 : break;
2005 :
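: /* Advance past the last stripe queued above. */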
2006 0 : ASSERT(sctx->cur_stripe > 0);
2007 0 : cur_logical = sctx->stripes[sctx->cur_stripe - 1].logical
2008 : + BTRFS_STRIPE_LEN;
2009 :
2010 : /* Don't hold the CPU for too long */
2011 0 : cond_resched();
2012 : }
2013 0 : btrfs_release_path(&path);
2014 0 : return ret;
2015 : }
2016 :
2017 : /* Calculate the full stripe length for simple stripe based profiles */
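: /*
: * E.g. assuming the 64K BTRFS_STRIPE_LEN: a 4-stripe RAID0 chunk has
: * a 256K full stripe, while a 4-stripe RAID10 chunk (sub_stripes == 2)
: * has 128K, as each pair of sub-stripes mirrors the same data.
: */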
2018 : static u64 simple_stripe_full_stripe_len(const struct map_lookup *map)
2019 : {
2020 0 : ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2021 : BTRFS_BLOCK_GROUP_RAID10));
2022 :
2023 0 : return btrfs_stripe_nr_to_offset(map->num_stripes / map->sub_stripes);
2024 : }
2025 :
2026 : /* Get the logical bytenr for the stripe */
2027 : static u64 simple_stripe_get_logical(struct map_lookup *map,
2028 : struct btrfs_block_group *bg,
2029 : int stripe_index)
2030 : {
2031 0 : ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2032 : BTRFS_BLOCK_GROUP_RAID10));
2033 0 : ASSERT(stripe_index < map->num_stripes);
2034 :
2035 : /*
2036 : * (stripe_index / sub_stripes) gives how many data stripes we need to
2037 : * skip.
2038 : */
2039 0 : return btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes) +
2040 0 : bg->start;
2041 : }
2042 :
2043 : /* Get the mirror number for the stripe */
2044 : static int simple_stripe_mirror_num(struct map_lookup *map, int stripe_index)
2045 : {
2046 0 : ASSERT(map->type & (BTRFS_BLOCK_GROUP_RAID0 |
2047 : BTRFS_BLOCK_GROUP_RAID10));
2048 0 : ASSERT(stripe_index < map->num_stripes);
2049 :
2050 : /* For RAID0 it's fixed to 1; for RAID10 the remainder alternates 0,1,0,1..., giving mirror_num 1,2,1,2... */
2051 0 : return stripe_index % map->sub_stripes + 1;
2052 : }
2053 :
2054 0 : static int scrub_simple_stripe(struct scrub_ctx *sctx,
2055 : struct btrfs_block_group *bg,
2056 : struct map_lookup *map,
2057 : struct btrfs_device *device,
2058 : int stripe_index)
2059 : {
2060 0 : const u64 logical_increment = simple_stripe_full_stripe_len(map);
2061 0 : const u64 orig_logical = simple_stripe_get_logical(map, bg, stripe_index);
2062 0 : const u64 orig_physical = map->stripes[stripe_index].physical;
2063 0 : const int mirror_num = simple_stripe_mirror_num(map, stripe_index);
2064 0 : u64 cur_logical = orig_logical;
2065 0 : u64 cur_physical = orig_physical;
2066 0 : int ret = 0;
2067 :
2068 0 : while (cur_logical < bg->start + bg->length) {
2069 : /*
2070 : * Inside each stripe, RAID0 is just SINGLE, and RAID10 is
2071 : * just RAID1, so we can reuse scrub_simple_mirror() to scrub
2072 : * this stripe.
2073 : */
2074 0 : ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2075 : BTRFS_STRIPE_LEN, device, cur_physical,
2076 : mirror_num);
2077 0 : if (ret)
2078 0 : return ret;
2079 : /* Skip to next stripe which belongs to the target device */
2080 0 : cur_logical += logical_increment;
2081 : /* For physical offset, we just go to next stripe */
2082 0 : cur_physical += BTRFS_STRIPE_LEN;
2083 : }
2084 : return ret;
2085 : }
2086 :
2087 0 : static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2088 : struct btrfs_block_group *bg,
2089 : struct extent_map *em,
2090 : struct btrfs_device *scrub_dev,
2091 : int stripe_index)
2092 : {
2093 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
2094 0 : struct map_lookup *map = em->map_lookup;
2095 0 : const u64 profile = map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
2096 0 : const u64 chunk_logical = bg->start;
2097 0 : int ret;
2098 0 : int ret2;
2099 0 : u64 physical = map->stripes[stripe_index].physical;
2100 0 : const u64 dev_stripe_len = btrfs_calc_stripe_length(em);
2101 0 : const u64 physical_end = physical + dev_stripe_len;
2102 0 : u64 logical;
2103 0 : u64 logic_end;
2104 : /* The logical increment after finishing one stripe */
2105 0 : u64 increment;
2106 : /* Offset inside the chunk */
2107 0 : u64 offset;
2108 0 : u64 stripe_logical;
2109 0 : int stop_loop = 0;
2110 :
2111 0 : scrub_blocked_if_needed(fs_info);
2112 :
2113 0 : if (sctx->is_dev_replace &&
2114 0 : btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2115 0 : mutex_lock(&sctx->wr_lock);
2116 0 : sctx->write_pointer = physical;
2117 0 : mutex_unlock(&sctx->wr_lock);
2118 : }
2119 :
2120 : /* Prepare the extra data stripes used by RAID56. */
2121 0 : if (profile & BTRFS_BLOCK_GROUP_RAID56_MASK) {
2122 0 : ASSERT(sctx->raid56_data_stripes == NULL);
2123 :
2124 0 : sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2125 : sizeof(struct scrub_stripe),
2126 : GFP_KERNEL);
2127 0 : if (!sctx->raid56_data_stripes) {
2128 0 : ret = -ENOMEM;
2129 0 : goto out;
2130 : }
2131 0 : for (int i = 0; i < nr_data_stripes(map); i++) {
2132 0 : ret = init_scrub_stripe(fs_info,
2133 0 : &sctx->raid56_data_stripes[i]);
2134 0 : if (ret < 0)
2135 0 : goto out;
2136 0 : sctx->raid56_data_stripes[i].bg = bg;
2137 0 : sctx->raid56_data_stripes[i].sctx = sctx;
2138 : }
2139 : }
2140 : /*
2141 : * There used to be a big double loop to handle all profiles using the
2142 : * same routine, which grew larger and more convoluted over time.
2143 : *
2144 : * So here we handle each profile differently, so that simpler profiles
2145 : * have a simpler scrubbing function.
2146 : */
2147 0 : if (!(profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10 |
2148 : BTRFS_BLOCK_GROUP_RAID56_MASK))) {
2149 : /*
2150 : * The above check rules out all complex profiles; the remaining
2151 : * profiles are SINGLE|DUP|RAID1|RAID1C*, which are simple
2152 : * mirrored duplication without striping.
2153 : *
2154 : * Only @physical and @mirror_num need to be calculated using
2155 : * @stripe_index.
2156 : */
2157 0 : ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2158 : scrub_dev, map->stripes[stripe_index].physical,
2159 : stripe_index + 1);
2160 0 : offset = 0;
2161 0 : goto out;
2162 : }
2163 0 : if (profile & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
2164 0 : ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2165 0 : offset = btrfs_stripe_nr_to_offset(stripe_index / map->sub_stripes);
2166 0 : goto out;
2167 : }
2168 :
2169 : /* Only RAID56 goes through the old code */
2170 0 : ASSERT(map->type & BTRFS_BLOCK_GROUP_RAID56_MASK);
2171 0 : ret = 0;
2172 :
2173 : /* Calculate the logical end of the stripe */
2174 0 : get_raid56_logic_offset(physical_end, stripe_index,
2175 : map, &logic_end, NULL);
2176 0 : logic_end += chunk_logical;
2177 :
2178 : /* Initialize @offset in case we need to jump to the out: label */
2179 0 : get_raid56_logic_offset(physical, stripe_index, map, &offset, NULL);
2180 0 : increment = btrfs_stripe_nr_to_offset(nr_data_stripes(map));
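: /*
: * E.g. a 3-device RAID5 has 2 data stripes, so finishing one data
: * stripe advances the logical address by 2 * BTRFS_STRIPE_LEN.
: */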
2181 :
2182 : /*
2183 : * Due to the rotation, for RAID56 it's better to iterate the stripes
2184 : * using their physical offsets.
2185 : */
2186 0 : while (physical < physical_end) {
2187 0 : ret = get_raid56_logic_offset(physical, stripe_index, map,
2188 : &logical, &stripe_logical);
2189 0 : logical += chunk_logical;
2190 0 : if (ret) {
2191 : /* it is parity strip */
2192 0 : stripe_logical += chunk_logical;
2193 0 : ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2194 : map, stripe_logical);
2195 0 : if (ret)
2196 0 : goto out;
2197 0 : goto next;
2198 : }
2199 :
2200 : /*
2201 : * Now we're at a data stripe, scrub each extent in the range.
2202 : *
2203 : * At this stage, if we ignore the repair part, inside each data
2204 : * stripe it is no different from the SINGLE profile.
2205 : * We can reuse scrub_simple_mirror() here, as the repair part
2206 : * is still based on @mirror_num.
2207 : */
2208 0 : ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2209 : scrub_dev, physical, 1);
2210 0 : if (ret < 0)
2211 0 : goto out;
2212 0 : next:
2213 0 : logical += increment;
2214 0 : physical += BTRFS_STRIPE_LEN;
2215 0 : spin_lock(&sctx->stat_lock);
2216 0 : if (stop_loop)
2217 : sctx->stat.last_physical =
2218 : map->stripes[stripe_index].physical + dev_stripe_len;
2219 : else
2220 0 : sctx->stat.last_physical = physical;
2221 0 : spin_unlock(&sctx->stat_lock);
2222 0 : if (stop_loop)
2223 : break;
2224 : }
2225 0 : out:
2226 0 : ret2 = flush_scrub_stripes(sctx);
2227 0 : if (!ret)
2228 0 : ret = ret2;
2229 0 : if (sctx->raid56_data_stripes) {
2230 0 : for (int i = 0; i < nr_data_stripes(map); i++)
2231 0 : release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2232 0 : kfree(sctx->raid56_data_stripes);
2233 0 : sctx->raid56_data_stripes = NULL;
2234 : }
2235 :
2236 0 : if (sctx->is_dev_replace && ret >= 0) {
2239 0 : ret2 = sync_write_pointer_for_zoned(sctx,
2240 : chunk_logical + offset,
2241 : map->stripes[stripe_index].physical,
2242 : physical_end);
2243 0 : if (ret2)
2244 0 : ret = ret2;
2245 : }
2246 :
2247 0 : return ret < 0 ? ret : 0;
2248 : }
2249 :
2250 0 : static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2251 : struct btrfs_block_group *bg,
2252 : struct btrfs_device *scrub_dev,
2253 : u64 dev_offset,
2254 : u64 dev_extent_len)
2255 : {
2256 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
2257 0 : struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2258 0 : struct map_lookup *map;
2259 0 : struct extent_map *em;
2260 0 : int i;
2261 0 : int ret = 0;
2262 :
2263 0 : read_lock(&map_tree->lock);
2264 0 : em = lookup_extent_mapping(map_tree, bg->start, bg->length);
2265 0 : read_unlock(&map_tree->lock);
2266 :
2267 0 : if (!em) {
2268 : /*
2269 : * Might have been an unused block group deleted by the cleaner
2270 : * kthread or relocation.
2271 : */
2272 0 : spin_lock(&bg->lock);
2273 0 : if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &bg->runtime_flags))
2274 0 : ret = -EINVAL;
2275 0 : spin_unlock(&bg->lock);
2276 :
2277 0 : return ret;
2278 : }
2279 0 : if (em->start != bg->start)
2280 0 : goto out;
2281 0 : if (em->len < dev_extent_len)
2282 0 : goto out;
2283 :
2284 0 : map = em->map_lookup;
2285 0 : for (i = 0; i < map->num_stripes; ++i) {
2286 0 : if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
2287 0 : map->stripes[i].physical == dev_offset) {
2288 0 : ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2289 0 : if (ret)
2290 0 : goto out;
2291 : }
2292 : }
2293 0 : out:
2294 0 : free_extent_map(em);
2295 :
2296 0 : return ret;
2297 : }
2298 :
2299 0 : static int finish_extent_writes_for_zoned(struct btrfs_root *root,
2300 : struct btrfs_block_group *cache)
2301 : {
2302 0 : struct btrfs_fs_info *fs_info = cache->fs_info;
2303 0 : struct btrfs_trans_handle *trans;
2304 :
2305 0 : if (!btrfs_is_zoned(fs_info))
2306 : return 0;
2307 :
2308 0 : btrfs_wait_block_group_reservations(cache);
2309 0 : btrfs_wait_nocow_writers(cache);
2310 0 : btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2311 :
2312 0 : trans = btrfs_join_transaction(root);
2313 0 : if (IS_ERR(trans))
2314 0 : return PTR_ERR(trans);
2315 0 : return btrfs_commit_transaction(trans);
2316 : }
2317 :
2318 : static noinline_for_stack
2319 0 : int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2320 : struct btrfs_device *scrub_dev, u64 start, u64 end)
2321 : {
2322 0 : struct btrfs_dev_extent *dev_extent = NULL;
2323 0 : struct btrfs_path *path;
2324 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
2325 0 : struct btrfs_root *root = fs_info->dev_root;
2326 0 : u64 chunk_offset;
2327 0 : int ret = 0;
2328 0 : int ro_set;
2329 0 : int slot;
2330 0 : struct extent_buffer *l;
2331 0 : struct btrfs_key key;
2332 0 : struct btrfs_key found_key;
2333 0 : struct btrfs_block_group *cache;
2334 0 : struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2335 :
2336 0 : path = btrfs_alloc_path();
2337 0 : if (!path)
2338 : return -ENOMEM;
2339 :
2340 0 : path->reada = READA_FORWARD;
2341 0 : path->search_commit_root = 1;
2342 0 : path->skip_locking = 1;
2343 :
2344 0 : key.objectid = scrub_dev->devid;
2345 0 : key.offset = 0ull;
2346 0 : key.type = BTRFS_DEV_EXTENT_KEY;
2347 :
2348 0 : while (1) {
2349 0 : u64 dev_extent_len;
2350 :
2351 0 : ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2352 0 : if (ret < 0)
2353 : break;
2354 0 : if (ret > 0) {
2355 0 : if (path->slots[0] >=
2356 0 : btrfs_header_nritems(path->nodes[0])) {
2357 0 : ret = btrfs_next_leaf(root, path);
2358 0 : if (ret < 0)
2359 : break;
2360 0 : if (ret > 0) {
2361 : ret = 0;
2362 : break;
2363 : }
2364 : } else {
2365 : ret = 0;
2366 : }
2367 : }
2368 :
2369 0 : l = path->nodes[0];
2370 0 : slot = path->slots[0];
2371 :
2372 0 : btrfs_item_key_to_cpu(l, &found_key, slot);
2373 :
2374 0 : if (found_key.objectid != scrub_dev->devid)
2375 : break;
2376 :
2377 0 : if (found_key.type != BTRFS_DEV_EXTENT_KEY)
2378 : break;
2379 :
2380 0 : if (found_key.offset >= end)
2381 : break;
2382 :
2383 0 : if (found_key.offset < key.offset)
2384 : break;
2385 :
2386 0 : dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
2387 0 : dev_extent_len = btrfs_dev_extent_length(l, dev_extent);
2388 :
2389 0 : if (found_key.offset + dev_extent_len <= start)
2390 0 : goto skip;
2391 :
2392 0 : chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
2393 :
2394 : /*
2395 : * Get a reference on the corresponding block group to prevent
2396 : * the chunk from going away while we scrub it.
2397 : */
2398 0 : cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2399 :
2400 : /* Some chunks are removed but not committed to disk yet,
2401 : * continue scrubbing. */
2402 0 : if (!cache)
2403 0 : goto skip;
2404 :
2405 0 : ASSERT(cache->start <= chunk_offset);
2406 : /*
2407 : * We are using the commit root to search for device extents, so
2408 : * that means we could have found a device extent item from a
2409 : * block group that was deleted in the current transaction. The
2410 : * logical start offset of the deleted block group, stored at
2411 : * @chunk_offset, might be part of the logical address range of
2412 : * a new block group (which uses different physical extents).
2413 : * In this case btrfs_lookup_block_group() has returned the new
2414 : * block group, and its start address is less than @chunk_offset.
2415 : *
2416 : * We skip such new block groups, because it's pointless to
2417 : * process them, as we won't find their extents because we search
2418 : * for them using the commit root of the extent tree. For a device
2419 : * replace it's also fine to skip it, we won't miss copying them
2420 : * to the target device because we have the write duplication
2421 : * setup through the regular write path (by btrfs_map_block()),
2422 : * and we have committed a transaction when we started the device
2423 : * replace, right after setting up the device replace state.
2424 : */
2425 0 : if (cache->start < chunk_offset) {
2426 0 : btrfs_put_block_group(cache);
2427 0 : goto skip;
2428 : }
2429 :
2430 0 : if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2431 0 : if (!test_bit(BLOCK_GROUP_FLAG_TO_COPY, &cache->runtime_flags)) {
2432 0 : btrfs_put_block_group(cache);
2433 0 : goto skip;
2434 : }
2435 : }
2436 :
2437 : /*
2438 : * Make sure that while we are scrubbing the corresponding block
2439 : * group doesn't get its logical address and its device extents
2440 : * reused for another block group, which can possibly be of a
2441 : * different type and different profile. We do this to prevent
2442 : * false error detections and crashes due to bogus attempts to
2443 : * repair extents.
2444 : */
2445 0 : spin_lock(&cache->lock);
2446 0 : if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags)) {
2447 0 : spin_unlock(&cache->lock);
2448 0 : btrfs_put_block_group(cache);
2449 0 : goto skip;
2450 : }
2451 0 : btrfs_freeze_block_group(cache);
2452 0 : spin_unlock(&cache->lock);
2453 :
2454 : /*
2455 : * We need to call btrfs_inc_block_group_ro() with scrubs_paused,
2456 : * to avoid deadlock caused by:
2457 : * btrfs_inc_block_group_ro()
2458 : * -> btrfs_wait_for_commit()
2459 : * -> btrfs_commit_transaction()
2460 : * -> btrfs_scrub_pause()
2461 : */
2462 0 : scrub_pause_on(fs_info);
2463 :
2464 : /*
2465 : * Don't do chunk preallocation for scrub.
2466 : *
2467 : * This is especially important for SYSTEM bgs, or we can hit
2468 : * -EFBIG from btrfs_finish_chunk_alloc() like:
2469 : * 1. The only SYSTEM bg is marked RO.
2470 : * Since SYSTEM bg is small, that's pretty common.
2471 : * 2. New SYSTEM bg will be allocated
2472 : * Since the regular (non-scrub) RO path would allocate a new chunk.
2473 : * 3. New SYSTEM bg is empty and will get cleaned up
2474 : * Before cleanup really happens, it's marked RO again.
2475 : * 4. The empty SYSTEM bg gets scrubbed
2476 : * We go back to 2.
2477 : *
2478 : * This can easily boost the number of SYSTEM chunks if the cleaner
2479 : * thread can't be triggered fast enough, and use up all the space
2480 : * of btrfs_super_block::sys_chunk_array.
2481 : *
2482 : * While for dev replace, we need to try our best to mark block
2483 : * group RO, to prevent race between:
2484 : * - Write duplication
2485 : * Contains latest data
2486 : * - Scrub copy
2487 : * Contains data from commit tree
2488 : *
2489 : * If target block group is not marked RO, nocow writes can
2490 : * be overwritten by scrub copy, causing data corruption.
2491 : * So for dev-replace, it's not allowed to continue if a block
2492 : * group is not RO.
2493 : */
2494 0 : ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2495 0 : if (!ret && sctx->is_dev_replace) {
2496 0 : ret = finish_extent_writes_for_zoned(root, cache);
2497 0 : if (ret) {
2498 0 : btrfs_dec_block_group_ro(cache);
2499 0 : scrub_pause_off(fs_info);
2500 0 : btrfs_put_block_group(cache);
2501 0 : break;
2502 : }
2503 : }
2504 :
2505 0 : if (ret == 0) {
2506 : ro_set = 1;
2507 0 : } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2508 0 : !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
2509 : /*
2510 : * btrfs_inc_block_group_ro() returns -ENOSPC when it
2511 : * fails to create a new chunk for metadata.
2512 : * It is not a problem for scrub, because
2513 : * metadata is always COWed, and our scrub pauses at
2514 : * transaction commits.
2515 : *
2516 : * For RAID56 chunks, we have to mark them read-only
2517 : * for scrub, as later we would use our own cached
2518 : * data stripes, outside of the RAID56 layer.
2519 : * Thus we want the RAID56 bg to be marked RO to
2520 : * prevent RMW from screwing up our cache.
2521 : */
2522 : ro_set = 0;
2523 0 : } else if (ret == -ETXTBSY) {
2524 0 : btrfs_warn(fs_info,
2525 : "skipping scrub of block group %llu due to active swapfile",
2526 : cache->start);
2527 0 : scrub_pause_off(fs_info);
2528 0 : ret = 0;
2529 0 : goto skip_unfreeze;
2530 : } else {
2531 0 : btrfs_warn(fs_info,
2532 : "failed setting block group ro: %d", ret);
2533 0 : btrfs_unfreeze_block_group(cache);
2534 0 : btrfs_put_block_group(cache);
2535 0 : scrub_pause_off(fs_info);
2536 0 : break;
2537 : }
2538 :
2539 : /*
2540 : * Now the target block group is marked RO, wait for nocow writes to
2541 : * finish before dev-replace.
2542 : * COW is fine, as COW never overwrites extents in commit tree.
2543 : */
2544 0 : if (sctx->is_dev_replace) {
2545 0 : btrfs_wait_nocow_writers(cache);
2546 0 : btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2547 : cache->length);
2548 : }
2549 :
2550 0 : scrub_pause_off(fs_info);
2551 0 : down_write(&dev_replace->rwsem);
2552 0 : dev_replace->cursor_right = found_key.offset + dev_extent_len;
2553 0 : dev_replace->cursor_left = found_key.offset;
2554 0 : dev_replace->item_needs_writeback = 1;
2555 0 : up_write(&dev_replace->rwsem);
2556 :
2557 0 : ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2558 : dev_extent_len);
2559 0 : if (sctx->is_dev_replace &&
2560 0 : !btrfs_finish_block_group_to_copy(dev_replace->srcdev,
2561 : cache, found_key.offset))
2562 0 : ro_set = 0;
2563 :
2564 0 : down_write(&dev_replace->rwsem);
2565 0 : dev_replace->cursor_left = dev_replace->cursor_right;
2566 0 : dev_replace->item_needs_writeback = 1;
2567 0 : up_write(&dev_replace->rwsem);
2568 :
2569 0 : if (ro_set)
2570 0 : btrfs_dec_block_group_ro(cache);
2571 :
2572 : /*
2573 : * We might have prevented the cleaner kthread from deleting
2574 : * this block group if it was already unused because we raced
2575 : * and set it to RO mode first. So add it back to the unused
2576 : * list, otherwise it might not ever be deleted unless a manual
2577 : * balance is triggered or it becomes used and unused again.
2578 : */
2579 0 : spin_lock(&cache->lock);
2580 0 : if (!test_bit(BLOCK_GROUP_FLAG_REMOVED, &cache->runtime_flags) &&
2581 0 : !cache->ro && cache->reserved == 0 && cache->used == 0) {
2582 0 : spin_unlock(&cache->lock);
2583 0 : if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2584 0 : btrfs_discard_queue_work(&fs_info->discard_ctl,
2585 : cache);
2586 : else
2587 0 : btrfs_mark_bg_unused(cache);
2588 : } else {
2589 0 : spin_unlock(&cache->lock);
2590 : }
2591 0 : skip_unfreeze:
2592 0 : btrfs_unfreeze_block_group(cache);
2593 0 : btrfs_put_block_group(cache);
2594 0 : if (ret)
2595 : break;
2596 0 : if (sctx->is_dev_replace &&
2597 : atomic64_read(&dev_replace->num_write_errors) > 0) {
2598 : ret = -EIO;
2599 : break;
2600 : }
2601 0 : if (sctx->stat.malloc_errors > 0) {
2602 : ret = -ENOMEM;
2603 : break;
2604 : }
2605 0 : skip:
2606 0 : key.offset = found_key.offset + dev_extent_len;
2607 0 : btrfs_release_path(path);
2608 : }
2609 :
2610 0 : btrfs_free_path(path);
2611 :
2612 0 : return ret;
2613 : }
2614 :
2615 0 : static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2616 : struct page *page, u64 physical, u64 generation)
2617 : {
2618 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
2619 0 : struct bio_vec bvec;
2620 0 : struct bio bio;
2621 0 : struct btrfs_super_block *sb = page_address(page);
2622 0 : int ret;
2623 :
2624 0 : bio_init(&bio, dev->bdev, &bvec, 1, REQ_OP_READ);
2625 0 : bio.bi_iter.bi_sector = physical >> SECTOR_SHIFT;
2626 0 : __bio_add_page(&bio, page, BTRFS_SUPER_INFO_SIZE, 0);
2627 0 : ret = submit_bio_wait(&bio);
2628 0 : bio_uninit(&bio);
2629 :
2630 0 : if (ret < 0)
2631 : return ret;
2632 0 : ret = btrfs_check_super_csum(fs_info, sb);
2633 0 : if (ret != 0) {
2634 0 : btrfs_err_rl(fs_info,
2635 : "super block at physical %llu devid %llu has bad csum",
2636 : physical, dev->devid);
2637 0 : return -EIO;
2638 : }
2639 0 : if (btrfs_super_generation(sb) != generation) {
2640 0 : btrfs_err_rl(fs_info,
2641 : "super block at physical %llu devid %llu has bad generation %llu expect %llu",
2642 : physical, dev->devid,
2643 : btrfs_super_generation(sb), generation);
2644 0 : return -EUCLEAN;
2645 : }
2646 :
2647 0 : return btrfs_validate_super(fs_info, sb, -1);
2648 : }
2649 :
2650 0 : static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2651 : struct btrfs_device *scrub_dev)
2652 : {
2653 0 : int i;
2654 0 : u64 bytenr;
2655 0 : u64 gen;
2656 0 : int ret = 0;
2657 0 : struct page *page;
2658 0 : struct btrfs_fs_info *fs_info = sctx->fs_info;
2659 :
2660 0 : if (BTRFS_FS_ERROR(fs_info))
2661 : return -EROFS;
2662 :
2663 0 : page = alloc_page(GFP_KERNEL);
2664 0 : if (!page) {
2665 0 : spin_lock(&sctx->stat_lock);
2666 0 : sctx->stat.malloc_errors++;
2667 0 : spin_unlock(&sctx->stat_lock);
2668 0 : return -ENOMEM;
2669 : }
2670 :
2671 : /* Seed devices of a new filesystem has their own generation. */
2672 0 : if (scrub_dev->fs_devices != fs_info->fs_devices)
2673 0 : gen = scrub_dev->generation;
2674 : else
2675 0 : gen = fs_info->last_trans_committed;
2676 :
2677 0 : for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
2678 0 : bytenr = btrfs_sb_offset(i);
2679 0 : if (bytenr + BTRFS_SUPER_INFO_SIZE >
2680 0 : scrub_dev->commit_total_bytes)
2681 : break;
2682 0 : if (!btrfs_check_super_location(scrub_dev, bytenr))
2683 0 : continue;
2684 :
2685 0 : ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2686 0 : if (ret) {
2687 0 : spin_lock(&sctx->stat_lock);
2688 0 : sctx->stat.super_errors++;
2689 0 : spin_unlock(&sctx->stat_lock);
2690 : }
2691 : }
2692 0 : __free_page(page);
2693 0 : return 0;
2694 : }
2695 :
2696 0 : static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2697 : {
2698 0 : if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2699 : &fs_info->scrub_lock)) {
2700 0 : struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2701 :
2702 0 : fs_info->scrub_workers = NULL;
2703 0 : mutex_unlock(&fs_info->scrub_lock);
2704 :
2705 0 : if (scrub_workers)
2706 0 : destroy_workqueue(scrub_workers);
2707 : }
2708 0 : }
2709 :
2710 : /*
2711 : * Get a reference count on fs_info->scrub_workers; start workers if necessary.
2712 : */
2713 0 : static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
2714 : int is_dev_replace)
2715 : {
2716 0 : struct workqueue_struct *scrub_workers = NULL;
2717 0 : unsigned int flags = WQ_FREEZABLE | WQ_UNBOUND;
2718 0 : int max_active = fs_info->thread_pool_size;
2719 0 : int ret = -ENOMEM;
2720 :
2721 0 : if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2722 : return 0;
2723 :
2724 0 : if (is_dev_replace)
2725 0 : scrub_workers = alloc_ordered_workqueue("btrfs-scrub", flags);
2726 : else
2727 0 : scrub_workers = alloc_workqueue("btrfs-scrub", flags, max_active);
2728 0 : if (!scrub_workers)
2729 : return -ENOMEM;
2730 :
2731 0 : mutex_lock(&fs_info->scrub_lock);
2732 0 : if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2733 0 : ASSERT(fs_info->scrub_workers == NULL);
2734 0 : fs_info->scrub_workers = scrub_workers;
2735 0 : refcount_set(&fs_info->scrub_workers_refcnt, 1);
2736 0 : mutex_unlock(&fs_info->scrub_lock);
2737 0 : return 0;
2738 : }
2739 : /* Other thread raced in and created the workers for us */
2740 0 : refcount_inc(&fs_info->scrub_workers_refcnt);
2741 0 : mutex_unlock(&fs_info->scrub_lock);
2742 :
2743 0 : ret = 0;
2744 :
2745 0 : destroy_workqueue(scrub_workers);
2746 0 : return ret;
2747 : }
2748 :
2749 0 : int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2750 : u64 end, struct btrfs_scrub_progress *progress,
2751 : int readonly, int is_dev_replace)
2752 : {
2753 0 : struct btrfs_dev_lookup_args args = { .devid = devid };
2754 0 : struct scrub_ctx *sctx;
2755 0 : int ret;
2756 0 : struct btrfs_device *dev;
2757 0 : unsigned int nofs_flag;
2758 0 : bool need_commit = false;
2759 :
2760 0 : if (btrfs_fs_closing(fs_info))
2761 : return -EAGAIN;
2762 :
2763 : /* At mount time we have ensured nodesize is in the range of [4K, 64K]. */
2764 0 : ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2765 :
2766 : /*
2767 : * SCRUB_MAX_SECTORS_PER_BLOCK is calculated using the largest possible
2768 : * value (max nodesize / min sectorsize), thus nodesize should always
2769 : * be fine.
2770 : */
2771 0 : ASSERT(fs_info->nodesize <=
2772 : SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2773 :
2774 : /* Allocate outside of device_list_mutex */
2775 0 : sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2776 0 : if (IS_ERR(sctx))
2777 0 : return PTR_ERR(sctx);
2778 :
2779 0 : ret = scrub_workers_get(fs_info, is_dev_replace);
2780 0 : if (ret)
2781 0 : goto out_free_ctx;
2782 :
2783 0 : mutex_lock(&fs_info->fs_devices->device_list_mutex);
2784 0 : dev = btrfs_find_device(fs_info->fs_devices, &args);
2785 0 : if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
2786 : !is_dev_replace)) {
2787 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2788 0 : ret = -ENODEV;
2789 0 : goto out;
2790 : }
2791 :
2792 0 : if (!is_dev_replace && !readonly &&
2793 0 : !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) {
2794 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2795 0 : btrfs_err_in_rcu(fs_info,
2796 : "scrub on devid %llu: filesystem on %s is not writable",
2797 : devid, btrfs_dev_name(dev));
2798 0 : ret = -EROFS;
2799 0 : goto out;
2800 : }
2801 :
2802 0 : mutex_lock(&fs_info->scrub_lock);
2803 0 : if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
2804 0 : test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
2805 0 : mutex_unlock(&fs_info->scrub_lock);
2806 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2807 0 : ret = -EIO;
2808 0 : goto out;
2809 : }
2810 :
2811 0 : down_read(&fs_info->dev_replace.rwsem);
2812 0 : if (dev->scrub_ctx ||
2813 0 : (!is_dev_replace &&
2814 0 : btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2815 0 : up_read(&fs_info->dev_replace.rwsem);
2816 0 : mutex_unlock(&fs_info->scrub_lock);
2817 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2818 0 : ret = -EINPROGRESS;
2819 0 : goto out;
2820 : }
2821 0 : up_read(&fs_info->dev_replace.rwsem);
2822 :
2823 0 : sctx->readonly = readonly;
2824 0 : dev->scrub_ctx = sctx;
2825 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2826 :
2827 : /*
2828 : * By checking @scrub_pause_req here, we can avoid a
2829 : * race between committing a transaction and scrubbing.
2830 : */
2831 0 : __scrub_blocked_if_needed(fs_info);
2832 0 : atomic_inc(&fs_info->scrubs_running);
2833 0 : mutex_unlock(&fs_info->scrub_lock);
2834 :
2835 : /*
2836 : * In order to avoid deadlock with reclaim when there is a transaction
2837 : * trying to pause scrub, make sure we use GFP_NOFS for all the
2838 : * allocations done at btrfs_scrub_sectors() and scrub_sectors_for_parity()
2839 : * invoked by our callees. The pausing request is done when the
2840 : * transaction commit starts, and it blocks the transaction until scrub
2841 : * is paused (done at specific points at scrub_stripe() or right above
2842 : * before incrementing fs_info->scrubs_running).
2843 : */
2844 0 : nofs_flag = memalloc_nofs_save();
2845 0 : if (!is_dev_replace) {
2846 0 : u64 old_super_errors;
2847 :
2848 0 : spin_lock(&sctx->stat_lock);
2849 0 : old_super_errors = sctx->stat.super_errors;
2850 0 : spin_unlock(&sctx->stat_lock);
2851 :
2852 0 : btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2853 : /*
2854 : * By holding the device list mutex, we can
2855 : * kick off writing supers in log tree sync.
2856 : */
2857 0 : mutex_lock(&fs_info->fs_devices->device_list_mutex);
2858 0 : ret = scrub_supers(sctx, dev);
2859 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2860 :
2861 0 : spin_lock(&sctx->stat_lock);
2862 : /*
2863 : * Super block errors found, but we cannot commit a transaction
2864 : * in the current context, since btrfs_commit_transaction() needs
2865 : * to pause the currently running scrub (held by ourselves).
2866 : */
2867 0 : if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2868 0 : need_commit = true;
2869 0 : spin_unlock(&sctx->stat_lock);
2870 : }
2871 :
2872 0 : if (!ret)
2873 0 : ret = scrub_enumerate_chunks(sctx, dev, start, end);
2874 0 : memalloc_nofs_restore(nofs_flag);
2875 :
2876 0 : atomic_dec(&fs_info->scrubs_running);
2877 0 : wake_up(&fs_info->scrub_pause_wait);
2878 :
2879 0 : if (progress)
2880 0 : memcpy(progress, &sctx->stat, sizeof(*progress));
2881 :
2882 0 : if (!is_dev_replace)
2883 0 : btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2884 : ret ? "not finished" : "finished", devid, ret);
2885 :
2886 0 : mutex_lock(&fs_info->scrub_lock);
2887 0 : dev->scrub_ctx = NULL;
2888 0 : mutex_unlock(&fs_info->scrub_lock);
2889 :
2890 0 : scrub_workers_put(fs_info);
2891 0 : scrub_put_ctx(sctx);
2892 :
2893 : /*
2894 : * We found some super block errors before; now try to force a
2895 : * transaction commit, as scrub has finished.
2896 : */
2897 0 : if (need_commit) {
2898 0 : struct btrfs_trans_handle *trans;
2899 :
2900 0 : trans = btrfs_start_transaction(fs_info->tree_root, 0);
2901 0 : if (IS_ERR(trans)) {
2902 0 : ret = PTR_ERR(trans);
2903 0 : btrfs_err(fs_info,
2904 : "scrub: failed to start transaction to fix super block errors: %d", ret);
2905 0 : return ret;
2906 : }
2907 0 : ret = btrfs_commit_transaction(trans);
2908 0 : if (ret < 0)
2909 0 : btrfs_err(fs_info,
2910 : "scrub: failed to commit transaction to fix super block errors: %d", ret);
2911 : }
2912 : return ret;
2913 0 : out:
2914 0 : scrub_workers_put(fs_info);
2915 0 : out_free_ctx:
2916 0 : scrub_free_ctx(sctx);
2917 :
2918 0 : return ret;
2919 : }
2920 :
2921 0 : void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2922 : {
2923 0 : mutex_lock(&fs_info->scrub_lock);
2924 0 : atomic_inc(&fs_info->scrub_pause_req);
2925 0 : while (atomic_read(&fs_info->scrubs_paused) !=
2926 : atomic_read(&fs_info->scrubs_running)) {
2927 0 : mutex_unlock(&fs_info->scrub_lock);
2928 0 : wait_event(fs_info->scrub_pause_wait,
2929 : atomic_read(&fs_info->scrubs_paused) ==
2930 : atomic_read(&fs_info->scrubs_running));
2931 0 : mutex_lock(&fs_info->scrub_lock);
2932 : }
2933 0 : mutex_unlock(&fs_info->scrub_lock);
2934 0 : }
2935 :
2936 0 : void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
2937 : {
2938 0 : atomic_dec(&fs_info->scrub_pause_req);
2939 0 : wake_up(&fs_info->scrub_pause_wait);
2940 0 : }
2941 :
2942 0 : int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
2943 : {
2944 0 : mutex_lock(&fs_info->scrub_lock);
2945 0 : if (!atomic_read(&fs_info->scrubs_running)) {
2946 0 : mutex_unlock(&fs_info->scrub_lock);
2947 0 : return -ENOTCONN;
2948 : }
2949 :
2950 0 : atomic_inc(&fs_info->scrub_cancel_req);
2951 0 : while (atomic_read(&fs_info->scrubs_running)) {
2952 0 : mutex_unlock(&fs_info->scrub_lock);
2953 0 : wait_event(fs_info->scrub_pause_wait,
2954 : atomic_read(&fs_info->scrubs_running) == 0);
2955 0 : mutex_lock(&fs_info->scrub_lock);
2956 : }
2957 0 : atomic_dec(&fs_info->scrub_cancel_req);
2958 0 : mutex_unlock(&fs_info->scrub_lock);
2959 :
2960 0 : return 0;
2961 : }
2962 :
2963 0 : int btrfs_scrub_cancel_dev(struct btrfs_device *dev)
2964 : {
2965 0 : struct btrfs_fs_info *fs_info = dev->fs_info;
2966 0 : struct scrub_ctx *sctx;
2967 :
2968 0 : mutex_lock(&fs_info->scrub_lock);
2969 0 : sctx = dev->scrub_ctx;
2970 0 : if (!sctx) {
2971 0 : mutex_unlock(&fs_info->scrub_lock);
2972 0 : return -ENOTCONN;
2973 : }
2974 0 : atomic_inc(&sctx->cancel_req);
2975 0 : while (dev->scrub_ctx) {
2976 0 : mutex_unlock(&fs_info->scrub_lock);
2977 0 : wait_event(fs_info->scrub_pause_wait,
2978 : dev->scrub_ctx == NULL);
2979 0 : mutex_lock(&fs_info->scrub_lock);
2980 : }
2981 0 : mutex_unlock(&fs_info->scrub_lock);
2982 :
2983 0 : return 0;
2984 : }
2985 :
2986 0 : int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
2987 : struct btrfs_scrub_progress *progress)
2988 : {
2989 0 : struct btrfs_dev_lookup_args args = { .devid = devid };
2990 0 : struct btrfs_device *dev;
2991 0 : struct scrub_ctx *sctx = NULL;
2992 :
2993 0 : mutex_lock(&fs_info->fs_devices->device_list_mutex);
2994 0 : dev = btrfs_find_device(fs_info->fs_devices, &args);
2995 0 : if (dev)
2996 0 : sctx = dev->scrub_ctx;
2997 0 : if (sctx)
2998 0 : memcpy(progress, &sctx->stat, sizeof(*progress));
2999 0 : mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3000 :
3001 0 : return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3002 : }