Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-or-later
2 : /*
3 : * Copyright (C) 2019-2023 Oracle. All Rights Reserved.
4 : * Author: Darrick J. Wong <djwong@kernel.org>
5 : */
6 : #include "xfs.h"
7 : #include "xfs_fs.h"
8 : #include "xfs_shared.h"
9 : #include "xfs_format.h"
10 : #include "xfs_trans_resv.h"
11 : #include "xfs_log_format.h"
12 : #include "xfs_trans.h"
13 : #include "xfs_mount.h"
14 : #include "xfs_alloc.h"
15 : #include "xfs_ialloc.h"
16 : #include "xfs_health.h"
17 : #include "xfs_btree.h"
18 : #include "xfs_ag.h"
19 : #include "xfs_rtalloc.h"
20 : #include "xfs_inode.h"
21 : #include "xfs_icache.h"
22 : #include "scrub/scrub.h"
23 : #include "scrub/common.h"
24 : #include "scrub/trace.h"
25 : #include "scrub/fscounters.h"
26 :
27 : /*
28 : * FS Summary Counters
29 : * ===================
30 : *
31 : * The basics of filesystem summary counter checking are that we iterate the
32 : * AGs counting the number of free blocks, free space btree blocks, per-AG
33 : * reservations, inodes, delayed allocation reservations, and free inodes.
34 : * Then we compare what we computed against the in-core counters.
35 : *
36 : * However, the reality is that summary counters are a tricky beast to check.
37 : * While we /could/ freeze the filesystem and scramble around the AGs counting
38 : * the free blocks, in practice we prefer not do that for a scan because
39 : * freezing is costly. To get around this, we added a per-cpu counter of the
40 : * delalloc reservations so that we can rotor around the AGs relatively
41 : * quickly, and we allow the counts to be slightly off because we're not taking
42 : * any locks while we do this.
43 : *
44 : * So the first thing we do is warm up the buffer cache in the setup routine by
45 : * walking all the AGs to make sure the incore per-AG structure has been
46 : * initialized. The expected value calculation then iterates the incore per-AG
47 : * structures as quickly as it can. We snapshot the percpu counters before and
48 : * after this operation and use the difference in counter values to guess at
49 : * our tolerance for mismatch between expected and actual counter values.
50 : */
51 :
52 : /*
53 : * Since the expected value computation is lockless but only browses incore
54 : * values, the percpu counters should be fairly close to each other. However,
55 : * we'll allow ourselves to be off by at least this (arbitrary) amount.
56 : */
57 : #define XCHK_FSCOUNT_MIN_VARIANCE (512)
58 :
59 : /*
60 : * Make sure the per-AG structure has been initialized from the on-disk header
61 : * contents and trust that the incore counters match the ondisk counters. (The
62 : * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the
63 : * summary counters after checking all AG headers). Do this from the setup
64 : * function so that the inner AG aggregation loop runs as quickly as possible.
65 : *
66 : * This function runs during the setup phase /before/ we start checking any
67 : * metadata.
68 : */
69 : STATIC int
70 376917 : xchk_fscount_warmup(
71 : struct xfs_scrub *sc)
72 : {
73 376917 : struct xfs_mount *mp = sc->mp;
74 376917 : struct xfs_buf *agi_bp = NULL;
75 376917 : struct xfs_buf *agf_bp = NULL;
76 376917 : struct xfs_perag *pag = NULL;
77 376917 : xfs_agnumber_t agno;
78 376917 : int error = 0;
79 :
80 2011643 : for_each_perag(mp, agno, pag) {
81 1634726 : if (xchk_should_terminate(sc, &error))
82 : break;
83 4904178 : if (xfs_perag_initialised_agi(pag) &&
84 : xfs_perag_initialised_agf(pag))
85 1634726 : continue;
86 :
87 : /* Lock both AG headers. */
88 0 : error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
89 0 : if (error)
90 : break;
91 0 : error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
92 0 : if (error)
93 : break;
94 :
95 : /*
96 : * These are supposed to be initialized by the header read
97 : * function.
98 : */
99 0 : if (!xfs_perag_initialised_agi(pag) ||
100 : !xfs_perag_initialised_agf(pag)) {
101 0 : error = -EFSCORRUPTED;
102 0 : break;
103 : }
104 :
105 0 : xfs_buf_relse(agf_bp);
106 0 : agf_bp = NULL;
107 0 : xfs_buf_relse(agi_bp);
108 0 : agi_bp = NULL;
109 : }
110 :
 : /*
 : * If the loop broke out early, we may still hold AG header buffers
 : * and a perag reference; release whatever is left.
 : */
111 376917 : if (agf_bp)
112 0 : xfs_buf_relse(agf_bp);
113 376917 : if (agi_bp)
114 0 : xfs_buf_relse(agi_bp);
115 376917 : if (pag)
116 0 : xfs_perag_rele(pag);
117 376917 : return error;
118 : }
119 :
 : /*
 : * Freeze the filesystem, claiming the kernel freeze holder slot so that
 : * userspace cannot thaw it out from under us. Returns freeze_super()'s
 : * errno; the result is traced for debugging.
 : */
120 : static inline int
121 34775 : xchk_fsfreeze(
122 : struct xfs_scrub *sc)
123 : {
124 34775 : int error;
125 :
126 34775 : error = freeze_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
127 34775 : trace_xchk_fsfreeze(sc, error);
128 34775 : return error;
129 : }
130 :
 : /* Thaw the filesystem after a scrub-initiated kernel freeze. */
131 : static inline int
132 34775 : xchk_fsthaw(
133 : struct xfs_scrub *sc)
134 : {
135 34775 : int error;
136 :
137 : /* This should always succeed, we have a kernel freeze */
138 34775 : error = thaw_super(sc->mp->m_super, FREEZE_HOLDER_KERNEL);
139 34775 : trace_xchk_fsthaw(sc, error);
140 34775 : return error;
141 : }
142 :
143 : /*
144 : * We couldn't stabilize the filesystem long enough to sample all the variables
145 : * that comprise the summary counters and compare them to the percpu counters.
146 : * We need to disable all writer threads, which means taking the first two
147 : * freeze levels to put userspace to sleep, and the third freeze level to
148 : * prevent background threads from starting new transactions. Take one level
149 : * more to prevent other callers from unfreezing the filesystem while we run.
150 : */
151 : STATIC int
152 34775 : xchk_fscounters_freeze(
153 : struct xfs_scrub *sc)
154 : {
155 34775 : struct xchk_fscounters *fsc = sc->buf;
156 34775 : int error = 0;
157 :
 : /*
 : * Give back the mnt write reference that scrub setup took; holding
 : * it while freezing would presumably deadlock against the freeze
 : * draining writers -- NOTE(review): confirm against setup path.
 : */
158 34775 : if (sc->flags & XCHK_HAVE_FREEZE_PROT) {
159 10994 : sc->flags &= ~XCHK_HAVE_FREEZE_PROT;
160 10994 : mnt_drop_write_file(sc->file);
161 : }
162 :
163 : /* Try to grab a kernel freeze. */
164 34775 : while ((error = xchk_fsfreeze(sc)) == -EBUSY) {
165 0 : if (xchk_should_terminate(sc, &error))
166 0 : return error;
167 :
 : /* Someone else holds the freeze; back off briefly and retry. */
168 0 : delay(HZ / 10);
169 : }
170 34775 : if (error)
171 : return error;
172 :
 : /* Record the freeze so the buf_cleanup callback knows to thaw. */
173 34775 : fsc->frozen = true;
174 34775 : return 0;
175 : }
176 :
177 : /* Thaw the filesystem after checking or repairing fscounters. */
178 : STATIC void
179 376917 : xchk_fscounters_cleanup(
180 : void *buf)
181 : {
182 376917 : struct xchk_fscounters *fsc = buf;
183 376917 : struct xfs_scrub *sc = fsc->sc;
184 376917 : int error;
185 :
 : /* Nothing to undo unless xchk_fscounters_freeze() froze the fs. */
186 376917 : if (!fsc->frozen)
187 : return;
188 :
189 34775 : error = xchk_fsthaw(sc);
190 34775 : if (error)
 : /* Thaw failed: leave fsc->frozen set and scream loudly. */
191 0 : xfs_emerg(sc->mp, "still frozen after scrub, err=%d", error);
192 : else
193 34775 : fsc->frozen = false;
194 : }
195 :
196 : int
197 376917 : xchk_setup_fscounters(
198 : struct xfs_scrub *sc)
199 : {
200 376917 : struct xchk_fscounters *fsc;
201 376917 : int error;
202 :
203 : /*
204 : * If the AGF doesn't track btreeblks, we have to lock the AGF to count
205 : * btree block usage by walking the actual btrees.
206 : */
207 376917 : if (!xfs_has_lazysbcount(sc->mp))
208 0 : xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
209 :
210 376917 : sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
211 376917 : if (!sc->buf)
212 : return -ENOMEM;
 : /*
 : * Register the cleanup callback now so that any early-error return
 : * below still thaws the fs if xchk_fscounters_freeze() ran.
 : */
213 376917 : sc->buf_cleanup = xchk_fscounters_cleanup;
214 376917 : fsc = sc->buf;
215 376917 : fsc->sc = sc;
216 :
217 376917 : xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max);
218 :
219 : /* We must get the incore counters set up before we can proceed. */
220 376917 : error = xchk_fscount_warmup(sc);
221 376917 : if (error)
222 : return error;
223 :
224 : /*
225 : * Pause all writer activity in the filesystem while we're scrubbing to
226 : * reduce the likelihood of background perturbations to the counters
227 : * throwing off our calculations.
228 : */
229 376917 : if (sc->flags & XCHK_TRY_HARDER) {
230 34775 : error = xchk_fscounters_freeze(sc);
231 34775 : if (error)
232 : return error;
233 : }
234 :
235 376917 : return xfs_trans_alloc_empty(sc->mp, &sc->tp);
236 : }
237 :
238 : /*
239 : * Part 1: Collecting filesystem summary counts. For each AG, we add its
240 : * summary counts (total inodes, free inodes, free data blocks) to an incore
241 : * copy of the overall filesystem summary counts.
242 : *
243 : * To avoid false corruption reports in part 2, any failure in this part must
244 : * set the INCOMPLETE flag even when a negative errno is returned. This care
245 : * must be taken with certain errno values (i.e. EFSBADCRC, EFSCORRUPTED,
246 : * ECANCELED) that are absorbed into a scrub state flag update by
247 : * xchk_*_process_error.
248 : */
249 :
250 : /* Count free space btree blocks manually for pre-lazysbcount filesystems. */
251 : static int
252 0 : xchk_fscount_btreeblks(
253 : struct xfs_scrub *sc,
254 : struct xchk_fscounters *fsc,
255 : xfs_agnumber_t agno)
256 : {
257 0 : xfs_extlen_t blocks;
258 0 : int error;
259 :
260 0 : error = xchk_ag_init_existing(sc, agno, &sc->sa);
261 0 : if (error)
262 0 : goto out_free;
263 :
 : /*
 : * Count the bnobt blocks. The "- 1" presumably excludes one block
 : * per btree that is accounted elsewhere (e.g. the root) --
 : * NOTE(review): confirm against xfs_btree_count_blocks() semantics.
 : */
264 0 : error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
265 0 : if (error)
266 0 : goto out_free;
267 0 : fsc->fdblocks += blocks - 1;
268 :
 : /* Same adjustment for the cntbt. */
269 0 : error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
270 0 : if (error)
271 0 : goto out_free;
272 0 : fsc->fdblocks += blocks - 1;
273 :
274 0 : out_free:
275 0 : xchk_ag_free(sc, &sc->sa);
276 0 : return error;
277 : }
278 :
279 : /*
280 : * Calculate what the global in-core counters ought to be from the incore
281 : * per-AG structure. Callers can compare this to the actual in-core counters
282 : * to estimate by how much both in-core and on-disk counters need to be
283 : * adjusted.
284 : */
285 : STATIC int
286 376917 : xchk_fscount_aggregate_agcounts(
287 : struct xfs_scrub *sc,
288 : struct xchk_fscounters *fsc)
289 : {
290 376917 : struct xfs_mount *mp = sc->mp;
291 376917 : struct xfs_perag *pag;
292 376917 : uint64_t delayed;
293 376917 : xfs_agnumber_t agno;
294 376917 : int tries = 8;
295 376917 : int error = 0;
296 :
 : /* Restart point: rezero the totals and rescan all AGs. */
297 376917 : retry:
298 376917 : fsc->icount = 0;
299 376917 : fsc->ifree = 0;
300 376917 : fsc->fdblocks = 0;
301 :
302 2011631 : for_each_perag(mp, agno, pag) {
303 1634717 : if (xchk_should_terminate(sc, &error))
304 : break;
305 :
306 : /* This somehow got unset since the warmup? */
307 4904142 : if (!xfs_perag_initialised_agi(pag) ||
308 : !xfs_perag_initialised_agf(pag)) {
309 0 : error = -EFSCORRUPTED;
310 0 : break;
311 : }
312 :
313 : /* Count all the inodes */
314 1634714 : fsc->icount += pag->pagi_count;
315 1634714 : fsc->ifree += pag->pagi_freecount;
316 :
317 : /* Add up the free/freelist/bnobt/cntbt blocks */
318 1634714 : fsc->fdblocks += pag->pagf_freeblks;
319 1634714 : fsc->fdblocks += pag->pagf_flcount;
320 1634714 : if (xfs_has_lazysbcount(sc->mp)) {
321 1634714 : fsc->fdblocks += pag->pagf_btreeblks;
322 : } else {
 : /* Pre-lazysbcount: walk the btrees to count blocks. */
323 0 : error = xchk_fscount_btreeblks(sc, fsc, agno);
324 0 : if (error)
325 : break;
326 : }
327 :
328 : /*
329 : * Per-AG reservations are taken out of the incore counters,
330 : * so they must be left out of the free blocks computation.
331 : */
332 1634714 : fsc->fdblocks -= pag->pag_meta_resv.ar_reserved;
333 1634714 : fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved;
334 :
335 : }
 : /* Drop the perag reference if the loop exited early. */
336 376917 : if (pag)
337 3 : xfs_perag_rele(pag);
338 376917 : if (error) {
339 3 : xchk_set_incomplete(sc);
340 3 : return error;
341 : }
342 :
343 : /*
344 : * The global incore space reservation is taken from the incore
345 : * counters, so leave that out of the computation.
346 : */
347 376914 : fsc->fdblocks -= mp->m_resblks_avail;
348 :
349 : /*
350 : * Delayed allocation reservations are taken out of the incore counters
351 : * but not recorded on disk, so leave them and their indlen blocks out
352 : * of the computation.
353 : */
354 376914 : delayed = percpu_counter_sum(&mp->m_delalloc_blks);
355 376914 : fsc->fdblocks -= delayed;
356 :
357 376914 : trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks,
358 : delayed);
359 :
360 :
361 : /* Bail out if the values we compute are totally nonsense. */
362 376914 : if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max ||
363 376914 : fsc->fdblocks > mp->m_sb.sb_dblocks ||
364 376914 : fsc->ifree > fsc->icount_max)
365 : return -EFSCORRUPTED;
366 :
367 : /*
368 : * If ifree > icount then we probably had some perturbation in the
369 : * counters while we were calculating things. We'll try a few times
370 : * to maintain ifree <= icount before giving up.
371 : */
372 376914 : if (fsc->ifree > fsc->icount) {
373 0 : if (tries--)
374 0 : goto retry;
375 : return -EDEADLOCK;
376 : }
377 :
378 : return 0;
379 : }
380 :
381 : #ifdef CONFIG_XFS_RT
 : /*
 : * xfs_rtalloc_query_all() callback: fold one rtbitmap record's free
 : * extent count into the running total. Returns nonzero (via
 : * xchk_should_terminate) to abort the query on a pending signal.
 : */
382 : STATIC int
383 26432303 : xchk_fscount_add_frextent(
384 : struct xfs_mount *mp,
385 : struct xfs_trans *tp,
386 : const struct xfs_rtalloc_rec *rec,
387 : void *priv)
388 : {
389 26432303 : struct xchk_fscounters *fsc = priv;
390 26432303 : int error = 0;
391 :
392 26432303 : fsc->frextents += rec->ar_extcount;
393 :
394 26432303 : xchk_should_terminate(fsc->sc, &error);
395 26432303 : return error;
396 : }
397 :
398 : /* Calculate the number of free realtime extents from the realtime bitmap. */
399 : STATIC int
400 376914 : xchk_fscount_count_frextents(
401 : struct xfs_scrub *sc,
402 : struct xchk_fscounters *fsc)
403 : {
404 376914 : struct xfs_mount *mp = sc->mp;
405 376914 : int error;
406 :
407 376914 : fsc->frextents = 0;
408 376914 : if (!xfs_has_realtime(mp))
409 : return 0;
410 :
 : /* Walk the whole rtbitmap under the rtbitmap inode's shared lock. */
411 167075 : xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
412 167075 : error = xfs_rtalloc_query_all(sc->mp, sc->tp,
413 : xchk_fscount_add_frextent, fsc);
414 167075 : if (error) {
415 1 : xchk_set_incomplete(sc);
 : /* goto is redundant (label follows) but keeps exit paths uniform. */
416 1 : goto out_unlock;
417 : }
418 :
419 167074 : out_unlock:
420 167075 : xfs_iunlock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
421 167075 : return error;
422 : }
423 : #else
 : /* !CONFIG_XFS_RT: no realtime device, so there are no free rt extents. */
424 : STATIC int
425 : xchk_fscount_count_frextents(
426 : struct xfs_scrub *sc,
427 : struct xchk_fscounters *fsc)
428 : {
429 : fsc->frextents = 0;
430 : return 0;
431 : }
432 : #endif /* CONFIG_XFS_RT */
433 :
434 : /*
435 : * Part 2: Comparing filesystem summary counters. All we have to do here is
436 : * sum the percpu counters and compare them to what we've observed.
437 : */
438 :
439 : /*
440 : * Is the @counter reasonably close to the @expected value?
441 : *
442 : * We neither locked nor froze anything in the filesystem while aggregating the
443 : * per-AG data to compute the @expected value, which means that the counter
444 : * could have changed. We know the @old_value of the summation of the counter
445 : * before the aggregation, and we re-sum the counter now. If the expected
446 : * value falls between the two summations, we're ok.
447 : *
448 : * Otherwise, we /might/ have a problem. If the change in the summations is
449 : * more than we want to tolerate, the filesystem is probably busy and we should
450 : * just send back INCOMPLETE and see if userspace will try again.
451 : *
452 : * If we're repairing then we require an exact match.
453 : */
454 : static inline bool
455 1507652 : xchk_fscount_within_range(
456 : struct xfs_scrub *sc,
457 : const int64_t old_value,
458 : struct percpu_counter *counter,
459 : uint64_t expected)
460 : {
461 1507652 : int64_t min_value, max_value;
462 1507652 : int64_t curr_value = percpu_counter_sum(counter);
463 :
464 1507652 : trace_xchk_fscounters_within_range(sc->mp, expected, curr_value,
465 : old_value);
466 :
467 : /* Negative values are always wrong. */
468 1507652 : if (curr_value < 0)
469 : return false;
470 :
471 : /* Exact matches are always ok. */
472 1507652 : if (curr_value == expected)
473 : return true;
474 :
475 141815 : min_value = min(old_value, curr_value);
476 141815 : max_value = max(old_value, curr_value);
477 :
478 : /* Within the before-and-after range is ok. */
 : /*
 : * NOTE(review): expected is uint64_t while min/max_value are int64_t,
 : * so these comparisons promote to unsigned. Safe only if old_value is
 : * non-negative (callers appear to check this first) -- confirm.
 : */
479 141815 : if (expected >= min_value && expected <= max_value)
480 101276 : return true;
481 :
482 : /* Everything else is bad. */
483 : return false;
484 : }
485 :
486 : /* Check the superblock counters. */
487 : int
488 376917 : xchk_fscounters(
489 : struct xfs_scrub *sc)
490 : {
491 376917 : struct xfs_mount *mp = sc->mp;
492 376917 : struct xchk_fscounters *fsc = sc->buf;
493 376917 : int64_t icount, ifree, fdblocks, frextents;
494 376917 : bool try_again = false;
495 376917 : int error;
496 :
497 : /* Snapshot the percpu counters. */
498 376917 : icount = percpu_counter_sum(&mp->m_icount);
499 376917 : ifree = percpu_counter_sum(&mp->m_ifree);
500 376917 : fdblocks = percpu_counter_sum(&mp->m_fdblocks);
501 376917 : frextents = percpu_counter_sum(&mp->m_frextents);
502 :
503 : /* No negative values, please! */
504 376917 : if (icount < 0 || ifree < 0)
505 0 : xchk_set_corrupt(sc);
506 :
507 : /*
508 : * If the filesystem is not frozen, the counter summation calls above
509 : * can race with xfs_mod_freecounter, which subtracts a requested space
510 : * reservation from the counter and undoes the subtraction if that made
511 : * the counter go negative. Therefore, it's possible to see negative
512 : * values here, and we should only flag that as a corruption if we
513 : * froze the fs. This is much more likely to happen with frextents
514 : * since there are no reserved pools.
515 : */
516 376917 : if (fdblocks < 0 || frextents < 0) {
517 0 : if (!fsc->frozen)
518 : return -EDEADLOCK;
519 :
520 0 : xchk_set_corrupt(sc);
521 0 : return 0;
522 : }
523 :
524 : /* See if icount is obviously wrong. */
525 376917 : if (icount < fsc->icount_min || icount > fsc->icount_max)
526 0 : xchk_set_corrupt(sc);
527 :
528 : /* See if fdblocks is obviously wrong. */
529 376917 : if (fdblocks > mp->m_sb.sb_dblocks)
530 0 : xchk_set_corrupt(sc);
531 :
532 : /* See if frextents is obviously wrong. */
533 376917 : if (frextents > mp->m_sb.sb_rextents)
534 0 : xchk_set_corrupt(sc);
535 :
536 : /*
537 : * If ifree exceeds icount by more than the minimum variance then
538 : * something's probably wrong with the counters.
539 : */
540 376917 : if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE)
541 0 : xchk_set_corrupt(sc);
542 :
543 : /* Walk the incore AG headers to calculate the expected counters. */
544 376917 : error = xchk_fscount_aggregate_agcounts(sc, fsc);
545 376917 : if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
546 3 : return error;
547 :
548 : /* Count the free extents counter for rt volumes. */
549 376914 : error = xchk_fscount_count_frextents(sc, fsc);
550 376914 : if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error))
551 1 : return error;
 : /* Aggregation was marked incomplete; nothing left to compare. */
552 376913 : if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
553 : return 0;
554 :
555 : /*
556 : * Compare the in-core counters with whatever we counted. If the fs is
557 : * frozen, we treat the discrepancy as a corruption because the freeze
558 : * should have stabilized the counter values. Otherwise, we need
559 : * userspace to call us back having granted us freeze permission.
560 : */
561 376913 : if (!xchk_fscount_within_range(sc, icount, &mp->m_icount,
562 : fsc->icount)) {
563 4 : if (fsc->frozen)
564 0 : xchk_set_corrupt(sc);
565 : else
566 : try_again = true;
567 : }
568 :
569 376913 : if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) {
570 3620 : if (fsc->frozen)
571 0 : xchk_set_corrupt(sc);
572 : else
573 : try_again = true;
574 : }
575 :
576 376913 : if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks,
577 : fsc->fdblocks)) {
578 34050 : if (fsc->frozen)
579 0 : xchk_set_corrupt(sc);
580 : else
581 : try_again = true;
582 : }
583 :
584 376913 : if (!xchk_fscount_within_range(sc, frextents, &mp->m_frextents,
585 : fsc->frextents)) {
586 2865 : if (fsc->frozen)
587 0 : xchk_set_corrupt(sc);
588 : else
589 : try_again = true;
590 : }
591 :
 : /*
 : * -EDEADLOCK presumably tells the scrub core to retry with
 : * XCHK_TRY_HARDER set, which makes setup freeze the fs -- confirm
 : * against the dispatcher.
 : */
592 374048 : if (try_again)
593 34775 : return -EDEADLOCK;
594 :
595 : return 0;
596 : }
|