// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

/*
 * We /could/ terminate a scrub/repair operation early. If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if (fatal_signal_pending(current)) {
		if (*error == 0)
			*error = -EINTR;
		return true;
	}
	return false;
}
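
/*
 * Illustrative sketch (not part of the original header): long-running scrub
 * scans typically poll xchk_should_terminate() once per record so that a
 * fatal signal ends the scan promptly. The record iterator and checker below
 * are hypothetical; only xchk_should_terminate() comes from this file.
 *
 *	int error = 0;
 *
 *	while (scan_has_more_records(sc)) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		error = check_one_record(sc);
 *		if (error)
 *			break;
 *	}
 *	return error;
 */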

int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
void xchk_trans_cancel(struct xfs_scrub *sc);

bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);

void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);
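
/*
 * Illustrative sketch (not part of the original header): cross-reference
 * helpers commonly bail out if the relevant btree cursor was never set up or
 * an earlier error already ended the scrub. The cursor name below is an
 * assumption for illustration only:
 *
 *	int error = 0;
 *
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	...perform the cross-referencing lookup against the rmap btree...
 */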

/* Setup functions */
int xchk_setup_agheader(struct xfs_scrub *sc);
int xchk_setup_fs(struct xfs_scrub *sc);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc);
int xchk_setup_inode(struct xfs_scrub *sc);
int xchk_setup_inode_bmap(struct xfs_scrub *sc);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc);
int xchk_setup_directory(struct xfs_scrub *sc);
int xchk_setup_xattr(struct xfs_scrub *sc);
int xchk_setup_symlink(struct xfs_scrub *sc);
int xchk_setup_parent(struct xfs_scrub *sc);
#ifdef CONFIG_XFS_RT
int xchk_setup_rt(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_rt(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_setup_quota(struct xfs_scrub *sc);
#else
static inline int
xchk_setup_quota(struct xfs_scrub *sc)
{
	return -ENOENT;
}
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc);

void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);

/*
 * Grab all AG resources, treating the inability to grab the perag structure as
 * a fs corruption. This is intended for callers checking an ondisk reference
 * to a given AG, which means that the AG must still exist.
 */
static inline int
xchk_ag_init_existing(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error = xchk_ag_init(sc, agno, sa);

	return error == -ENOENT ? -EFSCORRUPTED : error;
}
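
/*
 * Illustrative sketch (not part of the original header): when an ondisk
 * record names an AG number, that AG must still exist, so a failed perag
 * lookup is treated as filesystem corruption rather than "not found". The
 * surrounding error handling is a hypothetical example:
 *
 *	error = xchk_ag_init_existing(sc, agno, &sc->sa);
 *	if (!xchk_process_error(sc, agno, 0, &error))
 *		return error;
 */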

int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_ag_btcur_free(struct xchk_ag *sa);
void xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, bool force_log);
int xchk_iget_for_scrubbing(struct xfs_scrub *sc);
int xchk_setup_inode_contents(struct xfs_scrub *sc, unsigned int resblks);
void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

int xchk_iget(struct xfs_scrub *sc, xfs_ino_t inum, struct xfs_inode **ipp);
int xchk_iget_agi(struct xfs_scrub *sc, xfs_ino_t inum,
		struct xfs_buf **agi_bpp, struct xfs_inode **ipp);
void xchk_irele(struct xfs_scrub *sc, struct xfs_inode *ip);
int xchk_install_handle_inode(struct xfs_scrub *sc, struct xfs_inode *ip);

/*
 * Don't bother cross-referencing if we already found corruption or cross
 * referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}
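
/*
 * Illustrative sketch (not part of the original header): once the primary
 * metadata is already known to be corrupt, further cross-referencing only
 * adds noise, so xref helpers can short-circuit. The wrapper below is
 * hypothetical:
 *
 *	void xchk_xref_example(struct xfs_scrub *sc)
 *	{
 *		if (xchk_skip_xref(sc->sm))
 *			return;
 *		...cross-reference against another btree...
 *	}
 */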

int xchk_metadata_inode_forks(struct xfs_scrub *sc);

/*
 * Setting up a hook to wait for intents to drain is costly -- we have to take
 * the CPU hotplug lock and force an i-cache flush on all CPUs once to set it
 * up, and again to tear it down. These costs add up quickly, so we only want
 * to enable the drain waiter if the drain actually detected a conflict with
 * running intent chains.
 */
static inline bool xchk_need_intent_drain(struct xfs_scrub *sc)
{
	return sc->flags & XCHK_NEED_DRAIN;
}
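
/*
 * Illustrative sketch (not part of the original header): a setup function can
 * defer enabling the expensive drain waiter until a previous pass actually
 * hit a conflict with a running intent chain. XCHK_FSGATES_DRAIN is assumed
 * here to be the matching fsgates flag defined elsewhere in the scrub code:
 *
 *	if (xchk_need_intent_drain(sc))
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */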

void xchk_fsgates_enable(struct xfs_scrub *sc, unsigned int scrub_fshooks);

#endif	/* __XFS_SCRUB_COMMON_H__ */