/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in the number of subclasses by MAX_LOCKDEP_SUBCLASSES, which
 * at the time of this writing is 8, and we use all 8. Keep this in mind if
 * you decide to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are keyed by rootid+level, this confuses
	 * lockdep when we lock the newly allocated COW'ed block. Handle this
	 * by having a subclass for COW'ed blocks so that lockdep doesn't
	 * complain.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched down to,
	 * such as when searching forward or doing a split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to COW that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those
	 * adjacent blocks. Thus when we go to allocate a new split block we've
	 * already used up all of our available subclasses, so this subclass
	 * exists to handle the case where we need to allocate a new split
	 * block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one. Copying also
	 * has this problem, as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES subclasses, so add this
	 * sentinel here and use a static_assert() to keep us from going over
	 * the limit. As of this writing we're limited to 8, and we're
	 * definitely using all 8, hence this check to keep us from messing up
	 * in the future.
	 */
	BTRFS_NESTING_MAX,
};
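
/*
 * Illustrative sketch (not a real call site) of how a nesting subclass is
 * used: while holding the lock on the block being COWed, the freshly
 * allocated copy of the same rootid+level is locked with a distinct
 * subclass so lockdep does not report a recursive acquisition. The
 * allocation helper here is hypothetical:
 *
 *	btrfs_tree_lock(buf);				// original block
 *	cow = alloc_copy_of(buf);			// hypothetical helper
 *	__btrfs_tree_lock(cow, BTRFS_NESTING_COW);	// same rootid+level, new subclass
 */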

enum btrfs_lockdep_trans_states {
	BTRFS_LOCKDEP_TRANS_COMMIT_START,
	BTRFS_LOCKDEP_TRANS_UNBLOCKED,
	BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED,
	BTRFS_LOCKDEP_TRANS_COMPLETED,
};

/*
 * Lockdep annotation for wait events.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock: The lockdep map corresponding to a wait event
 *
 * This macro is used to annotate a wait event. In this case a thread acquires
 * the lockdep map as writer (exclusive lock) because it has to block until all
 * the threads that hold the lock as readers signal the condition for the wait
 * event and release their locks.
 */
#define btrfs_might_wait_for_event(owner, lock)				\
	do {								\
		rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_);	\
		rwsem_release(&owner->lock##_map, _THIS_IP_);		\
	} while (0)

/*
 * Protection for the resource/condition of a wait event.
 *
 * @owner: The struct where the lockdep map is defined
 * @lock: The lockdep map corresponding to a wait event
 *
 * Many threads can modify the condition for the wait event at the same time
 * and signal the threads that block on the wait event. The threads that modify
 * the condition and do the signaling acquire the lock as readers (shared
 * lock).
 */
#define btrfs_lockdep_acquire(owner, lock)				\
	rwsem_acquire_read(&owner->lock##_map, 0, 0, _THIS_IP_)

/*
 * Used after signaling the condition for a wait event to release the lockdep
 * map held by a reader thread.
 */
#define btrfs_lockdep_release(owner, lock)				\
	rwsem_release(&owner->lock##_map, _THIS_IP_)
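
/*
 * Sketch of the intended pairing, assuming a condition protected by one of
 * these maps (the map name btrfs_ordered_extent, the wait queue and the
 * condition are illustrative, not mandated by this header).
 *
 * Signaling side, holds the map shared while the condition may change:
 *
 *	btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);
 *	... update the condition and wake_up(&wq) ...
 *	btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
 *
 * Waiting side, records an exclusive acquire/release up front so lockdep
 * can cross-check it against the readers above:
 *
 *	btrfs_might_wait_for_event(fs_info, btrfs_ordered_extent);
 *	wait_event(wq, condition);
 */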

/*
 * Macros for the transaction states wait events, similar to the generic wait
 * event macros.
 */
#define btrfs_might_wait_for_state(owner, i)					\
	do {									\
		rwsem_acquire(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_); \
		rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_);	\
	} while (0)

#define btrfs_trans_state_lockdep_acquire(owner, i)				\
	rwsem_acquire_read(&owner->btrfs_state_change_map[i], 0, 0, _THIS_IP_)

#define btrfs_trans_state_lockdep_release(owner, i)				\
	rwsem_release(&owner->btrfs_state_change_map[i], _THIS_IP_)
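
/*
 * For example (illustrative, mirroring the generic macros above): a thread
 * that must wait for the transaction to become unblocked would annotate its
 * wait with
 *
 *	btrfs_might_wait_for_state(fs_info, BTRFS_LOCKDEP_TRANS_UNBLOCKED);
 *
 * while the committing thread brackets the actual state change with
 * btrfs_trans_state_lockdep_acquire()/btrfs_trans_state_lockdep_release()
 * on the same index.
 */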

/* Initialization of the lockdep map */
#define btrfs_lockdep_init_map(owner, lock)				\
	do {								\
		static struct lock_class_key lock##_key;		\
		lockdep_init_map(&owner->lock##_map, #lock, &lock##_key, 0); \
	} while (0)

/* Initialization of the transaction states lockdep maps. */
#define btrfs_state_lockdep_init_map(owner, lock, state)		\
	do {								\
		static struct lock_class_key lock##_key;		\
		lockdep_init_map(&owner->btrfs_state_change_map[state], #lock, \
				 &lock##_key, 0);			\
	} while (0)
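
/*
 * The maps are expected to be initialized once, early in fs_info setup;
 * for example (the map names here are illustrative assumptions):
 *
 *	btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
 *	btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_start,
 *				     BTRFS_LOCKDEP_TRANS_COMMIT_START);
 */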

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_try_read_lock_root_node(struct btrfs_root *root);

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}
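
/*
 * Illustrative sketch of the intended use, assuming a caller that recorded
 * which lock type it took (as the search code does via path->locks[]):
 *
 *	int rw = BTRFS_READ_LOCK;
 *
 *	btrfs_tree_read_lock(eb);
 *	... examine the block ...
 *	btrfs_tree_unlock_rw(eb, rw);	// dispatches to btrfs_tree_read_unlock()
 */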

struct btrfs_drew_lock {
	atomic_t readers;
	atomic_t writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

void btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);
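
/*
 * Sketch of the drew ("double reader-writer exclusion") lock semantics:
 * readers and writers exclude each other, but multiple holders of the same
 * type may proceed concurrently. An illustrative pairing, assuming a caller
 * that embeds a btrfs_drew_lock (shown here as root->snapshot_lock):
 *
 *	btrfs_drew_read_lock(&root->snapshot_lock);
 *	... work that must not overlap any writer ...
 *	btrfs_drew_read_unlock(&root->snapshot_lock);
 *
 *	if (btrfs_drew_try_write_lock(&root->snapshot_lock)) {
 *		... work that must not overlap any reader ...
 *		btrfs_drew_write_unlock(&root->snapshot_lock);
 *	}
 */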

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
						  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
						   struct extent_buffer *eb)
{
}
#endif

#endif