/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <linux/cleanup.h>
#include <asm/processor.h>
#include <linux/cpumask.h>
#include <linux/context_tracking_irq.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
#define ulong2long(a)		(*(long *)(&(a)))
#define USHORT_CMP_GE(a, b)	(USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
#define USHORT_CMP_LT(a, b)	(USHRT_MAX / 2 < (unsigned short)((a) - (b)))

/* Exported common interfaces */
void call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier_tasks(void);
void rcu_barrier_tasks_rude(void);
void synchronize_rcu(void);

struct rcu_gp_oldstate;
unsigned long get_completed_synchronize_rcu(void);
void get_completed_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);

// Maximum number of unsigned long values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_OLDSTATE 2

/**
 * same_state_synchronize_rcu - Are two old-state values identical?
 * @oldstate1: First old-state value.
 * @oldstate2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or
 * get_completed_synchronize_rcu().  Returns @true if the two values are
 * identical and @false otherwise.  This allows structures whose lifetimes
 * are tracked by old-state values to push these values to a list header,
 * allowing those structures to be slightly smaller.
 */
static inline bool same_state_synchronize_rcu(unsigned long oldstate1, unsigned long oldstate2)
{
	return oldstate1 == oldstate2;
}
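
/*
 * Illustrative only: a sketch of how same_state_synchronize_rcu() might
 * combine with the polled grace-period API declared in rcutree.h and
 * rcutiny.h.  "struct foo" and the foo_*() functions are hypothetical,
 * not part of this API.
 *
 *	struct foo {
 *		unsigned long rcu_gp_state;
 *	};
 *
 *	void foo_retire(struct foo *fp)
 *	{
 *		// Record the grace period this object must wait for.
 *		fp->rcu_gp_state = get_state_synchronize_rcu();
 *	}
 *
 *	bool foo_can_reuse(struct foo *fp)
 *	{
 *		return poll_state_synchronize_rcu(fp->rcu_gp_state);
 *	}
 *
 *	// Objects whose cookies compare equal can share a single
 *	// poll_state_synchronize_rcu() check:
 *	if (same_state_synchronize_rcu(fp->rcu_gp_state, fq->rcu_gp_state) &&
 *	    foo_can_reuse(fp))
 *		reuse_both(fp, fq);
 */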

#ifdef CONFIG_PREEMPT_RCU

void __rcu_read_lock(void);
void __rcu_read_unlock(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif

static inline void __rcu_read_lock(void)
{
	preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
	preempt_enable();
	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
		rcu_read_unlock_strict();
}

static inline int rcu_preempt_depth(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_RCU_LAZY
void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func);
#else
static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);
}
#endif
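
/*
 * Illustrative only: the usual call_rcu() idiom, with the callback using
 * container_of() to obtain the enclosing structure.  "struct foo",
 * foo_reclaim(), and foo_release() are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int a;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		call_rcu(&fp->rcu, foo_reclaim);
 *	}
 *
 * On CONFIG_RCU_LAZY kernels, call_rcu_hurry() may be substituted when
 * the callback should not be subject to lazy batching.
 */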

/* Internal to kernel */
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
void rcu_report_dead(unsigned int cpu);
void rcutree_migrate_callbacks(int cpu);

#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
#else
static inline void rcu_init_tasks_generic(void) { }
#endif

#ifdef CONFIG_RCU_STALL_COMMON
void rcu_sysrq_start(void);
void rcu_sysrq_end(void);
#else /* #ifdef CONFIG_RCU_STALL_COMMON */
static inline void rcu_sysrq_start(void) { }
static inline void rcu_sysrq_end(void) { }
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

#if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK))
void rcu_irq_work_resched(void);
#else
static inline void rcu_irq_work_resched(void) { }
#endif

#ifdef CONFIG_RCU_NOCB_CPU
void rcu_init_nohz(void);
int rcu_nocb_cpu_offload(int cpu);
int rcu_nocb_cpu_deoffload(int cpu);
void rcu_nocb_flush_deferred_wakeup(void);
#else /* #ifdef CONFIG_RCU_NOCB_CPU */
static inline void rcu_init_nohz(void) { }
static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; }
static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; }
static inline void rcu_nocb_flush_deferred_wakeup(void) { }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */

/*
 * Note a quasi-voluntary context switch for RCU-tasks's benefit.
 * This is a macro rather than an inline function to avoid #include hell.
 */
#ifdef CONFIG_TASKS_RCU_GENERIC

# ifdef CONFIG_TASKS_RCU
# define rcu_tasks_classic_qs(t, preempt)				\
	do {								\
		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout))	\
			WRITE_ONCE((t)->rcu_tasks_holdout, false);	\
	} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
# else
# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
# define call_rcu_tasks call_rcu
# define synchronize_rcu_tasks synchronize_rcu
# endif

# ifdef CONFIG_TASKS_TRACE_RCU
// Bits for ->trc_reader_special.b.need_qs field.
#define TRC_NEED_QS		0x1  // Task needs a quiescent state.
#define TRC_NEED_QS_CHECKED	0x2  // Task has been checked for needing quiescent state.

u8 rcu_trc_cmpxchg_need_qs(struct task_struct *t, u8 old, u8 new);
void rcu_tasks_trace_qs_blkd(struct task_struct *t);

# define rcu_tasks_trace_qs(t)						\
	do {								\
		int ___rttq_nesting = READ_ONCE((t)->trc_reader_nesting); \
									\
		if (likely(!READ_ONCE((t)->trc_reader_special.b.need_qs)) && \
		    likely(!___rttq_nesting)) {				\
			rcu_trc_cmpxchg_need_qs((t), 0, TRC_NEED_QS_CHECKED); \
		} else if (___rttq_nesting && ___rttq_nesting != INT_MIN && \
			   !READ_ONCE((t)->trc_reader_special.b.blocked)) { \
			rcu_tasks_trace_qs_blkd(t);			\
		}							\
	} while (0)
# else
# define rcu_tasks_trace_qs(t) do { } while (0)
# endif

#define rcu_tasks_qs(t, preempt)					\
	do {								\
		rcu_tasks_classic_qs((t), (preempt));			\
		rcu_tasks_trace_qs(t);					\
	} while (0)

# ifdef CONFIG_TASKS_RUDE_RCU
void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks_rude(void);
# endif

#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_stop(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
#define rcu_tasks_classic_qs(t, preempt) do { } while (0)
#define rcu_tasks_qs(t, preempt) do { } while (0)
#define rcu_note_voluntary_context_switch(t) do { } while (0)
#define call_rcu_tasks call_rcu
#define synchronize_rcu_tasks synchronize_rcu
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_stop(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */

/**
 * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
 *
 * As an accident of implementation, an RCU Tasks Trace grace period also
 * acts as an RCU grace period.  However, this could change at any time.
 * Code relying on this accident must call this function to verify that
 * this accident is still happening.
 *
 * You have been warned!
 */
static inline bool rcu_trace_implies_rcu_gp(void) { return true; }

/**
 * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
 * machinery were to be shut off, as some advocate for PREEMPTION kernels.
 */
#define cond_resched_tasks_rcu_qs() \
do { \
	rcu_tasks_qs(current, false); \
	cond_resched(); \
} while (0)
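
/*
 * Illustrative only: a sketch of a long-running kthread loop (the
 * do_one_unit_of_work() helper is a hypothetical placeholder) that
 * reports quiescent states so as not to indefinitely extend RCU-tasks
 * grace periods.
 *
 *	static int foo_kthread(void *arg)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_one_unit_of_work();
 *			cond_resched_tasks_rcu_qs();
 *		}
 *		return 0;
 *	}
 */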

/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * The init_rcu_head_on_stack() and destroy_rcu_head_on_stack() calls
 * are needed for dynamic initialization and destruction of rcu_head
 * on the stack, and init_rcu_head()/destroy_rcu_head() are needed for
 * dynamic initialization and destruction of statically allocated rcu_head
 * structures.  However, rcu_head structures allocated dynamically in the
 * heap don't need any initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head);
void destroy_rcu_head(struct rcu_head *head);
void init_rcu_head_on_stack(struct rcu_head *head);
void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head(struct rcu_head *head) { }
static inline void destroy_rcu_head(struct rcu_head *head) { }
static inline void init_rcu_head_on_stack(struct rcu_head *head) { }
static inline void destroy_rcu_head_on_stack(struct rcu_head *head) { }
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void) { return true; }
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern struct lockdep_map rcu_callback_map;

#ifdef CONFIG_DEBUG_LOCK_ALLOC

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
	lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
	lock_release(map, _THIS_IP_);
}

int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
int rcu_read_lock_bh_held(void);
int rcu_read_lock_sched_held(void);
int rcu_read_lock_any_held(void);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)		do { } while (0)
# define rcu_lock_release(a)		do { } while (0)

static inline int rcu_read_lock_held(void)
{
	return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
	return 1;
}

static inline int rcu_read_lock_sched_held(void)
{
	return !preemptible();
}

static inline int rcu_read_lock_any_held(void)
{
	return !preemptible();
}

static inline int debug_lockdep_rcu_enabled(void)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

/**
 * RCU_LOCKDEP_WARN - emit lockdep splat if specified condition is met
 * @c: condition to check
 * @s: informative message
 *
 * This checks debug_lockdep_rcu_enabled() before checking (c) to
 * prevent early boot splats due to lockdep not yet being initialized,
 * and rechecks it after checking (c) to prevent false-positive splats
 * due to races with lockdep being disabled.  See commit 3066820034b5dd
 * ("rcu: Reject RCU_LOCKDEP_WARN() false positives") for more detail.
 */
#define RCU_LOCKDEP_WARN(c, s)						\
	do {								\
		static bool __section(".data.unlikely") __warned;	\
		if (debug_lockdep_rcu_enabled() && (c) &&		\
		    debug_lockdep_rcu_enabled() && !__warned) {		\
			__warned = true;				\
			lockdep_rcu_suspicious(__FILE__, __LINE__, s);	\
		}							\
	} while (0)
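
/*
 * Illustrative only: a sketch of how code might use RCU_LOCKDEP_WARN()
 * to complain about a missing rcu_read_lock().  foo_lookup() and "gp"
 * are hypothetical.
 *
 *	static struct foo *foo_lookup(void)
 *	{
 *		RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
 *				 "foo_lookup() needs rcu_read_lock()");
 *		return rcu_dereference(gp);
 *	}
 */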

#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map),
			 "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

#define rcu_sleep_check()						\
	do {								\
		rcu_preempt_sleep_check();				\
		if (!IS_ENABLED(CONFIG_PREEMPT_RT))			\
			RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
					 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
				 "Illegal context switch in RCU-sched read-side critical section"); \
	} while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define RCU_LOCKDEP_WARN(c, s) do { } while (0 && (c))
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple pointers markings to match different RCU implementations
 * (e.g., __srcu), should this make sense in the future.
 */

#ifdef __CHECKER__
#define rcu_check_sparse(p, space) \
	((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_check_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __unrcu_pointer(p, local)					\
({									\
	typeof(*p) *local = (typeof(*p) *__force)(p);			\
	rcu_check_sparse(p, __rcu);					\
	((typeof(*p) __force __kernel *)(local));			\
})
/**
 * unrcu_pointer - mark a pointer as not being RCU protected
 * @p: pointer needing to lose its __rcu property
 *
 * Converts @p from an __rcu pointer to a __kernel pointer.
 * This allows an __rcu pointer to be used with xchg() and friends.
 */
#define unrcu_pointer(p) __unrcu_pointer(p, __UNIQUE_ID(rcu))

#define __rcu_access_pointer(p, local, space) \
({ \
	typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_check(p, local, c, space) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(*p) *local = (typeof(*p) *__force)READ_ONCE(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define __rcu_dereference_protected(p, local, c, space) \
({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
})
#define __rcu_dereference_raw(p, local) \
({ \
	/* Dependency order vs. p above. */ \
	typeof(p) local = READ_ONCE(p); \
	((typeof(*p) __force __kernel *)(local)); \
})
#define rcu_dereference_raw(p) __rcu_dereference_raw(p, __UNIQUE_ID(rcu))

/**
 * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
 * @v: The value to statically initialize with.
 */
#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)

/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 *
 * Note that rcu_assign_pointer() evaluates each of its arguments only
 * once, appearances notwithstanding.  One of the "extra" evaluations
 * is in typeof() and the other visible only to sparse (__CHECKER__),
 * neither of which actually executes the argument.  As with most cpp
 * macros, this execute-arguments-only-once property is important, so
 * please be careful when making changes to rcu_assign_pointer() and the
 * other macros that it invokes.
 */
#define rcu_assign_pointer(p, v)					      \
do {									      \
	uintptr_t _r_a_p__v = (uintptr_t)(v);				      \
	rcu_check_sparse(p, __rcu);					      \
									      \
	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)	      \
		WRITE_ONCE((p), (typeof(p))(_r_a_p__v));		      \
	else								      \
		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
} while (0)
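
/*
 * Illustrative only: initialize, then publish.  "gp" and "struct foo"
 * are hypothetical.
 *
 *	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);
 *
 *	if (!fp)
 *		return -ENOMEM;
 *	fp->a = 1;			// All initialization must happen
 *	fp->b = 2;			// before the pointer is published.
 *	rcu_assign_pointer(gp, fp);
 */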

/**
 * rcu_replace_pointer() - replace an RCU pointer, returning its old value
 * @rcu_ptr: RCU pointer, whose old value is returned
 * @ptr: regular pointer
 * @c: the lockdep conditions under which the dereference will take place
 *
 * Perform a replacement, where @rcu_ptr is an RCU-annotated
 * pointer and @c is the lockdep argument that is passed to the
 * rcu_dereference_protected() call used to read that pointer.  The old
 * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
 */
#define rcu_replace_pointer(rcu_ptr, ptr, c)				\
({									\
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c));	\
	rcu_assign_pointer((rcu_ptr), (ptr));				\
	__tmp;								\
})
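
/*
 * Illustrative only: swap in a new version of an RCU-protected
 * structure under a lock, then free the old version after a grace
 * period.  "gp", "newp", and "foo_lock" are hypothetical.
 *
 *	struct foo *old;
 *
 *	spin_lock(&foo_lock);
 *	old = rcu_replace_pointer(gp, newp, lockdep_is_held(&foo_lock));
 *	spin_unlock(&foo_lock);
 *	if (old) {
 *		synchronize_rcu();	// Only after the lock is dropped!
 *		kfree(old);
 *	}
 */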

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * lockdep checks for being in an RCU read-side critical section.  This is
 * useful when the value of this pointer is accessed, but the pointer is
 * not dereferenced, for example, when testing an RCU-protected pointer
 * against NULL.  Although rcu_access_pointer() may also be used in cases
 * where update-side locks prevent the value of the pointer from changing,
 * you should instead use rcu_dereference_protected() for this use case.
 * Within an RCU read-side critical section, there is little reason to
 * use rcu_access_pointer().
 *
 * It is usually best to test the rcu_access_pointer() return value
 * directly in order to avoid accidental dereferences being introduced
 * by later inattentive changes.  In other words, assigning the
 * rcu_access_pointer() return value to a local variable results in an
 * accident waiting to happen.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as is
 * the case in the context of the RCU callback that is freeing up the data,
 * or after a synchronize_rcu() returns.  This can be useful when tearing
 * down multi-linked structures after a grace period has elapsed.  However,
 * rcu_dereference_protected() is normally preferred for this use case.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __UNIQUE_ID(rcu), __rcu)
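
/*
 * Illustrative only: testing an RCU-protected pointer without
 * dereferencing it, so no RCU read-side critical section is needed
 * ("gp" is hypothetical).
 *
 *	if (!rcu_access_pointer(gp))
 *		return;		// Nothing published, nothing to do.
 */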

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *	bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *					      atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_held(), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().  However,
 * please note that starting in v5.0 kernels, vanilla RCU grace periods
 * wait for local_bh_disable() regions of code in addition to regions of
 * code demarked by rcu_read_lock() and rcu_read_unlock().  This means
 * that synchronize_rcu(), call_rcu(), and friends all take not only
 * rcu_read_lock() but also rcu_read_lock_bh() into account.
 */
#define rcu_dereference_bh_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_bh_held(), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 * However, please note that starting in v5.0 kernels, vanilla RCU grace
 * periods wait for preempt_disable() regions of code in addition to
 * regions of code demarked by rcu_read_lock() and rcu_read_unlock().
 * This means that synchronize_rcu(), call_rcu(), and friends all take not
 * only rcu_read_lock() but also rcu_read_lock_sched() into account.
 */
#define rcu_dereference_sched_check(p, c) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), \
				(c) || rcu_read_lock_sched_held(), \
				__rcu)

/*
 * The tracing infrastructure traces RCU (we want that), but unfortunately
 * some of the RCU checks cause tracing to lock up the system.
 *
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
#define rcu_dereference_raw_check(p) \
	__rcu_dereference_check((p), __UNIQUE_ID(rcu), 1, __rcu)

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * the READ_ONCE().  This is useful in cases where update-side locks
 * prevent the value of the pointer from changing.  Please note that this
 * primitive does *not* prevent the compiler from repeating this reference
 * or combining it with other references, so it should not be used without
 * protection of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
	__rcu_dereference_protected((p), __UNIQUE_ID(rcu), (c), __rcu)


/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)
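
/*
 * Illustrative only: the canonical reader idiom built from
 * rcu_read_lock(), rcu_dereference(), and rcu_read_unlock().  The
 * pointer "gp", "struct foo", and do_something_with() are hypothetical.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p->a);	// Must not block here.
 *	rcu_read_unlock();
 *	// "p" must not be dereferenced once rcu_read_unlock() returns.
 */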

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_pointer_handoff() - Hand off a pointer from RCU to other mechanism
 * @p: The pointer to hand off
 *
 * This is simply an identity function, but it documents where a pointer
 * is handed off from RCU to some other synchronization mechanism, for
 * example, reference counting or locking.  In C11, it would map to
 * kill_dependency().  It could be used as follows::
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	long_lived = is_long_lived(p);
 *	if (long_lived) {
 *		if (!atomic_inc_not_zero(p->refcnt))
 *			long_lived = false;
 *		else
 *			p = rcu_pointer_handoff(p);
 *	}
 *	rcu_read_unlock();
 */
#define rcu_pointer_handoff(p) (p)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all of the other CPUs exit their critical sections.
 *
 * In v5.0 and later kernels, synchronize_rcu() and call_rcu() also
 * wait for regions of code with preemption disabled, including regions of
 * code with interrupts or softirqs disabled.  In pre-v5.0 kernels, which
 * define synchronize_sched(), only code enclosed within rcu_read_lock()
 * and rcu_read_unlock() is guaranteed to be waited for.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPTION kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal.  Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
 * read-side critical sections may be preempted and they may also block, but
 * only when acquiring spinlocks that are subject to priority inheritance.
 */
static __always_inline void rcu_read_lock(void)
{
	__rcu_read_lock();
	__acquire(RCU);
	rcu_lock_acquire(&rcu_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * In almost all situations, rcu_read_unlock() is immune from deadlock.
 * In recent kernels that have consolidated synchronize_sched() and
 * synchronize_rcu_bh() into synchronize_rcu(), this deadlock immunity
 * also extends to the scheduler's runqueue and priority-inheritance
 * spinlocks, courtesy of the quiescent-state deferral that is carried
 * out when rcu_read_unlock() is invoked with interrupts disabled.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock() used illegally while idle");
	__release(RCU);
	__rcu_read_unlock();
	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
}

/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is equivalent to rcu_read_lock(), but also disables softirqs.
 * Note that anything else that disables softirqs can also serve as an RCU
 * read-side critical section.  However, please note that this equivalence
 * applies only to v5.0 and later.  Before v5.0, rcu_read_lock() and
 * rcu_read_lock_bh() were unrelated.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
	local_bh_disable();
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
}

/**
 * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
	local_bh_enable();
}
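
/*
 * Illustrative only: an RCU-bh reader, of the sort networking code uses
 * when it must also exclude softirq handlers.  "gp", "struct foo", and
 * process() are hypothetical.
 *
 *	struct foo *p;
 *
 *	rcu_read_lock_bh();
 *	p = rcu_dereference_bh(gp);
 *	if (p)
 *		process(p);
 *	rcu_read_unlock_bh();
 */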

/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is equivalent to rcu_read_lock(), but also disables preemption.
 * Read-side critical sections can also be introduced by anything else that
 * disables preemption, including local_irq_disable() and friends.  However,
 * please note that the equivalence to rcu_read_lock() applies only to
 * v5.0 and later.  Before v5.0, rcu_read_lock() and rcu_read_lock_sched()
 * were unrelated.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
	preempt_disable();
	__acquire(RCU_SCHED);
	rcu_lock_acquire(&rcu_sched_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
	preempt_disable_notrace();
	__acquire(RCU_SCHED);
}

/**
 * rcu_read_unlock_sched() - marks the end of an RCU-classic critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_sched() used illegally while idle");
	rcu_lock_release(&rcu_sched_lock_map);
	__release(RCU_SCHED);
	preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
	__release(RCU_SCHED);
	preempt_enable_notrace();
}

/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.	This use of RCU_INIT_POINTER() is NULLing out the pointer *or*
 * 2.	The caller has taken whatever steps are required to prevent
 *	RCU readers from concurrently accessing this pointer *or*
 * 3.	The referenced data structure has already been exposed to
 *	readers either at compile time or via rcu_assign_pointer() *and*
 *
 *	a.	You have not made *any* reader-visible changes to
 *		this structure since then *or*
 *	b.	It is OK for readers accessing this structure from its
 *		new location to see the old state of the structure.  (For
 *		example, the changes were to statistical counters or to
 *		other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer *after* you have completely initialized
 * the reader-accessible portions of the linked structure.
 *
 * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no
 * ordering guarantees for either the CPU or the compiler.
 */
#define RCU_INIT_POINTER(p, v) \
	do { \
		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
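
/*
 * Illustrative only: RCU_INIT_POINTER() is safe on the internal pointers
 * of a structure that readers cannot yet reach, after which
 * rcu_assign_pointer() publishes the whole structure.  The names are
 * hypothetical.
 *
 *	struct foo *fp = kmalloc(sizeof(*fp), GFP_KERNEL);
 *
 *	if (!fp)
 *		return -ENOMEM;
 *	RCU_INIT_POINTER(fp->next, NULL);	// Not yet reader-visible.
 *	fp->a = 42;
 *	rcu_assign_pointer(gp, fp);		// Now it is.
 */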

/**
 * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
 * @p: The pointer to be initialized.
 * @v: The value to initialize the pointer to.
 *
 * GCC-style initialization for an RCU-protected pointer in a structure field.
 */
#define RCU_POINTER_INITIALIZER(p, v) \
		.p = RCU_INITIALIZER(v)

/*
 * Does the specified offset indicate that the corresponding rcu_head
 * structure can be handled by kvfree_rcu()?
 */
#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree for double-argument invocations.
 * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in kvfree_rcu_arg_2().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * The object to be freed can be allocated either by kmalloc() or
 * kmem_cache_alloc().
 *
 * Note that the allowable offset might decrease in the future.
 *
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
#define kfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
#define kvfree_rcu(ptr, rhf) kvfree_rcu_arg_2(ptr, rhf)
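
/*
 * Illustrative only: the double-argument form in action ("struct foo"
 * and foo_release() are hypothetical).  No reclaim callback is needed,
 * and a module using this need not invoke rcu_barrier() at unload time
 * on behalf of this structure.
 *
 *	struct foo {
 *		int a;
 *		struct rcu_head rcu;	// Within the first 4096 bytes.
 *	};
 *
 *	static void foo_release(struct foo *fp)
 *	{
 *		kfree_rcu(fp, rcu);
 *	}
 */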

/**
 * kfree_rcu_mightsleep() - kfree an object after a grace period.
 * @ptr: pointer to kfree for single-argument invocations.
 *
 * In the head-less variant, only one argument is passed, namely the
 * pointer that is to be freed after a grace period.  The semantic is
 * therefore
 *
 *	kfree_rcu_mightsleep(ptr);
 *
 * where @ptr is the pointer to be freed by kvfree().
 *
 * Please note that this head-less way of freeing may be used only from
 * contexts where might_sleep() is permitted.  Otherwise, please switch
 * to embedding an rcu_head structure within the type of @ptr.
 */
#define kfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
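
/*
 * Illustrative only: the head-less form, which might sleep and thus may
 * be used only where sleeping is legal ("fp" is hypothetical and needs
 * no rcu_head field).
 *
 *	might_sleep();
 *	kfree_rcu_mightsleep(fp);
 */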

#define kvfree_rcu_arg_2(ptr, rhf)					\
do {									\
	typeof (ptr) ___p = (ptr);					\
									\
	if (___p) {							\
		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offsetof(typeof(*(ptr)), rhf))); \
		kvfree_call_rcu(&((___p)->rhf), (void *) (___p));	\
	}								\
} while (0)

#define kvfree_rcu_arg_1(ptr)					\
do {								\
	typeof(ptr) ___p = (ptr);				\
								\
	if (___p)						\
		kvfree_call_rcu(NULL, (void *) (___p));		\
} while (0)

/*
 * Place this after a lock-acquisition primitive to guarantee that
 * an UNLOCK+LOCK pair acts as a full barrier.  This guarantee applies
 * if the UNLOCK and LOCK are executed by the same CPU or if the
 * UNLOCK and LOCK operate on the same lock variable.
 */
#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock()	smp_mb()  /* Full ordering for lock. */
#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock()	do { } while (0)
#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */


/* Has the specified rcu_head structure been handed to call_rcu()? */

/**
 * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
 * @rhp: The rcu_head structure to initialize.
 *
 * If you intend to invoke rcu_head_after_call_rcu() to test whether a
 * given rcu_head structure has already been passed to call_rcu(), then
 * you must also invoke this rcu_head_init() function on it just after
 * allocating that structure.  Calls to this function must not race with
 * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
 */
static inline void rcu_head_init(struct rcu_head *rhp)
{
	rhp->func = (rcu_callback_t)~0L;
}

/**
 * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
 * @rhp: The rcu_head structure to test.
 * @f: The function passed to call_rcu() along with @rhp.
 *
 * Returns @true if the @rhp has been passed to call_rcu() with @f,
 * and @false otherwise.  Emits a warning in any other case, including
 * the case where @rhp has already been invoked after a grace period.
 * Calls to this function must not race with callback invocation.  One way
 * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
 * in an RCU read-side critical section that includes a read-side fetch
 * of the pointer to the structure containing @rhp.
 */
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
	rcu_callback_t func = READ_ONCE(rhp->func);

	if (func == f)
		return true;
	WARN_ON_ONCE(func != (rcu_callback_t)~0L);
	return false;
}
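
/*
 * Illustrative only: pairing rcu_head_init() with
 * rcu_head_after_call_rcu().  "struct foo", "gp", and foo_reclaim() are
 * hypothetical; the test runs under rcu_read_lock() so that it cannot
 * race with callback invocation.
 *
 *	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
 *	if (fp)
 *		rcu_head_init(&fp->rcu);	// Just after allocation.
 *
 *	...
 *
 *	rcu_read_lock();
 *	fp = rcu_dereference(gp);
 *	if (fp && rcu_head_after_call_rcu(&fp->rcu, foo_reclaim))
 *		pr_info("already queued for reclamation\n");
 *	rcu_read_unlock();
 */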

/* kernel/ksysfs.c definitions */
extern int rcu_expedited;
extern int rcu_normal;

DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
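
/*
 * Illustrative only: the scope-based form of an RCU reader enabled by
 * the guard definition above; rcu_read_unlock() runs automatically at
 * end of scope.  "gp" and do_something_with() are hypothetical.
 *
 *	{
 *		guard(rcu)();
 *		p = rcu_dereference(gp);
 *		if (p)
 *			do_something_with(p);
 *	}
 */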

#endif /* __LINUX_RCUPDATE_H */
|