Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_PERCPU_COUNTER_H
3 : #define _LINUX_PERCPU_COUNTER_H
4 : /*
5 : * A simple "approximate counter" for use in ext2 and ext3 superblocks.
6 : *
7 : * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
8 : */
9 :
10 : #include <linux/spinlock.h>
11 : #include <linux/smp.h>
12 : #include <linux/list.h>
13 : #include <linux/threads.h>
14 : #include <linux/percpu.h>
15 : #include <linux/types.h>
16 :
17 : /* percpu_counter batch for local add or sub */
18 : #define PERCPU_COUNTER_LOCAL_BATCH INT_MAX
19 :
20 : #ifdef CONFIG_SMP
21 :
/*
 * SMP flavour: a central s64 value plus a per-cpu array of s32 deltas.
 * ->count alone is only an approximation; __percpu_counter_sum() produces
 * the accurate total (see lib/percpu_counter.c for the folding rules).
 */
struct percpu_counter {
	raw_spinlock_t lock;	/* serializes updates of ->count (see lib/percpu_counter.c) */
	s64 count;		/* central, approximate aggregate value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-cpu deltas; NULL until initialized */
};
30 :
/* Batch size used by percpu_counter_add()/percpu_counter_compare() below. */
extern int percpu_counter_batch;

/* Core initializer; callers normally use the percpu_counter_init() wrapper. */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

/*
 * Give each init call site its own static lockdep class key, so lockdep can
 * tell otherwise-identical percpu_counter locks apart.
 */
#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
/* Accurate (but expensive) sum over all CPUs. */
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
50 :
51 : static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
52 : {
53 1880096 : return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
54 : }
55 :
56 : static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
57 : {
58 5133402392 : percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
59 217738498 : }
60 :
/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in local per cpu counter and not in fbc->count until
 * local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
 * write efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), needs to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	/* INT_MAX batch: deltas stay per-cpu essentially forever */
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
76 :
77 : static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
78 : {
79 16829298 : s64 ret = __percpu_counter_sum(fbc);
80 16836714 : return ret < 0 ? 0 : ret;
81 : }
82 :
/* Accurate (but expensive) counter value via __percpu_counter_sum(). */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
87 :
/* Cheap, approximate read of the central count only; no locking. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
92 :
93 : /*
94 : * It is possible for the percpu_counter_read() to return a small negative
95 : * number for some counter which should never be negative.
96 : *
97 : */
98 : static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
99 : {
100 : /* Prevent reloads of fbc->count */
101 2078643517 : s64 ret = READ_ONCE(fbc->count);
102 :
103 1951023402 : if (ret >= 0)
104 : return ret;
105 : return 0;
106 : }
107 :
/* True once ->counters has been set up (i.e. after percpu_counter_init()). */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}
112 :
113 : #else /* !CONFIG_SMP */
114 :
/* UP flavour: no per-cpu state needed — the value itself is always exact. */
struct percpu_counter {
	s64 count;
};
118 :
/* UP: nothing to allocate, just seed the value; cannot fail (gfp unused). */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}
125 :
/* UP: nothing was allocated, so there is nothing to tear down. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
129 :
/*
 * UP: setting the counter is a plain store.
 * NOTE(review): unlike percpu_counter_add() below, this is not done under
 * local_irq_save() — presumably callers guarantee exclusion; verify if a
 * new call site updates from interrupt context.
 */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
134 :
135 : static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
136 : {
137 : if (fbc->count > rhs)
138 : return 1;
139 : else if (fbc->count < rhs)
140 : return -1;
141 : else
142 : return 0;
143 : }
144 :
/* UP: @batch is irrelevant because the counter is always exact. */
static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}
150 :
151 : static inline void
152 : percpu_counter_add(struct percpu_counter *fbc, s64 amount)
153 : {
154 : unsigned long flags;
155 :
156 : local_irq_save(flags);
157 : fbc->count += amount;
158 : local_irq_restore(flags);
159 : }
160 :
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}
167 :
/* UP: batching is meaningless on a single CPU; @batch is ignored. */
static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
173 :
/* UP: the plain read is already exact — no per-cpu deltas exist. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
178 :
/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative, so no clamping is done here (unlike the
 * SMP variant).
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
187 :
/* UP: summing degenerates to a (positive-convention) read. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
192 :
/* UP: summing degenerates to a plain read. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
197 :
/* UP: no dynamic state, so the counter is always usable. */
static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}
202 :
/* UP: ->count is always up to date; nothing to flush. */
static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
206 : #endif /* CONFIG_SMP */
207 :
/* Increment the counter by one (default batch semantics). */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
212 :
/* Decrement the counter by one (default batch semantics). */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
217 :
/*
 * Subtract @amount (added as its negation).
 * NOTE(review): negating S64_MIN would overflow; callers presumably never
 * pass it.
 */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
222 :
/* Local-batch subtraction; see the percpu_counter_add_local() comment. */
static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}
228 :
229 : #endif /* _LINUX_PERCPU_COUNTER_H */
|