/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
#define _LINUX_TOPOLOGY_H

#include <linux/arch_topology.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/topology.h>

#ifndef nr_cpus_node
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
#endif

#define for_each_node_with_cpus(node)			\
	for_each_online_node(node)			\
		if (nr_cpus_node(node))
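
/*
 * Illustrative usage, not part of the original header: count all CPUs
 * that sit on online nodes which actually have CPUs.  A minimal sketch:
 *
 *	int node, total = 0;
 *
 *	for_each_node_with_cpus(node)
 *		total += nr_cpus_node(node);
 */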

int arch_update_cpu_topology(void);

/* Conform to ACPI 2.0 SLIT distance definitions */
#define LOCAL_DISTANCE		10
#define REMOTE_DISTANCE		20
#define DISTANCE_BITS		8
#ifndef node_distance
#define node_distance(from,to)	((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
#endif
#ifndef RECLAIM_DISTANCE
/*
 * If the distance between nodes in a system is larger than RECLAIM_DISTANCE
 * (in whatever arch-specific units node_distance() returns) and
 * node_reclaim_mode is enabled, the VM will only call node_reclaim()
 * on nodes within this distance.
 */
#define RECLAIM_DISTANCE 30
#endif

/*
 * The following tunable allows platforms to override the default node
 * reclaim distance (RECLAIM_DISTANCE) if remote memory accesses are
 * sufficiently fast that the default value actually hurts
 * performance.
 *
 * AMD EPYC machines use this because even though the 2-hop distance
 * is 32 (3.2x slower than a local memory access) performance actually
 * *improves* if allowed to reclaim memory and load balance tasks
 * between NUMA nodes 2 hops apart.
 */
extern int __read_mostly node_reclaim_distance;
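
/*
 * Illustrative sketch, not part of the original header: the VM applies
 * this kind of test when deciding whether a remote node is near enough
 * for reclaim.  The helper name is hypothetical:
 *
 *	static bool node_near_enough(int from, int to)
 *	{
 *		return node_distance(from, to) <= node_reclaim_distance;
 *	}
 */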

#ifndef PENALTY_FOR_NODE_WITH_CPUS
#define PENALTY_FOR_NODE_WITH_CPUS	(1)
#endif

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DECLARE_PER_CPU(int, numa_node);

#ifndef numa_node_id
/* Returns the number of the current Node. */
static inline int numa_node_id(void)
{
	return raw_cpu_read(numa_node);
}
#endif

#ifndef cpu_to_node
static inline int cpu_to_node(int cpu)
{
	return per_cpu(numa_node, cpu);
}
#endif

#ifndef set_numa_node
static inline void set_numa_node(int node)
{
	this_cpu_write(numa_node, node);
}
#endif

#ifndef set_cpu_numa_node
static inline void set_cpu_numa_node(int cpu, int node)
{
	per_cpu(numa_node, cpu) = node;
}
#endif

#else	/* !CONFIG_USE_PERCPU_NUMA_NODE_ID */

/* Returns the number of the current Node. */
#ifndef numa_node_id
static inline int numa_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
#endif

#endif	/* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
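
/*
 * Illustrative usage, not part of the original header: allocate memory
 * on the caller's local node, or on the node of a specific CPU.  The
 * function name is hypothetical:
 *
 *	static void *alloc_near(int cpu, size_t size)
 *	{
 *		int node = (cpu >= 0) ? cpu_to_node(cpu) : numa_node_id();
 *
 *		return kmalloc_node(size, GFP_KERNEL, node);
 *	}
 */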

#ifdef CONFIG_HAVE_MEMORYLESS_NODES

/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
 */
DECLARE_PER_CPU(int, _numa_mem_);

#ifndef set_numa_mem
static inline void set_numa_mem(int node)
{
	this_cpu_write(_numa_mem_, node);
}
#endif

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return raw_cpu_read(_numa_mem_);
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return per_cpu(_numa_mem_, cpu);
}
#endif

#ifndef set_cpu_numa_mem
static inline void set_cpu_numa_mem(int cpu, int node)
{
	per_cpu(_numa_mem_, cpu) = node;
}
#endif

#else	/* !CONFIG_HAVE_MEMORYLESS_NODES */

#ifndef numa_mem_id
/* Returns the number of the nearest Node with memory */
static inline int numa_mem_id(void)
{
	return numa_node_id();
}
#endif

#ifndef cpu_to_mem
static inline int cpu_to_mem(int cpu)
{
	return cpu_to_node(cpu);
}
#endif

#endif	/* [!]CONFIG_HAVE_MEMORYLESS_NODES */
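
/*
 * Illustrative usage, not part of the original header: on configurations
 * with memoryless nodes, numa_mem_id() names the nearest node that has
 * memory, so prefer it over numa_node_id() when choosing an allocation
 * target.  The function name is hypothetical:
 *
 *	static struct page *grab_local_page(gfp_t gfp)
 *	{
 *		return alloc_pages_node(numa_mem_id(), gfp, 0);
 *	}
 */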

#if defined(topology_die_id) && defined(topology_die_cpumask)
#define TOPOLOGY_DIE_SYSFS
#endif
#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
#define TOPOLOGY_CLUSTER_SYSFS
#endif
#if defined(topology_book_id) && defined(topology_book_cpumask)
#define TOPOLOGY_BOOK_SYSFS
#endif
#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
#define TOPOLOGY_DRAWER_SYSFS
#endif

#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu)	((void)(cpu), -1)
#endif
#ifndef topology_die_id
#define topology_die_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_cluster_id
#define topology_cluster_id(cpu)		((void)(cpu), -1)
#endif
#ifndef topology_core_id
#define topology_core_id(cpu)			((void)(cpu), 0)
#endif
#ifndef topology_book_id
#define topology_book_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_drawer_id
#define topology_drawer_id(cpu)			((void)(cpu), -1)
#endif
#ifndef topology_ppin
#define topology_ppin(cpu)			((void)(cpu), 0ull)
#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_core_cpumask
#define topology_core_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_cluster_cpumask
#define topology_cluster_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_book_cpumask
#define topology_book_cpumask(cpu)		cpumask_of(cpu)
#endif
#ifndef topology_drawer_cpumask
#define topology_drawer_cpumask(cpu)		cpumask_of(cpu)
#endif
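
/*
 * Illustrative usage, not part of the original header: thanks to the
 * fallbacks above, generic code can query topology unconditionally;
 * missing identifiers read as -1 (or 0 for cores) and missing cpumasks
 * degenerate to the CPU itself.  The function name is hypothetical:
 *
 *	static void dump_cpu_topology(int cpu)
 *	{
 *		pr_info("cpu%d: pkg %d core %d, %u sibling(s)\n", cpu,
 *			topology_physical_package_id(cpu),
 *			topology_core_id(cpu),
 *			cpumask_weight(topology_sibling_cpumask(cpu)));
 *	}
 */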

#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
{
	return topology_sibling_cpumask(cpu);
}
#endif

static inline const struct cpumask *cpu_cpu_mask(int cpu)
{
	return cpumask_of_node(cpu_to_node(cpu));
}

#ifdef CONFIG_NUMA
int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node);
extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int hops);
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
	return cpumask_nth(cpu, cpus);
}

static inline const struct cpumask *
sched_numa_hop_mask(unsigned int node, unsigned int hops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif	/* CONFIG_NUMA */

/**
 * for_each_numa_hop_mask - iterate over cpumasks of increasing NUMA distance
 *                          from a given node.
 * @mask: the iteration variable.
 * @node: the NUMA node to start the search from.
 *
 * Requires rcu_lock to be held.
 *
 * Yields cpu_online_mask for @node == NUMA_NO_NODE.
 */
#define for_each_numa_hop_mask(mask, node)			\
	for (unsigned int __hops = 0;				\
	     mask = (node != NUMA_NO_NODE || __hops) ?		\
		    sched_numa_hop_mask(node, __hops) :		\
		    cpu_online_mask,				\
	     !IS_ERR_OR_NULL(mask);				\
	     __hops++)
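
/*
 * Illustrative usage, not part of the original header: visit CPUs in
 * order of increasing NUMA distance from @node, skipping CPUs already
 * seen at a nearer hop.  do_something() is a hypothetical callback:
 *
 *	const struct cpumask *mask, *prev = cpu_none_mask;
 *	unsigned int cpu;
 *
 *	rcu_read_lock();
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *	rcu_read_unlock();
 */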

#endif /* _LINUX_TOPOLOGY_H */