/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at the real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for those configurations, and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
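/*
 * Editor's sketch (not part of the upstream header): callers on the
 * page-allocation path can use cpusets_insane_config() as a cheap
 * static-branch guard, so the extra validation for an unsupportable
 * configuration is only reached once such a configuration has actually
 * been committed, e.g.:
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL))
 *		pr_warn_once("cpuset: mems_allowed may contain only movable nodes\n");
 *
 * The warning text above is illustrative only; see the page allocator
 * for the in-tree usage.
 */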
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
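/*
 * Editor's sketch (not part of the upstream header): the two helpers above
 * are meant to be used as a cookie/retry pair around an operation that
 * depends on current->mems_allowed. A failed attempt is retried only when
 * a concurrent cpuset update changed mems_allowed in the meantime, e.g.:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = __alloc_pages_node(cpuset_mem_spread_node(),
 *					  GFP_KERNEL, 0);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * The specific allocation call is only an example; the loop shape is
 * the point.
 */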
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
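/*
 * Editor's note: set_mems_allowed() is the write side of the
 * mems_allowed_seq seqcount that read_mems_allowed_begin() and
 * read_mems_allowed_retry() sample above. As a sketch, a kernel thread
 * that should not be confined by any cpuset can widen its own mask to
 * every node that has memory:
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */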
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */