/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

extern bool lockdep_is_cpuset_held(void);

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This will get enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for those configurations, and this
 * check is meant to guard those checks without any overhead for sane
 * configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
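
/*
 * Usage sketch (hedged; an illustration of the intent above, not a
 * verbatim excerpt from the page allocator): callers hide the extra
 * checks for unsupportable configurations behind the static branch, so
 * sane setups pay only a patched-out jump:
 *
 *	if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
 *		pr_warn_once("cpuset: movable-only mems_allowed cannot satisfy this allocation\n");
 *		return NULL;
 *	}
 */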

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void lockdep_assert_cpuset_lock_held(void);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
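
/*
 * Usage sketch (a hypothetical zonelist walk; the identifiers below are
 * illustrative, not a quote from mm/page_alloc.c): allocation paths ask
 * per zone whether the current task's cpuset permits allocating there.
 * With cpusets disabled the check compiles down to "return true":
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx)
 *		if (cpuset_zone_allowed(zone, gfp_mask))
 *			break;
 *
 * after which "zone" is the first zone this cpuset may allocate from,
 * or the walk ran off the end of the zonelist.
 */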

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
#else
static inline void cpuset_memory_pressure_bump(void) { }
#endif

extern void cpuset_task_status_allowed(struct seq_file *m,
				       struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}
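
/*
 * Usage sketch (modelled on the historical page-cache allocation path;
 * treat the shape as an assumption rather than current mm/filemap.c
 * code): when the cpuset requests page spreading, allocate from the
 * round-robin spread node instead of the local node:
 *
 *	if (cpuset_do_page_mem_spread())
 *		return __alloc_pages_node(cpuset_mem_spread_node(), gfp, 0);
 *	return alloc_pages(gfp, 0);
 */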

extern bool current_cpuset_is_being_rebound(void);

extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
extern void cpuset_reset_sched_domains(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and, depending on the new value, an operation can fail, potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
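
/*
 * The canonical pairing of begin() and retry() (a sketch of the pattern
 * used by allocation paths; the allocation call itself is illustrative):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = __alloc_pages(gfp, order, nid, NULL);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * If mems_allowed changed while the allocation ran, the failure may be
 * artificial and the loop simply retries with the updated mask.
 */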

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
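
/*
 * Writer-side example (hedged; modelled on kernel-thread setup): a
 * kthread that must be able to allocate from any node publishes the
 * full memory nodemask, and the seqcount write section above lets
 * concurrent read_mems_allowed_begin()/retry() readers observe the
 * update safely:
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */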

extern void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask);
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
static inline void lockdep_assert_cpuset_lock_held(void) { }

static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					      struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpuset_cpus_allowed_locked(p, mask);
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void dl_rebuild_rd_accounting(void)
{
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_reset_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
{
	nodes_copy(*mask, node_states[N_MEMORY]);
}
#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */