/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)
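
/*
 * Example (illustrative sketch, not part of this header's API): a csd
 * can be set up statically with CSD_INIT() or at runtime with
 * INIT_CSD(). The callback and variable names below are hypothetical.
 *
 *	static void poke_cpu(void *info)
 *	{
 *		pr_info("poked on CPU %d\n", smp_processor_id());
 *	}
 *
 *	static call_single_data_t poke_csd = CSD_INIT(poke_cpu, NULL);
 *
 * or, for a csd embedded in another structure:
 *
 *	INIT_CSD(&obj->csd, poke_cpu, obj);
 */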

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
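
/*
 * Example (sketch): run a fast, non-blocking function on one CPU and
 * wait for it to finish. @func runs in interrupt context on the target
 * CPU, so it must not sleep. remote_read() is a hypothetical callback.
 *
 *	static void remote_read(void *info)
 *	{
 *		*(unsigned int *)info = smp_processor_id();
 *	}
 *
 *	unsigned int val;
 *	int ret = smp_call_function_single(2, remote_read, &val, 1);
 *
 * smp_call_function_single() returns -ENXIO if the target CPU is not
 * online.
 */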

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, call_single_data_t *csd);
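
/*
 * Example (sketch): fire-and-forget cross call using a caller-owned
 * csd. The csd must not be reused until the previous call completed;
 * smp_call_function_single_async() returns -EBUSY while it is still in
 * flight. kick_fn and kick_csd are hypothetical names.
 *
 *	static void kick_fn(void *info)
 *	{
 *		...	(runs from IPI context on the target CPU)
 *	}
 *
 *	static call_single_data_t kick_csd = CSD_INIT(kick_fn, NULL);
 *
 *	ret = smp_call_function_single_async(cpu, &kick_csd);
 */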

/*
 * Cpus stopping functions in panic. All have default weak definitions.
 * Architecture-dependent code may override them.
 */
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);

/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}
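
/*
 * Example (sketch): run a fast, non-blocking callback on every online
 * CPU and wait for all of them. bump_counter() is hypothetical.
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc(info);
 *	}
 *
 *	atomic_t seen = ATOMIC_INIT(0);
 *	on_each_cpu(bump_counter, &seen, 1);
 *	(here atomic_read(&seen) == num_online_cpus())
 */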

/**
 * on_each_cpu_mask() - Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}
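
/*
 * Example (sketch): limit the cross call to the CPUs of one NUMA node;
 * offline CPUs in the mask are skipped. invalidate_local() is a
 * hypothetical fast, non-blocking callback.
 *
 *	on_each_cpu_mask(cpumask_of_node(nid), invalidate_local, NULL, true);
 */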

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.
 * May be used during early boot while early_boot_irqs_disabled is
 * set. Use local_irq_save/restore() instead of local_irq_disable/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
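
/*
 * Example (sketch): only send IPIs to CPUs that actually have work, as
 * decided by the cond callback, which runs on the calling CPU. The
 * per-CPU variable and both callbacks are hypothetical.
 *
 *	static DEFINE_PER_CPU(unsigned int, pending);
 *
 *	static bool has_pending(int cpu, void *info)
 *	{
 *		return per_cpu(pending, cpu) != 0;
 *	}
 *
 *	static void drain(void *info)
 *	{
 *		this_cpu_write(pending, 0);
 *	}
 *
 *	on_each_cpu_cond(has_pending, drain, NULL, true);
 */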

/*
 * Architecture specific boot CPU setup. Defined as empty weak function in
 * init/main.c. Architectures can override it.
 */
void __init smp_prepare_boot_cpu(void);

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void arch_smp_send_reschedule(int cpu);
/*
 * scheduler_ipi() is inline so can't be passed as callback reason, but the
 * callsite IP should be sufficient for root-causing IPIs sent from here.
 */
#define smp_send_reschedule(cpu) ({		  \
	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
	arch_smp_send_reschedule(cpu);		  \
})

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
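
/*
 * Example (sketch): smp_call_function() targets all *other* online
 * CPUs; run the callback locally as well if the current CPU must
 * participate (on_each_cpu() does exactly that). do_sync() is a
 * hypothetical fast, non-blocking callback.
 *
 *	preempt_disable();
 *	smp_call_function(do_sync, NULL, 1);	(every other CPU)
 *	do_sync(NULL);				(and ourselves)
 *	preempt_enable();
 *
 * smp_call_function_any() instead picks a single CPU from @mask,
 * preferring the local CPU (then a CPU on the same node) so that an
 * IPI can often be avoided.
 */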

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
bool cpus_peek_for_pending_ipi(const struct cpumask *mask);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
static inline bool cpus_peek_for_pending_ipi(const struct cpumask *mask)
{
	return false;
}

#define setup_max_cpus 0

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static __always_inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/*
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * raw_smp_processor_id() is arch-specific/arch-defined and
 * may be a macro or a static inline function.
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id() raw_smp_processor_id()
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()

#else
/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT=y, we verify these assumptions and WARN
 * when smp_processor_id() is used while the CPU id is not stable.
 */

# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
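
/*
 * Example (sketch): get_cpu()/put_cpu() bracket a short section where
 * the CPU id must stay stable. The per-CPU variable is hypothetical.
 *
 *	int cpu = get_cpu();		(disables preemption)
 *	per_cpu(hit_count, cpu)++;
 *	put_cpu();			(re-enables preemption)
 */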

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
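
/*
 * Example (sketch): unlike the IPI-based calls above, smp_call_on_cpu()
 * runs @func from a kworker bound to @cpu, so @func may sleep, and its
 * int return value is propagated back to the caller. @phys additionally
 * pins the underlying physical CPU on virtualized systems.
 * read_hw_state() is a hypothetical callback.
 *
 *	static int read_hw_state(void *arg)
 *	{
 *		...	(may sleep, runs on the chosen CPU)
 *		return 0;
 *	}
 *
 *	ret = smp_call_on_cpu(3, read_hw_state, NULL, false);
 */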

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
bool csd_lock_is_stuck(void);
#else
static inline bool csd_lock_is_stuck(void) { return false; }
#endif

#endif /* __LINUX_SMP_H */