Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __LINUX_IRQENTRYCOMMON_H
3#define __LINUX_IRQENTRYCOMMON_H
4
5#include <linux/context_tracking.h>
6#include <linux/hrtimer_rearm.h>
7#include <linux/kmsan.h>
8#include <linux/rseq_entry.h>
9#include <linux/static_call_types.h>
10#include <linux/syscalls.h>
11#include <linux/tick.h>
12#include <linux/unwind_deferred.h>
13
14#include <asm/entry-common.h>
15
16/*
17 * Define dummy _TIF work flags if not defined by the architecture or for
18 * disabled functionality.
19 */
20#ifndef _TIF_PATCH_PENDING
21# define _TIF_PATCH_PENDING (0)
22#endif
23
24/*
25 * TIF flags handled in exit_to_user_mode_loop()
26 */
27#ifndef ARCH_EXIT_TO_USER_MODE_WORK
28# define ARCH_EXIT_TO_USER_MODE_WORK (0)
29#endif
30
31#define EXIT_TO_USER_MODE_WORK \
32 (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
33 _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \
34 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | _TIF_RSEQ | \
35 ARCH_EXIT_TO_USER_MODE_WORK)
36
/* Syscall exit never handles deferred hrtimer rearm work */
#define EXIT_TO_USER_MODE_WORK_SYSCALL	(EXIT_TO_USER_MODE_WORK)

#ifdef CONFIG_HRTIMER_REARM_DEFERRED
/* Interrupt exit additionally handles the deferred hrtimer rearm flag */
# define EXIT_TO_USER_MODE_WORK_IRQ	(EXIT_TO_USER_MODE_WORK | _TIF_HRTIMER_REARM)
#else
# define EXIT_TO_USER_MODE_WORK_IRQ	(EXIT_TO_USER_MODE_WORK)
#endif
44
/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

/*
 * The unconditional prototype above pins the required signature; an
 * architecture overrides the default by defining a macro of the same name.
 */
#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
61
/**
 * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
 *		     states.
 *
 * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
 *
 * Architectures only need to define this if threads other than the idle thread
 * may have an interruptible EQS. This does not need to handle idle threads. It
 * is safe to over-estimate at the cost of redundant RCU management work.
 *
 * Invoked from irqentry_enter()
 */
#ifndef arch_in_rcu_eqs
/* Default: only the idle task can be in an EQS, which is handled separately */
static __always_inline bool arch_in_rcu_eqs(void) { return false; }
#endif
77
/**
 * enter_from_user_mode - Establish state when coming from user mode
 * @regs:	Pointer to current's pt_regs
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_enter_from_user_mode(regs);
	/* Low level entry code disabled interrupts; inform lockdep first */
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(__ct_state() != CT_STATE_USER);
	/* Switch context tracking (and thereby RCU) from user to kernel mode */
	user_exit_irqoff();

	/* RCU is watching now, instrumentable code is fine from here on */
	instrumentation_begin();
	/* Entry registers originate from user space: unpoison them for KMSAN */
	kmsan_unpoison_entry_regs(regs);
	trace_hardirqs_off_finish();
	instrumentation_end();
}
111
/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif
131
/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs:	Pointer to current's pt_regs
 * @ti_work:	Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif
150
/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif
169
170/**
171 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs:	Pointer to current's pt_regs
173 *
174 * Invoked from exit_to_user_mode_loop().
175 */
176void arch_do_signal_or_restart(struct pt_regs *regs);
177
178/* Handle pending TIF work */
179unsigned long exit_to_user_mode_loop(struct pt_regs *regs, unsigned long ti_work);
180
/**
 * __exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 * @work_mask:	Which TIF bits need to be evaluated
 *
 * 1) check that interrupts are disabled
 * 2) call tick_nohz_user_enter_prepare()
 * 3) call exit_to_user_mode_loop() if any flags from
 *    EXIT_TO_USER_MODE_WORK are set
 * 4) check that interrupts are still disabled
 *
 * Don't invoke directly, use the syscall/irqentry_ prefixed variants below
 */
static __always_inline void __exit_to_user_mode_prepare(struct pt_regs *regs,
							const unsigned long work_mask)
{
	unsigned long ti_work;

	lockdep_assert_irqs_disabled();

	/* Flush pending rcuog wakeup before the last need_resched() check */
	tick_nohz_user_enter_prepare();

	ti_work = read_thread_flags();
	if (unlikely(ti_work & work_mask)) {
		/*
		 * When hrtimer_rearm_deferred_user_irq() returns true the
		 * pending work was fully handled there and the work loop is
		 * skipped. NOTE(review): exact contract is defined in
		 * <linux/hrtimer_rearm.h> - confirm there.
		 */
		if (!hrtimer_rearm_deferred_user_irq(&ti_work, work_mask))
			ti_work = exit_to_user_mode_loop(regs, ti_work);
	}

	/* Architecture specific last minute work, interrupts stay disabled */
	arch_exit_to_user_mode_prepare(regs, ti_work);
}
212
/*
 * Final sanity checks before returning to user space: no highmem kmaps
 * left around, interrupts still disabled, lockdep exit bookkeeping done.
 */
static __always_inline void __exit_to_user_mode_validate(void)
{
	/* Ensure that kernel state is sane for a return to userspace */
	kmap_assert_nomap();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}
220
/*
 * Temporary workaround to keep ARM64 alive: combines work handling, the
 * legacy rseq exit path and the final validation in a single call for
 * code which has not been converted to the split variants below.
 */
static __always_inline void exit_to_user_mode_prepare_legacy(struct pt_regs *regs)
{
	__exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK);
	rseq_exit_to_user_mode_legacy();
	__exit_to_user_mode_validate();
}
228
/**
 * syscall_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 *
 * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
 * syscalls and interrupts. Also handles the syscall variant of rseq exit
 * work and validates the final kernel state.
 */
static __always_inline void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	__exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_SYSCALL);
	rseq_syscall_exit_to_user_mode();
	__exit_to_user_mode_validate();
}
242
/**
 * irqentry_exit_to_user_mode_prepare - call exit_to_user_mode_loop() if required
 * @regs:	Pointer to pt_regs on entry stack
 *
 * Wrapper around __exit_to_user_mode_prepare() to separate the exit work for
 * syscalls and interrupts. Also handles the interrupt variant of rseq exit
 * work and validates the final kernel state.
 */
static __always_inline void irqentry_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	__exit_to_user_mode_prepare(regs, EXIT_TO_USER_MODE_WORK_IRQ);
	rseq_irqentry_exit_to_user_mode();
	__exit_to_user_mode_validate();
}
256
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	unwind_reset_info();
	/* Returning to user space will enable interrupts; announce it first */
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	/* Switch context tracking (and thereby RCU) to user mode */
	user_enter_irqoff();
	/* Last minute architecture work, e.g. speculation mitigations */
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}
287
/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing)
 */
static __always_inline void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
	/* Record for rseq that user mode was interrupted - see <linux/rseq_entry.h> */
	rseq_note_user_irq_entry();
}
304
/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit is not invoking #1 which is the syscall specific one time
 * work.
 */
static __always_inline void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	lockdep_assert_irqs_disabled();

	/* The work handling is instrumentable, the final exit is not */
	instrumentation_begin();
	irqentry_exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}
327
#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *	      exit path has to invoke ct_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *	     lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	/*
	 * A union suffices because the members are used by disjoint sets of
	 * entry/exit functions - see the member descriptions above.
	 */
	union {
		bool exit_rcu;
		bool lockdep;
	};
} irqentry_state_t;
#endif
351
352/**
353 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
354 *
355 * Conditional reschedule with additional sanity checks.
356 */
357void raw_irqentry_exit_cond_resched(void);
358
359#ifdef CONFIG_PREEMPT_DYNAMIC
360#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
361#define irqentry_exit_cond_resched_dynamic_enabled raw_irqentry_exit_cond_resched
362#define irqentry_exit_cond_resched_dynamic_disabled NULL
363DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
364#define irqentry_exit_cond_resched() static_call(irqentry_exit_cond_resched)()
365#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
366DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
367void dynamic_irqentry_exit_cond_resched(void);
368#define irqentry_exit_cond_resched() dynamic_irqentry_exit_cond_resched()
369#endif
370#else /* CONFIG_PREEMPT_DYNAMIC */
371#define irqentry_exit_cond_resched() raw_irqentry_exit_cond_resched()
372#endif /* CONFIG_PREEMPT_DYNAMIC */
373
/**
 * irqentry_enter_from_kernel_mode - Establish state before invoking the irq handler
 * @regs:	Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from kernel mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing) and
 * is provided for architectures which require a strict split between entry from
 * kernel and user mode and therefore cannot use irqentry_enter() which handles
 * both entry modes.
 *
 * Returns: An opaque object that must be passed to irqentry_exit_to_kernel_mode().
 */
static __always_inline irqentry_state_t irqentry_enter_from_kernel_mode(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	/*
	 * If this entry hit the idle task invoke ct_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for rcu_is_watching() here would prevent the nesting
	 * interrupt to invoke ct_irq_enter(). If that nested interrupt is
	 * the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke ct_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) &&
	    (is_idle_task(current) || arch_in_rcu_eqs())) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		ct_irq_enter();
		instrumentation_begin();
		kmsan_unpoison_entry_regs(regs);
		trace_hardirqs_off_finish();
		instrumentation_end();

		/* Tell the exit path that it has to invoke ct_irq_exit() */
		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	lockdep_hardirqs_off(CALLER_ADDR0);
	instrumentation_begin();
	kmsan_unpoison_entry_regs(regs);
	rcu_irq_enter_check_tick();
	trace_hardirqs_off_finish();
	instrumentation_end();

	return ret;
}
452
453/**
454 * irqentry_exit_to_kernel_mode_preempt - Run preempt checks on return to kernel mode
455 * @regs: Pointer to current's pt_regs
456 * @state: Return value from matching call to irqentry_enter_from_kernel_mode()
457 *
458 * This is to be invoked before irqentry_exit_to_kernel_mode_after_preempt() to
459 * allow kernel preemption on return from interrupt.
460 *
461 * Must be invoked with interrupts disabled and CPU state which allows kernel
462 * preemption.
463 *
464 * After returning from this function, the caller can modify CPU state before
465 * invoking irqentry_exit_to_kernel_mode_after_preempt(), which is required to
466 * re-establish the tracing, lockdep and RCU state for returning to the
467 * interrupted context.
468 */
469static inline void irqentry_exit_to_kernel_mode_preempt(struct pt_regs *regs,
470 irqentry_state_t state)
471{
472 if (regs_irqs_disabled(regs) || state.exit_rcu)
473 return;
474
475 if (IS_ENABLED(CONFIG_PREEMPTION))
476 irqentry_exit_cond_resched();
477}
478
/**
 * irqentry_exit_to_kernel_mode_after_preempt - Establish trace, lockdep and RCU state
 * @regs:	Pointer to current's pt_regs
 * @state:	Return value from matching call to irqentry_enter_from_kernel_mode()
 *
 * This is to be invoked after irqentry_exit_to_kernel_mode_preempt() and before
 * actually returning to the interrupted context.
 *
 * There are no requirements for the CPU state other than being able to complete
 * the tracing, lockdep and RCU state transitions. After this function returns
 * the caller must return directly to the interrupted context.
 */
static __always_inline void
irqentry_exit_to_kernel_mode_after_preempt(struct pt_regs *regs, irqentry_state_t state)
{
	if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Handle deferred hrtimer rearm work - see <linux/hrtimer_rearm.h> */
			hrtimer_rearm_deferred();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare();
			instrumentation_end();
			/* Undo the ct_irq_enter() from the entry path */
			ct_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		/* Handle deferred hrtimer rearm work - see <linux/hrtimer_rearm.h> */
		hrtimer_rearm_deferred();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			ct_irq_exit();
	}
}
526
/**
 * irqentry_exit_to_kernel_mode - Run preempt checks and establish state after
 *				  invoking the interrupt handler
 * @regs:	Pointer to current's pt_regs
 * @state:	Return value from matching call to irqentry_enter_from_kernel_mode()
 *
 * This is the counterpart of irqentry_enter_from_kernel_mode() and combines
 * the calls to irqentry_exit_to_kernel_mode_preempt() and
 * irqentry_exit_to_kernel_mode_after_preempt().
 *
 * The requirement for the CPU state is that it can schedule. After the function
 * returns the tracing, lockdep and RCU state transitions are completed and the
 * caller must return directly to the interrupted context.
 */
static __always_inline void irqentry_exit_to_kernel_mode(struct pt_regs *regs,
							 irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* The preemption check may schedule; keep it instrumentable */
	instrumentation_begin();
	irqentry_exit_to_kernel_mode_preempt(regs, state);
	instrumentation_end();

	irqentry_exit_to_kernel_mode_after_preempt(regs, state);
}
552
553/**
554 * irqentry_enter - Handle state tracking on ordinary interrupt entries
555 * @regs: Pointer to pt_regs of interrupted context
556 *
557 * Invokes:
558 * - lockdep irqflag state tracking as low level ASM entry disabled
559 * interrupts.
560 *
561 * - Context tracking if the exception hit user mode.
562 *
563 * - The hardirq tracer to keep the state consistent as low level ASM
564 * entry disabled interrupts.
565 *
566 * As a precondition, this requires that the entry came from user mode,
567 * idle, or a kernel context in which RCU is watching.
568 *
569 * For kernel mode entries RCU handling is done conditional. If RCU is
570 * watching then the only RCU requirement is to check whether the tick has
571 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
572 * invoked on entry and ct_irq_exit() on exit.
573 *
574 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
575 * solves the problem of kernel mode pagefaults which can schedule, which
576 * is not possible after invoking ct_irq_enter() without undoing it.
577 *
578 * For user mode entries irqentry_enter_from_user_mode() is invoked to
579 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
580 * would not be possible.
581 *
582 * Returns: An opaque object that must be passed to irqentry_exit()
583 */
584irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);
585
586/**
587 * irqentry_exit - Handle return from exception that used irqentry_enter()
588 * @regs: Pointer to pt_regs (exception entry regs)
589 * @state: Return value from matching call to irqentry_enter()
590 *
591 * Depending on the return target (kernel/user) this runs the necessary
592 * preemption and work checks if possible and required and returns to
593 * the caller with interrupts disabled and no further work pending.
594 *
595 * This is the last action before returning to the low level ASM code which
596 * just needs to return to the appropriate context.
597 *
598 * Counterpart to irqentry_enter().
599 */
600void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
601
602/**
603 * irqentry_nmi_enter - Handle NMI entry
 * @regs:	Pointer to current's pt_regs
605 *
606 * Similar to irqentry_enter() but taking care of the NMI constraints.
607 */
608irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
609
610/**
611 * irqentry_nmi_exit - Handle return from NMI handling
612 * @regs: Pointer to pt_regs (NMI entry regs)
613 * @irq_state: Return value from matching call to irqentry_nmi_enter()
614 *
615 * Last action before returning to the low level assembly code.
616 *
617 * Counterpart to irqentry_nmi_enter().
618 */
619void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
620
621#endif