Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

sched: add a few helpers to wake up tasks on the current cpu

Add complete_on_current_cpu, wake_up_poll_on_current_cpu helpers to wake
up tasks on the current CPU.

These two helpers are useful when the task needs to make a synchronous context
switch to another task. In this context, synchronous means it wakes up the
target task and falls asleep right after that.

One example of such workloads is the seccomp user notify mechanism, which
allows a supervisor process to handle system calls on behalf of a target process.
While the supervisor is handling an intercepted system call, the target process
will be blocked in the kernel, waiting for a response to come back.

On-CPU context switches are much faster than regular ones.

Signed-off-by: Andrei Vagin <avagin@google.com>
Acked-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230308073201.3102738-4-avagin@google.com
Signed-off-by: Kees Cook <keescook@chromium.org>

authored by

Andrei Vagin and committed by
Kees Cook
6f63904c ab83f455

+33 -14
+1
include/linux/completion.h
··· 116 116 extern bool completion_done(struct completion *x); 117 117 118 118 extern void complete(struct completion *); 119 + extern void complete_on_current_cpu(struct completion *x); 119 120 extern void complete_all(struct completion *); 120 121 121 122 #endif
+1 -1
include/linux/swait.h
··· 146 146 147 147 extern void swake_up_one(struct swait_queue_head *q); 148 148 extern void swake_up_all(struct swait_queue_head *q); 149 - extern void swake_up_locked(struct swait_queue_head *q); 149 + extern void swake_up_locked(struct swait_queue_head *q, int wake_flags); 150 150 151 151 extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); 152 152 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
+3
include/linux/wait.h
··· 210 210 } 211 211 212 212 int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key); 213 + void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key); 213 214 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key); 214 215 void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head, 215 216 unsigned int mode, void *key, wait_queue_entry_t *bookmark); ··· 238 237 #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) 239 238 #define wake_up_poll(x, m) \ 240 239 __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) 240 + #define wake_up_poll_on_current_cpu(x, m) \ 241 + __wake_up_on_current_cpu(x, TASK_NORMAL, poll_to_key(m)) 241 242 #define wake_up_locked_poll(x, m) \ 242 243 __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) 243 244 #define wake_up_interruptible_poll(x, m) \
+18 -8
kernel/sched/completion.c
··· 13 13 * Waiting for completion is a typically sync point, but not an exclusion point. 14 14 */ 15 15 16 + static void complete_with_flags(struct completion *x, int wake_flags) 17 + { 18 + unsigned long flags; 19 + 20 + raw_spin_lock_irqsave(&x->wait.lock, flags); 21 + 22 + if (x->done != UINT_MAX) 23 + x->done++; 24 + swake_up_locked(&x->wait, wake_flags); 25 + raw_spin_unlock_irqrestore(&x->wait.lock, flags); 26 + } 27 + 28 + void complete_on_current_cpu(struct completion *x) 29 + { 30 + return complete_with_flags(x, WF_CURRENT_CPU); 31 + } 32 + 16 33 /** 17 34 * complete: - signals a single thread waiting on this completion 18 35 * @x: holds the state of this particular completion ··· 44 27 */ 45 28 void complete(struct completion *x) 46 29 { 47 - unsigned long flags; 48 - 49 - raw_spin_lock_irqsave(&x->wait.lock, flags); 50 - 51 - if (x->done != UINT_MAX) 52 - x->done++; 53 - swake_up_locked(&x->wait); 54 - raw_spin_unlock_irqrestore(&x->wait.lock, flags); 30 + complete_with_flags(x, 0); 55 31 } 56 32 EXPORT_SYMBOL(complete); 57 33
+1 -1
kernel/sched/core.c
··· 7029 7029 int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, 7030 7030 void *key) 7031 7031 { 7032 - WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); 7032 + WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~(WF_SYNC|WF_CURRENT_CPU)); 7033 7033 return try_to_wake_up(curr->private, mode, wake_flags); 7034 7034 } 7035 7035 EXPORT_SYMBOL(default_wake_function);
+4 -4
kernel/sched/swait.c
··· 18 18 * If for some reason it would return 0, that means the previously waiting 19 19 * task is already running, so it will observe condition true (or has already). 20 20 */ 21 - void swake_up_locked(struct swait_queue_head *q) 21 + void swake_up_locked(struct swait_queue_head *q, int wake_flags) 22 22 { 23 23 struct swait_queue *curr; 24 24 ··· 26 26 return; 27 27 28 28 curr = list_first_entry(&q->task_list, typeof(*curr), task_list); 29 - wake_up_process(curr->task); 29 + try_to_wake_up(curr->task, TASK_NORMAL, wake_flags); 30 30 list_del_init(&curr->task_list); 31 31 } 32 32 EXPORT_SYMBOL(swake_up_locked); ··· 41 41 void swake_up_all_locked(struct swait_queue_head *q) 42 42 { 43 43 while (!list_empty(&q->task_list)) 44 - swake_up_locked(q); 44 + swake_up_locked(q, 0); 45 45 } 46 46 47 47 void swake_up_one(struct swait_queue_head *q) ··· 49 49 unsigned long flags; 50 50 51 51 raw_spin_lock_irqsave(&q->lock, flags); 52 - swake_up_locked(q); 52 + swake_up_locked(q, 0); 53 53 raw_spin_unlock_irqrestore(&q->lock, flags); 54 54 } 55 55 EXPORT_SYMBOL(swake_up_one);
+5
kernel/sched/wait.c
··· 161 161 } 162 162 EXPORT_SYMBOL(__wake_up); 163 163 164 + void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key) 165 + { 166 + __wake_up_common_lock(wq_head, mode, 1, WF_CURRENT_CPU, key); 167 + } 168 + 164 169 /* 165 170 * Same as __wake_up but called with the spinlock in wait_queue_head_t held. 166 171 */