Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

io_uring/eventfd: dedup signalling helpers

Consolidate io_eventfd_flush_signal() and io_eventfd_signal(). Not much
of a difference for now, but it prepares the code for the changes that
follow.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5beecd4da65d8d2d83df499196f84b329387f6a2.1745493845.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Pavel Begunkov, committed by Jens Axboe
62f666df 76f1cc98
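
For background (not part of this commit): userspace opts into this signalling by registering an eventfd with the ring, after which the kernel paths touched below call into the eventfd code whenever completions need to be advertised. A minimal liburing sketch, with error handling trimmed for brevity:

/*
 * Userspace sketch: register an eventfd with an io_uring instance so
 * the kernel signals it when CQEs are posted.
 */
#include <liburing.h>
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        uint64_t count;
        int efd;

        efd = eventfd(0, 0);
        if (efd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* Wraps the IORING_REGISTER_EVENTFD registration opcode. */
        io_uring_register_eventfd(&ring, efd);

        /* Post a no-op request; its completion should signal the eventfd. */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_submit(&ring);

        /* Blocks until the kernel bumps the eventfd count. */
        read(efd, &count, sizeof(count));
        printf("eventfd fired, count=%llu\n", (unsigned long long)count);

        io_uring_queue_exit(&ring);
        close(efd);
        return 0;
}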

3 files changed: +12 -21

io_uring/eventfd.c (+9 -17)

···
         return NULL;
 }
 
-void io_eventfd_signal(struct io_ring_ctx *ctx)
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
 {
+        bool skip = false, put_ref = true;
         struct io_ev_fd *ev_fd;
 
         ev_fd = io_eventfd_grab(ctx);
-        if (ev_fd)
-                io_eventfd_release(ev_fd, __io_eventfd_signal(ev_fd));
-}
+        if (!ev_fd)
+                return;
 
-void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
-{
-        struct io_ev_fd *ev_fd;
-
-        ev_fd = io_eventfd_grab(ctx);
-        if (ev_fd) {
-                bool skip, put_ref = true;
-
+        if (cqe_event) {
                 /*
                  * Eventfd should only get triggered when at least one event
                  * has been posted. Some applications rely on the eventfd
···
                 skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
                 ev_fd->last_cq_tail = ctx->cached_cq_tail;
                 spin_unlock(&ctx->completion_lock);
-
-                if (!skip)
-                        put_ref = __io_eventfd_signal(ev_fd);
-
-                io_eventfd_release(ev_fd, put_ref);
         }
+
+        if (!skip)
+                put_ref = __io_eventfd_signal(ev_fd);
+        io_eventfd_release(ev_fd, put_ref);
 }
 
 int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
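
Pieced together from the hunks above, the consolidated helper reads roughly as follows after this patch. The folded context (···) hides part of the comment and, presumably, the spin_lock(&ctx->completion_lock) that pairs with the spin_unlock() visible above, so treat this as a sketch rather than verbatim file contents:

void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
{
        bool skip = false, put_ref = true;
        struct io_ev_fd *ev_fd;

        ev_fd = io_eventfd_grab(ctx);
        if (!ev_fd)
                return;

        if (cqe_event) {
                /*
                 * Eventfd should only get triggered when at least one event
                 * has been posted. Some applications rely on the eventfd
                 * (comment continues in the folded context)
                 */
                spin_lock(&ctx->completion_lock);  /* inferred from the unlock */
                skip = ctx->cached_cq_tail == ev_fd->last_cq_tail;
                ev_fd->last_cq_tail = ctx->cached_cq_tail;
                spin_unlock(&ctx->completion_lock);
        }

        if (!skip)
                put_ref = __io_eventfd_signal(ev_fd);
        io_eventfd_release(ev_fd, put_ref);
}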
io_uring/eventfd.h (+1 -2)

···
                         unsigned int eventfd_async);
 int io_eventfd_unregister(struct io_ring_ctx *ctx);
 
-void io_eventfd_flush_signal(struct io_ring_ctx *ctx);
-void io_eventfd_signal(struct io_ring_ctx *ctx);
+void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event);
io_uring/io_uring.c (+2 -2)

···
         if (ctx->drain_active)
                 io_queue_deferred(ctx);
         if (ctx->has_evfd)
-                io_eventfd_flush_signal(ctx);
+                io_eventfd_signal(ctx, true);
 }
 
 static inline void __io_cq_lock(struct io_ring_ctx *ctx)
···
                 if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
                         atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
                 if (ctx->has_evfd)
-                        io_eventfd_signal(ctx);
+                        io_eventfd_signal(ctx, false);
         }
 
         nr_wait = atomic_read(&ctx->cq_wait_nr);
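
Note the call-site split made explicit by the new flag: the CQ-ring flush path (first hunk) passes cqe_event == true, so the helper dedups against ev_fd->last_cq_tail under the completion lock and only signals when new CQEs were actually posted, while the taskrun notification path (second hunk) passes false and signals unconditionally. The semantics of both former helpers are preserved; only the plumbing is shared.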