Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring: uring_cmd: add multishot support

Add UAPI flag IORING_URING_CMD_MULTISHOT for supporting multishot
uring_cmd operations with provided buffer.

This enables drivers to post multiple completion events from a single
uring_cmd submission, which is useful for:

- Notifying userspace of device events (e.g., interrupt handling)
- Supporting devices with multiple event sources (e.g., multi-queue devices)
- Avoiding the need for device poll() support when events originate
from multiple sources device-wide

The implementation adds two new APIs:
- io_uring_cmd_buffer_select(): selects a buffer from the provided
buffer group for multishot uring_cmd
- io_uring_mshot_cmd_post_cqe(): posts a CQE after event data is
pushed to the provided buffer

Multishot uring_cmd must be used with buffer select (IOSQE_BUFFER_SELECT)
and is mutually exclusive with IORING_URING_CMD_FIXED for now.

The ublk driver will be the first user of this functionality:

https://github.com/ming1/linux/commits/ublk-devel/

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20250821040210.1152145-3-ming.lei@redhat.com
[axboe: fold in fix for !CONFIG_IO_URING]
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Ming Lei and committed by
Jens Axboe
620a50c9 d589bcdd

+102 -2
+26
include/linux/io_uring/cmd.h
··· 70 70 /* Execute the request from a blocking context */ 71 71 void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd); 72 72 73 + /* 74 + * Select a buffer from the provided buffer group for multishot uring_cmd. 75 + * Returns the selected buffer address and size. 76 + */ 77 + struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, 78 + unsigned buf_group, size_t *len, 79 + unsigned int issue_flags); 80 + 81 + /* 82 + * Complete a multishot uring_cmd event. This will post a CQE to the completion 83 + * queue and update the provided buffer. 84 + */ 85 + bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd, 86 + struct io_br_sel *sel, unsigned int issue_flags); 87 + 73 88 #else 74 89 static inline int 75 90 io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, ··· 116 101 } 117 102 static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd) 118 103 { 104 + } 105 + static inline struct io_br_sel 106 + io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, unsigned buf_group, 107 + size_t *len, unsigned int issue_flags) 108 + { 109 + return (struct io_br_sel) { .val = -EOPNOTSUPP }; 110 + } 111 + static inline bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd, 112 + struct io_br_sel *sel, unsigned int issue_flags) 113 + { 114 + return true; 119 115 } 120 116 #endif 121 117
+5 -1
include/uapi/linux/io_uring.h
··· 298 298 * sqe->uring_cmd_flags top 8bits aren't available for userspace 299 299 * IORING_URING_CMD_FIXED use registered buffer; pass this flag 300 300 * along with setting sqe->buf_index. 301 + * IORING_URING_CMD_MULTISHOT must be used with buffer select, like other 302 + * multishot commands. Not compatible with 303 + * IORING_URING_CMD_FIXED, for now. 301 304 */ 302 305 #define IORING_URING_CMD_FIXED (1U << 0) 303 - #define IORING_URING_CMD_MASK IORING_URING_CMD_FIXED 306 + #define IORING_URING_CMD_MULTISHOT (1U << 1) 307 + #define IORING_URING_CMD_MASK (IORING_URING_CMD_FIXED | IORING_URING_CMD_MULTISHOT) 304 308 305 309 306 310 /*
+1
io_uring/opdef.c
··· 413 413 #endif 414 414 }, 415 415 [IORING_OP_URING_CMD] = { 416 + .buffer_select = 1, 416 417 .needs_file = 1, 417 418 .plug = 1, 418 419 .iopoll = 1,
+70 -1
io_uring/uring_cmd.c
··· 11 11 #include "io_uring.h" 12 12 #include "alloc_cache.h" 13 13 #include "rsrc.h" 14 + #include "kbuf.h" 14 15 #include "uring_cmd.h" 15 16 #include "poll.h" 16 17 ··· 195 194 if (ioucmd->flags & ~IORING_URING_CMD_MASK) 196 195 return -EINVAL; 197 196 198 - if (ioucmd->flags & IORING_URING_CMD_FIXED) 197 + if (ioucmd->flags & IORING_URING_CMD_FIXED) { 198 + if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) 199 + return -EINVAL; 199 200 req->buf_index = READ_ONCE(sqe->buf_index); 201 + } 202 + 203 + if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) { 204 + if (ioucmd->flags & IORING_URING_CMD_FIXED) 205 + return -EINVAL; 206 + if (!(req->flags & REQ_F_BUFFER_SELECT)) 207 + return -EINVAL; 208 + } else { 209 + if (req->flags & REQ_F_BUFFER_SELECT) 210 + return -EINVAL; 211 + } 200 212 201 213 ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); 202 214 ··· 265 251 } 266 252 267 253 ret = file->f_op->uring_cmd(ioucmd, issue_flags); 254 + if (ioucmd->flags & IORING_URING_CMD_MULTISHOT) { 255 + if (ret >= 0) 256 + return IOU_ISSUE_SKIP_COMPLETE; 257 + } 268 258 if (ret == -EAGAIN) { 269 259 ioucmd->flags |= IORING_URING_CMD_REISSUE; 270 260 return ret; ··· 351 333 return false; 352 334 return io_req_post_cqe32(req, cqe); 353 335 } 336 + 337 + /* 338 + * Work with io_uring_mshot_cmd_post_cqe() together for committing the 339 + * provided buffer upfront 340 + */ 341 + struct io_br_sel io_uring_cmd_buffer_select(struct io_uring_cmd *ioucmd, 342 + unsigned buf_group, size_t *len, 343 + unsigned int issue_flags) 344 + { 345 + struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); 346 + 347 + if (!(ioucmd->flags & IORING_URING_CMD_MULTISHOT)) 348 + return (struct io_br_sel) { .val = -EINVAL }; 349 + 350 + if (WARN_ON_ONCE(!io_do_buffer_select(req))) 351 + return (struct io_br_sel) { .val = -EINVAL }; 352 + 353 + return io_buffer_select(req, len, buf_group, issue_flags); 354 + } 355 + EXPORT_SYMBOL_GPL(io_uring_cmd_buffer_select); 356 + 357 + /* 358 + * Return true if this multishot uring_cmd 
needs to be completed, otherwise 359 + * the event CQE is posted successfully. 360 + * 361 + * This function must use `struct io_br_sel` returned from 362 + * io_uring_cmd_buffer_select() for committing the buffer in the same 363 + * uring_cmd submission context. 364 + */ 365 + bool io_uring_mshot_cmd_post_cqe(struct io_uring_cmd *ioucmd, 366 + struct io_br_sel *sel, unsigned int issue_flags) 367 + { 368 + struct io_kiocb *req = cmd_to_io_kiocb(ioucmd); 369 + unsigned int cflags = 0; 370 + 371 + if (!(ioucmd->flags & IORING_URING_CMD_MULTISHOT)) 372 + return true; 373 + 374 + if (sel->val > 0) { 375 + cflags = io_put_kbuf(req, sel->val, sel->buf_list); 376 + if (io_req_post_cqe(req, sel->val, cflags | IORING_CQE_F_MORE)) 377 + return false; 378 + } 379 + 380 + io_kbuf_recycle(req, sel->buf_list, issue_flags); 381 + if (sel->val < 0) 382 + req_set_fail(req); 383 + io_req_set_res(req, sel->val, cflags); 384 + return true; 385 + } 386 + EXPORT_SYMBOL_GPL(io_uring_mshot_cmd_post_cqe);