Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'io_uring-20260221' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull io_uring fixes from Jens Axboe:

- A fix for a missing URING_CMD128 opcode check, resolving an issue with
the SQE mixed mode support introduced in 6.19. Merged late because it
had multiple dependencies

- Add sqe->cmd size checking for big SQEs, similar to what we have for
normal-sized SQEs

- Fix a race condition in zcrx that leads to a double free

* tag 'io_uring-20260221' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
io_uring: Add size check for sqe->cmd
io_uring: add IORING_OP_URING_CMD128 to opcode checks
io_uring/zcrx: fix user_ref race between scrub and refill paths

+41 -17
+8 -4
drivers/block/ublk_drv.c
···
 				      unsigned int issue_flags)
 {
 	/* May point to userspace-mapped memory */
-	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+	const struct ublksrv_io_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe,
+						struct ublksrv_io_cmd);
 	u16 buf_idx = UBLK_INVALID_BUF_IDX;
 	struct ublk_device *ub = cmd->file->private_data;
 	struct ublk_queue *ubq;
···
 static int ublk_handle_non_batch_cmd(struct io_uring_cmd *cmd,
 				     unsigned int issue_flags)
 {
-	const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe);
+	const struct ublksrv_io_cmd *ub_cmd = io_uring_sqe_cmd(cmd->sqe,
+						struct ublksrv_io_cmd);
 	struct ublk_device *ub = cmd->file->private_data;
 	unsigned tag = READ_ONCE(ub_cmd->tag);
 	unsigned q_id = READ_ONCE(ub_cmd->q_id);
···
 static int ublk_ch_batch_io_uring_cmd(struct io_uring_cmd *cmd,
 				      unsigned int issue_flags)
 {
-	const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe);
+	const struct ublk_batch_io *uc = io_uring_sqe_cmd(cmd->sqe,
+						struct ublk_batch_io);
 	struct ublk_device *ub = cmd->file->private_data;
 	struct ublk_batch_io_data data = {
 		.ub = ub,
···
 				     unsigned int issue_flags)
 {
 	/* May point to userspace-mapped memory */
-	const struct ublksrv_ctrl_cmd *ub_src = io_uring_sqe_cmd(cmd->sqe);
+	const struct ublksrv_ctrl_cmd *ub_src = io_uring_sqe128_cmd(cmd->sqe,
+						struct ublksrv_ctrl_cmd);
 	struct ublksrv_ctrl_cmd header;
 	struct ublk_device *ub = NULL;
 	u32 cmd_op = cmd->cmd_op;
+2 -1
drivers/nvme/host/ioctl.c
···
 		struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec)
 {
 	struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
-	const struct nvme_uring_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
+	const struct nvme_uring_cmd *cmd = io_uring_sqe128_cmd(ioucmd->sqe,
+						struct nvme_uring_cmd);
 	struct request_queue *q = ns ? ns->queue : ctrl->admin_q;
 	struct nvme_uring_data d;
 	struct nvme_command c;
+4 -2
fs/fuse/dev_uring.c
···
 static int fuse_uring_commit_fetch(struct io_uring_cmd *cmd, int issue_flags,
 				   struct fuse_conn *fc)
 {
-	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe,
+						struct fuse_uring_cmd_req);
 	struct fuse_ring_ent *ent;
 	int err;
 	struct fuse_ring *ring = fc->ring;
···
 static int fuse_uring_register(struct io_uring_cmd *cmd,
 			       unsigned int issue_flags, struct fuse_conn *fc)
 {
-	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
+	const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe128_cmd(cmd->sqe,
+						struct fuse_uring_cmd_req);
 	struct fuse_ring *ring = smp_load_acquire(&fc->ring);
 	struct fuse_ring_queue *queue;
 	struct fuse_ring_ent *ent;
+11 -4
include/linux/io_uring/cmd.h
···
 	u8 unused[8];
 };
 
-static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
-{
-	return sqe->cmd;
-}
+#define io_uring_sqe128_cmd(sqe, type) ({				\
+	BUILD_BUG_ON(sizeof(type) > ((2 * sizeof(struct io_uring_sqe)) - \
+				     offsetof(struct io_uring_sqe, cmd))); \
+	(const type *)(sqe)->cmd;					\
+})
+
+#define io_uring_sqe_cmd(sqe, type) ({					\
+	BUILD_BUG_ON(sizeof(type) > (sizeof(struct io_uring_sqe) -	\
+				     offsetof(struct io_uring_sqe, cmd))); \
+	(const type *)(sqe)->cmd;					\
+})
 
 static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
 {
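
Why the macro change matters: the old io_uring_sqe_cmd() returned a bare const void *, so a driver whose command struct outgrew the SQE payload would only misbehave at runtime. The new macros force each caller to name the expected type and let BUILD_BUG_ON reject oversized types at build time, with io_uring_sqe128_cmd() permitting the doubled payload of big SQEs (hence the NVMe, fuse, and ublk control-path conversions above). Below is a rough user-space sketch of the same technique; demo_sqe, demo_sqe_cmd() and small_cmd are invented names, with _Static_assert standing in for BUILD_BUG_ON:

/*
 * Statement expressions need GCC/clang:
 *	cc -std=gnu11 demo.c && ./a.out
 */
#include <stddef.h>
#include <stdio.h>

struct demo_sqe {
	unsigned char header[48];	/* opcode, flags, fd, ... */
	unsigned char cmd[16];		/* per-command payload, like sqe->cmd */
};

/* Refuses to compile if 'type' cannot fit in a single SQE's payload. */
#define demo_sqe_cmd(sqe, type) ({					\
	_Static_assert(sizeof(type) <= sizeof(struct demo_sqe) -	\
		       offsetof(struct demo_sqe, cmd),			\
		       "command struct too large for a single SQE");	\
	(const type *)(sqe)->cmd;					\
})

struct small_cmd {	/* 8 bytes: fits in the 16-byte payload */
	unsigned int tag;
	unsigned int q_id;
};

int main(void)
{
	struct demo_sqe sqe = { .cmd = { 42 } };
	const struct small_cmd *c = demo_sqe_cmd(&sqe, struct small_cmd);

	printf("tag = %u\n", c->tag);	/* 42 on little-endian */

	/*
	 * A 24-byte type would fail at build time instead of silently
	 * reading past the payload:
	 *
	 *	struct big_cmd { unsigned char buf[24]; };
	 *	demo_sqe_cmd(&sqe, struct big_cmd);
	 */
	return 0;
}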
+6
io_uring/io_uring.h
···
 	return false;
 }
 
+static inline bool io_is_uring_cmd(const struct io_kiocb *req)
+{
+	return req->opcode == IORING_OP_URING_CMD ||
+	       req->opcode == IORING_OP_URING_CMD128;
+}
+
 static inline ktime_t io_get_time(struct io_ring_ctx *ctx)
 {
 	if (ctx->clockid == CLOCK_MONOTONIC)
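
The helper exists because IORING_OP_URING_CMD128 requests must take every path that IORING_OP_URING_CMD requests do: the open-coded opcode comparisons converted below in io_uring/kbuf.c and io_uring/rw.c would otherwise silently skip 128-byte commands.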
+1 -1
io_uring/kbuf.c
···
 		return true;
 
 	/* uring_cmd commits kbuf upfront, no need to auto-commit */
-	if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD)
+	if (!io_file_can_poll(req) && !io_is_uring_cmd(req))
 		return true;
 	return false;
 }
+2 -2
io_uring/rw.c
···
 {
 	struct file *file = req->file;
 
-	if (req->opcode == IORING_OP_URING_CMD) {
+	if (io_is_uring_cmd(req)) {
 		struct io_uring_cmd *ioucmd;
 
 		ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
···
 		wq_list_add_tail(&req->comp_list, &ctx->submit_state.compl_reqs);
 		nr_events++;
 		req->cqe.flags = io_put_kbuf(req, req->cqe.res, NULL);
-		if (req->opcode != IORING_OP_URING_CMD)
+		if (!io_is_uring_cmd(req))
 			io_req_rw_cleanup(req, 0);
 	}
 	if (nr_events)
+7 -3
io_uring/zcrx.c
···
 static bool io_zcrx_put_niov_uref(struct net_iov *niov)
 {
 	atomic_t *uref = io_get_user_counter(niov);
+	int old;
 
-	if (unlikely(!atomic_read(uref)))
-		return false;
-	atomic_dec(uref);
+	old = atomic_read(uref);
+	do {
+		if (unlikely(old == 0))
+			return false;
+	} while (!atomic_try_cmpxchg(uref, &old, old - 1));
+
 	return true;
 }
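
On the zcrx fix: the old sequence read the counter and decremented it as two separate atomic operations, so the scrub and refill paths could both observe a count of 1, both pass the check, and both decrement, dropping the final user reference twice (the double free noted above). The cmpxchg loop folds "decrement only if still nonzero" into a single atomic step. A minimal user-space sketch of the same pattern with C11 atomics; put_uref() and the two-caller scenario in main() are illustrative, not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>

/* Drop one reference, but only if any reference is actually held. */
static bool put_uref(atomic_int *uref)
{
	int old = atomic_load(uref);

	do {
		if (old == 0)		/* nothing left to drop */
			return false;
		/* on failure, 'old' is refreshed with the current value */
	} while (!atomic_compare_exchange_weak(uref, &old, old - 1));

	return true;			/* we owned exactly one decrement */
}

int main(void)
{
	atomic_int uref = 1;

	/* However two callers race, only one may win the last reference. */
	bool first = put_uref(&uref);	/* true: 1 -> 0 */
	bool second = put_uref(&uref);	/* false: already 0 */

	return (first && !second) ? 0 : 1;
}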