Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring/cmd: drop unused res2 param from io_uring_cmd_done()

Commit 79525b51acc1 ("io_uring: fix nvme's 32b cqes on mixed cq") split
out a separate io_uring_cmd_done32() helper for ->uring_cmd()
implementations that return 32-byte CQEs. The res2 value passed to
io_uring_cmd_done() is now unused because __io_uring_cmd_done() ignores
it when is_cqe32 is passed as false. So drop the parameter from
io_uring_cmd_done() to simplify the callers and clarify that it's not
possible to return an extra value beyond the 32-bit CQE result.

Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Caleb Sander Mateos and committed by Jens Axboe.
ef9f603f 79525b51

+11 -11
+1 -1
block/ioctl.c
@@ -776,7 +776,7 @@
 	if (bic->res == -EAGAIN && bic->nowait)
 		io_uring_cmd_issue_blocking(cmd);
 	else
-		io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
+		io_uring_cmd_done(cmd, bic->res, issue_flags);
 }
 
 static void bio_cmd_bio_end_io(struct bio *bio)
+3 -3
drivers/block/ublk_drv.c
@@ -1188,7 +1188,7 @@
 	struct io_uring_cmd *cmd = __ublk_prep_compl_io_cmd(io, req);
 
 	/* tell ublksrv one io request is coming */
-	io_uring_cmd_done(cmd, res, 0, issue_flags);
+	io_uring_cmd_done(cmd, res, issue_flags);
 }
 
 #define UBLK_REQUEUE_DELAY_MS	3
@@ -1805,7 +1805,7 @@
 	spin_unlock(&ubq->cancel_lock);
 
 	if (!done)
-		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, issue_flags);
 }
 
 /*
@@ -2452,7 +2452,7 @@
 	int ret = ublk_ch_uring_cmd_local(cmd, issue_flags);
 
 	if (ret != -EIOCBQUEUED)
-		io_uring_cmd_done(cmd, ret, 0, issue_flags);
+		io_uring_cmd_done(cmd, ret, issue_flags);
 }
 
 static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
+1 -1
fs/btrfs/ioctl.c
@@ -4685,7 +4685,7 @@
 	btrfs_unlock_extent(io_tree, priv->start, priv->lockend, &priv->cached_state);
 	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 
-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+	io_uring_cmd_done(cmd, ret, issue_flags);
 	add_rchar(current, ret);
 
 	for (index = 0; index < priv->nr_pages; index++)
+4 -4
fs/fuse/dev_uring.c
@@ -351,7 +351,7 @@
 	spin_unlock(&queue->lock);
 
 	if (cmd)
-		io_uring_cmd_done(cmd, -ENOTCONN, 0, IO_URING_F_UNLOCKED);
+		io_uring_cmd_done(cmd, -ENOTCONN, IO_URING_F_UNLOCKED);
 
 	if (req)
 		fuse_uring_stop_fuse_req_end(req);
@@ -518,7 +518,7 @@
 
 	if (need_cmd_done) {
 		/* no queue lock to avoid lock order issues */
-		io_uring_cmd_done(cmd, -ENOTCONN, 0, issue_flags);
+		io_uring_cmd_done(cmd, -ENOTCONN, issue_flags);
 	}
 }
 
@@ -733,7 +733,7 @@
 	list_move_tail(&ent->list, &queue->ent_in_userspace);
 	spin_unlock(&queue->lock);
 
-	io_uring_cmd_done(cmd, 0, 0, issue_flags);
+	io_uring_cmd_done(cmd, 0, issue_flags);
 	return 0;
 }
 
@@ -1200,7 +1200,7 @@
 	ent->cmd = NULL;
 	spin_unlock(&queue->lock);
 
-	io_uring_cmd_done(cmd, ret, 0, issue_flags);
+	io_uring_cmd_done(cmd, ret, issue_flags);
 }
 
 /*
+2 -2
include/linux/io_uring/cmd.h
@@ -160,9 +160,9 @@
 }
 
 static inline void io_uring_cmd_done(struct io_uring_cmd *ioucmd, s32 ret,
-				     u64 res2, unsigned issue_flags)
+				     unsigned issue_flags)
 {
-	return __io_uring_cmd_done(ioucmd, ret, res2, issue_flags, false);
+	return __io_uring_cmd_done(ioucmd, ret, 0, issue_flags, false);
 }
 
 static inline void io_uring_cmd_done32(struct io_uring_cmd *ioucmd, s32 ret,