Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring: finish IOU_OK -> IOU_COMPLETE transition

IOU_COMPLETE is more descriptive: it explicitly says that the
return value means "please post a completion for this request". This
patch completes the transition from IOU_OK to IOU_COMPLETE, replacing
the remaining IOU_OK users.

This is a purely mechanical change.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

Jens Axboe 8bb9d6cc f660fd2c

+46 -47
+2 -2
io_uring/advise.c
··· 58 58 59 59 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice); 60 60 io_req_set_res(req, ret, 0); 61 - return IOU_OK; 61 + return IOU_COMPLETE; 62 62 #else 63 63 return -EOPNOTSUPP; 64 64 #endif ··· 104 104 if (ret < 0) 105 105 req_set_fail(req); 106 106 io_req_set_res(req, ret, 0); 107 - return IOU_OK; 107 + return IOU_COMPLETE; 108 108 }
+1 -1
io_uring/cancel.c
··· 229 229 if (ret < 0) 230 230 req_set_fail(req); 231 231 io_req_set_res(req, ret, 0); 232 - return IOU_OK; 232 + return IOU_COMPLETE; 233 233 } 234 234 235 235 static int __io_sync_cancel(struct io_uring_task *tctx,
+2 -2
io_uring/epoll.c
··· 61 61 if (ret < 0) 62 62 req_set_fail(req); 63 63 io_req_set_res(req, ret, 0); 64 - return IOU_OK; 64 + return IOU_COMPLETE; 65 65 } 66 66 67 67 int io_epoll_wait_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 88 88 req_set_fail(req); 89 89 90 90 io_req_set_res(req, ret, 0); 91 - return IOU_OK; 91 + return IOU_COMPLETE; 92 92 }
+5 -5
io_uring/fs.c
··· 90 90 91 91 req->flags &= ~REQ_F_NEED_CLEANUP; 92 92 io_req_set_res(req, ret, 0); 93 - return IOU_OK; 93 + return IOU_COMPLETE; 94 94 } 95 95 96 96 void io_renameat_cleanup(struct io_kiocb *req) ··· 141 141 142 142 req->flags &= ~REQ_F_NEED_CLEANUP; 143 143 io_req_set_res(req, ret, 0); 144 - return IOU_OK; 144 + return IOU_COMPLETE; 145 145 } 146 146 147 147 void io_unlinkat_cleanup(struct io_kiocb *req) ··· 185 185 186 186 req->flags &= ~REQ_F_NEED_CLEANUP; 187 187 io_req_set_res(req, ret, 0); 188 - return IOU_OK; 188 + return IOU_COMPLETE; 189 189 } 190 190 191 191 void io_mkdirat_cleanup(struct io_kiocb *req) ··· 235 235 236 236 req->flags &= ~REQ_F_NEED_CLEANUP; 237 237 io_req_set_res(req, ret, 0); 238 - return IOU_OK; 238 + return IOU_COMPLETE; 239 239 } 240 240 241 241 int io_linkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 281 281 282 282 req->flags &= ~REQ_F_NEED_CLEANUP; 283 283 io_req_set_res(req, ret, 0); 284 - return IOU_OK; 284 + return IOU_COMPLETE; 285 285 } 286 286 287 287 void io_link_cleanup(struct io_kiocb *req)
+3 -3
io_uring/futex.c
··· 234 234 kfree(futexv); 235 235 req->async_data = NULL; 236 236 req->flags &= ~REQ_F_ASYNC_DATA; 237 - return IOU_OK; 237 + return IOU_COMPLETE; 238 238 } 239 239 240 240 /* ··· 311 311 req_set_fail(req); 312 312 io_req_set_res(req, ret, 0); 313 313 kfree(ifd); 314 - return IOU_OK; 314 + return IOU_COMPLETE; 315 315 } 316 316 317 317 int io_futex_wake(struct io_kiocb *req, unsigned int issue_flags) ··· 328 328 if (ret < 0) 329 329 req_set_fail(req); 330 330 io_req_set_res(req, ret, 0); 331 - return IOU_OK; 331 + return IOU_COMPLETE; 332 332 }
+1 -1
io_uring/io_uring.c
··· 1751 1751 1752 1752 ret = __io_issue_sqe(req, issue_flags, def); 1753 1753 1754 - if (ret == IOU_OK) { 1754 + if (ret == IOU_COMPLETE) { 1755 1755 if (issue_flags & IO_URING_F_COMPLETE_DEFER) 1756 1756 io_req_complete_defer(req); 1757 1757 else
-1
io_uring/io_uring.h
··· 19 19 #endif 20 20 21 21 enum { 22 - IOU_OK = 0, /* deprecated, use IOU_COMPLETE */ 23 22 IOU_COMPLETE = 0, 24 23 25 24 IOU_ISSUE_SKIP_COMPLETE = -EIOCBQUEUED,
+1 -1
io_uring/kbuf.c
··· 554 554 if (ret < 0) 555 555 req_set_fail(req); 556 556 io_req_set_res(req, ret, 0); 557 - return IOU_OK; 557 + return IOU_COMPLETE; 558 558 } 559 559 560 560 int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
+1 -1
io_uring/msg_ring.c
··· 328 328 req_set_fail(req); 329 329 } 330 330 io_req_set_res(req, ret, 0); 331 - return IOU_OK; 331 + return IOU_COMPLETE; 332 332 } 333 333 334 334 int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
+7 -7
io_uring/net.c
··· 128 128 129 129 ret = __sys_shutdown_sock(sock, shutdown->how); 130 130 io_req_set_res(req, ret, 0); 131 - return IOU_OK; 131 + return IOU_COMPLETE; 132 132 } 133 133 134 134 static bool io_net_retry(struct socket *sock, int flags) ··· 502 502 /* Otherwise stop bundle and use the current result. */ 503 503 finish: 504 504 io_req_set_res(req, *ret, cflags); 505 - *ret = IOU_OK; 505 + *ret = IOU_COMPLETE; 506 506 return true; 507 507 } 508 508 ··· 553 553 else if (sr->done_io) 554 554 ret = sr->done_io; 555 555 io_req_set_res(req, ret, 0); 556 - return IOU_OK; 556 + return IOU_COMPLETE; 557 557 } 558 558 559 559 static int io_send_select_buffer(struct io_kiocb *req, unsigned int issue_flags, ··· 1459 1459 io_req_msg_cleanup(req, 0); 1460 1460 } 1461 1461 io_req_set_res(req, ret, IORING_CQE_F_MORE); 1462 - return IOU_OK; 1462 + return IOU_COMPLETE; 1463 1463 } 1464 1464 1465 1465 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags) ··· 1530 1530 io_req_msg_cleanup(req, 0); 1531 1531 } 1532 1532 io_req_set_res(req, ret, IORING_CQE_F_MORE); 1533 - return IOU_OK; 1533 + return IOU_COMPLETE; 1534 1534 } 1535 1535 1536 1536 void io_sendrecv_fail(struct io_kiocb *req) ··· 1694 1694 sock->file_slot); 1695 1695 } 1696 1696 io_req_set_res(req, ret, 0); 1697 - return IOU_OK; 1697 + return IOU_COMPLETE; 1698 1698 } 1699 1699 1700 1700 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 1761 1761 req_set_fail(req); 1762 1762 io_req_msg_cleanup(req, issue_flags); 1763 1763 io_req_set_res(req, ret, 0); 1764 - return IOU_OK; 1764 + return IOU_COMPLETE; 1765 1765 } 1766 1766 1767 1767 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+1 -1
io_uring/nop.c
··· 68 68 if (ret < 0) 69 69 req_set_fail(req); 70 70 io_req_set_res(req, nop->result, 0); 71 - return IOU_OK; 71 + return IOU_COMPLETE; 72 72 }
+4 -4
io_uring/openclose.c
··· 171 171 if (ret < 0) 172 172 req_set_fail(req); 173 173 io_req_set_res(req, ret, 0); 174 - return IOU_OK; 174 + return IOU_COMPLETE; 175 175 } 176 176 177 177 int io_openat(struct io_kiocb *req, unsigned int issue_flags) ··· 259 259 if (ret < 0) 260 260 req_set_fail(req); 261 261 io_req_set_res(req, ret, 0); 262 - return IOU_OK; 262 + return IOU_COMPLETE; 263 263 } 264 264 265 265 int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 302 302 if (ret < 0) 303 303 req_set_fail(req); 304 304 io_req_set_res(req, ret, 0); 305 - return IOU_OK; 305 + return IOU_COMPLETE; 306 306 } 307 307 308 308 struct io_pipe { ··· 426 426 427 427 io_req_set_res(req, ret, 0); 428 428 if (!ret) 429 - return IOU_OK; 429 + return IOU_COMPLETE; 430 430 431 431 req_set_fail(req); 432 432 if (files[0])
+2 -2
io_uring/poll.c
··· 893 893 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags); 894 894 if (ret > 0) { 895 895 io_req_set_res(req, ipt.result_mask, 0); 896 - return IOU_OK; 896 + return IOU_COMPLETE; 897 897 } 898 898 return ret ?: IOU_ISSUE_SKIP_COMPLETE; 899 899 } ··· 948 948 } 949 949 /* complete update request, we're done with it */ 950 950 io_req_set_res(req, ret, 0); 951 - return IOU_OK; 951 + return IOU_COMPLETE; 952 952 }
+1 -1
io_uring/rsrc.c
··· 500 500 if (ret < 0) 501 501 req_set_fail(req); 502 502 io_req_set_res(req, ret, 0); 503 - return IOU_OK; 503 + return IOU_COMPLETE; 504 504 } 505 505 506 506 void io_free_rsrc_node(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
+1 -1
io_uring/rw.c
··· 660 660 io_req_io_end(req); 661 661 io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags)); 662 662 io_req_rw_cleanup(req, issue_flags); 663 - return IOU_OK; 663 + return IOU_COMPLETE; 664 664 } else { 665 665 io_rw_done(req, ret); 666 666 }
+2 -2
io_uring/splice.c
··· 103 103 if (ret != sp->len) 104 104 req_set_fail(req); 105 105 io_req_set_res(req, ret, 0); 106 - return IOU_OK; 106 + return IOU_COMPLETE; 107 107 } 108 108 109 109 int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 144 144 if (ret != sp->len) 145 145 req_set_fail(req); 146 146 io_req_set_res(req, ret, 0); 147 - return IOU_OK; 147 + return IOU_COMPLETE; 148 148 }
+1 -1
io_uring/statx.c
··· 59 59 60 60 ret = do_statx(sx->dfd, sx->filename, sx->flags, sx->mask, sx->buffer); 61 61 io_req_set_res(req, ret, 0); 62 - return IOU_OK; 62 + return IOU_COMPLETE; 63 63 } 64 64 65 65 void io_statx_cleanup(struct io_kiocb *req)
+3 -3
io_uring/sync.c
··· 47 47 48 48 ret = sync_file_range(req->file, sync->off, sync->len, sync->flags); 49 49 io_req_set_res(req, ret, 0); 50 - return IOU_OK; 50 + return IOU_COMPLETE; 51 51 } 52 52 53 53 int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 79 79 ret = vfs_fsync_range(req->file, sync->off, end > 0 ? end : LLONG_MAX, 80 80 sync->flags & IORING_FSYNC_DATASYNC); 81 81 io_req_set_res(req, ret, 0); 82 - return IOU_OK; 82 + return IOU_COMPLETE; 83 83 } 84 84 85 85 int io_fallocate_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) ··· 108 108 if (ret >= 0) 109 109 fsnotify_modify(req->file); 110 110 io_req_set_res(req, ret, 0); 111 - return IOU_OK; 111 + return IOU_COMPLETE; 112 112 }
+1 -1
io_uring/timeout.c
··· 505 505 if (ret < 0) 506 506 req_set_fail(req); 507 507 io_req_set_res(req, ret, 0); 508 - return IOU_OK; 508 + return IOU_COMPLETE; 509 509 } 510 510 511 511 static int __io_timeout_prep(struct io_kiocb *req,
+1 -1
io_uring/truncate.c
··· 44 44 ret = do_ftruncate(req->file, ft->len, 1); 45 45 46 46 io_req_set_res(req, ret, 0); 47 - return IOU_OK; 47 + return IOU_COMPLETE; 48 48 }
+1 -1
io_uring/uring_cmd.c
··· 265 265 req_set_fail(req); 266 266 io_req_uring_cleanup(req, issue_flags); 267 267 io_req_set_res(req, ret, 0); 268 - return IOU_OK; 268 + return IOU_COMPLETE; 269 269 } 270 270 271 271 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
+1 -1
io_uring/waitid.c
··· 323 323 if (ret < 0) 324 324 req_set_fail(req); 325 325 io_req_set_res(req, ret, 0); 326 - return IOU_OK; 326 + return IOU_COMPLETE; 327 327 }
+4 -4
io_uring/xattr.c
··· 109 109 110 110 ret = file_getxattr(req->file, &ix->ctx); 111 111 io_xattr_finish(req, ret); 112 - return IOU_OK; 112 + return IOU_COMPLETE; 113 113 } 114 114 115 115 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags) ··· 122 122 ret = filename_getxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx); 123 123 ix->filename = NULL; 124 124 io_xattr_finish(req, ret); 125 - return IOU_OK; 125 + return IOU_COMPLETE; 126 126 } 127 127 128 128 static int __io_setxattr_prep(struct io_kiocb *req, ··· 190 190 191 191 ret = file_setxattr(req->file, &ix->ctx); 192 192 io_xattr_finish(req, ret); 193 - return IOU_OK; 193 + return IOU_COMPLETE; 194 194 } 195 195 196 196 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags) ··· 203 203 ret = filename_setxattr(AT_FDCWD, ix->filename, LOOKUP_FOLLOW, &ix->ctx); 204 204 ix->filename = NULL; 205 205 io_xattr_finish(req, ret); 206 - return IOU_OK; 206 + return IOU_COMPLETE; 207 207 }