Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

io_uring: add struct io_cold_def->sqe_copy() method

This will be called by the io_uring core if inline issue is not going to
be attempted for a request. Opcodes can define this handler to defer
copying of SQE data that must remain stable, so the copy is only done
once a request actually goes async.

->sqe_copy() is only called if IO_URING_F_INLINE is set. If it isn't
set, that indicates a bug in the core handling, and -EFAULT is returned
instead to terminate the request, with a WARN_ON_ONCE() flagging the
condition. This is never expected to trigger, and the check can be
removed down the line.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Jens Axboe af19388a 4d811e39
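
To make the deferred copy concrete, here is a minimal sketch of what an
opcode-side handler could look like. It is not part of this patch:
io_foo_sqe_copy(), struct io_foo_async_data, and the field layout are
illustrative assumptions.

/*
 * Hypothetical per-request async data for an opcode "foo" (not from
 * this patch): prep saved a pointer into the SQE, which is only
 * guaranteed stable while the request is handled inline.
 */
struct io_foo_async_data {
	const struct io_uring_sqe	*sqe;
	struct io_uring_sqe		sqe_copy;
};

static void io_foo_sqe_copy(struct io_kiocb *req)
{
	struct io_foo_async_data *ad = req->async_data;

	/*
	 * Called at most once per request (REQ_F_SQE_COPIED guards it),
	 * and only while the SQE memory is still valid, i.e. from the
	 * inline submission path. Snapshot the SQE and repoint the
	 * request at the stable copy before it goes async.
	 */
	ad->sqe_copy = *ad->sqe;
	ad->sqe = &ad->sqe_copy;
}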

+29 -2
include/linux/io_uring_types.h (+3)

···
 	REQ_F_BUF_NODE_BIT,
 	REQ_F_HAS_METADATA_BIT,
 	REQ_F_IMPORT_BUFFER_BIT,
+	REQ_F_SQE_COPIED_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
···
 	 * For SEND_ZC, whether to import buffers (i.e. the first issue).
 	 */
 	REQ_F_IMPORT_BUFFER	= IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
+	/* ->sqe_copy() has been called, if necessary */
+	REQ_F_SQE_COPIED	= IO_REQ_FLAG(REQ_F_SQE_COPIED_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw);
io_uring/io_uring.c (+25 -2)

···
 	return file;
 }
 
-static void io_queue_async(struct io_kiocb *req, int ret)
+static int io_req_sqe_copy(struct io_kiocb *req, unsigned int issue_flags)
+{
+	const struct io_cold_def *def = &io_cold_defs[req->opcode];
+
+	if (req->flags & REQ_F_SQE_COPIED)
+		return 0;
+	req->flags |= REQ_F_SQE_COPIED;
+	if (!def->sqe_copy)
+		return 0;
+	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_INLINE)))
+		return -EFAULT;
+	def->sqe_copy(req);
+	return 0;
+}
+
+static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
 	__must_hold(&req->ctx->uring_lock)
 {
 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
+fail:
 		io_req_defer_failed(req, ret);
 		return;
 	}
+
+	ret = io_req_sqe_copy(req, issue_flags);
+	if (unlikely(ret))
+		goto fail;
 
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
···
 	 * doesn't support non-blocking read/write attempts
 	 */
 	if (unlikely(ret))
-		io_queue_async(req, ret);
+		io_queue_async(req, issue_flags, ret);
 }
 
 static void io_queue_sqe_fallback(struct io_kiocb *req)
···
 		req->flags |= REQ_F_LINK;
 		io_req_defer_failed(req, req->cqe.res);
 	} else {
+		/* can't fail with IO_URING_F_INLINE */
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		if (unlikely(req->ctx->drain_active))
 			io_drain_req(req);
 		else
···
 	 */
 	if (unlikely(link->head)) {
 		trace_io_uring_link(req, link->last);
+		io_req_sqe_copy(req, IO_URING_F_INLINE);
 		link->last->link = req;
 		link->last = req;
 
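
For context on where IO_URING_F_INLINE comes from: per the surrounding
series, only the first issue attempt out of the submit path carries it,
which is why io_req_sqe_copy() may still dereference the SQE there. A
rough reconstruction of that caller (assumed, not shown in this diff):

/*
 * Sketch of the inline submission path. The exact flag set is an
 * assumption from the surrounding series.
 */
static inline void io_queue_sqe(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	unsigned int issue_flags = IO_URING_F_NONBLOCK |
				   IO_URING_F_COMPLETE_DEFER |
				   IO_URING_F_INLINE;
	int ret;

	ret = io_issue_sqe(req, issue_flags);

	/*
	 * On -EAGAIN, io_queue_async() copies the SQE (still live at
	 * this point) before arming poll or punting to io-wq.
	 */
	if (unlikely(ret))
		io_queue_async(req, issue_flags, ret);
}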
io_uring/opdef.h (+1)

···
 struct io_cold_def {
 	const char *name;
 
+	void (*sqe_copy)(struct io_kiocb *);
 	void (*cleanup)(struct io_kiocb *);
 	void (*fail)(struct io_kiocb *);
 };
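
An opcode opts in by pointing .sqe_copy at its handler in its cold
definition (io_cold_defs[] in io_uring/opdef.c). A hypothetical entry,
with IORING_OP_FOO and io_foo_sqe_copy as illustrative names that are
not part of this patch:

	/* io_uring/opdef.c (sketch) */
	[IORING_OP_FOO] = {
		.name		= "FOO",
		/* defer the stable-SQE copy until the request leaves the inline path */
		.sqe_copy	= io_foo_sqe_copy,
	},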