Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'io_uring-7.0-20260305' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull io_uring fixes from Jens Axboe:

- Fix a typo in the mock_file help text

- Fix a comment regarding IORING_SETUP_TASKRUN_FLAG in the
io_uring.h UAPI header

- Use READ_ONCE() for reading refill queue entries

- Reject SEND_VECTORIZED for fixed buffer sends, as it isn't
implemented. Currently this flag is silently ignored

This is in preparation for making vectorized fixed-buffer sends work,
but first we need a fixup so that older kernels will correctly reject them

- Ensure "0" means default for the rx page size

* tag 'io_uring-7.0-20260305' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
io_uring/zcrx: use READ_ONCE with user shared RQEs
io_uring/mock: Fix typo in help text
io_uring/net: reject SEND_VECTORIZED when unsupported
io_uring: correct comment for IORING_SETUP_TASKRUN_FLAG
io_uring/zcrx: don't set rx_page_size when not requested

+10 -5
+2 -1
include/uapi/linux/io_uring.h
··· 188 188 /* 189 189 * If COOP_TASKRUN is set, get notified if task work is available for 190 190 * running and a kernel transition would be needed to run it. This sets 191 - * IORING_SQ_TASKRUN in the sq ring flags. Not valid with COOP_TASKRUN. 191 + * IORING_SQ_TASKRUN in the sq ring flags. Not valid without COOP_TASKRUN 192 + * or DEFER_TASKRUN. 192 193 */ 193 194 #define IORING_SETUP_TASKRUN_FLAG (1U << 9) 194 195 #define IORING_SETUP_SQE128 (1U << 10) /* SQEs are 128 byte */
+1 -1
init/Kconfig
··· 1902 1902 default n 1903 1903 depends on IO_URING 1904 1904 help 1905 - Enable mock files for io_uring subststem testing. The ABI might 1905 + Enable mock files for io_uring subsystem testing. The ABI might 1906 1906 still change, so it's still experimental and should only be enabled 1907 1907 for specific test purposes. 1908 1908
+2
io_uring/net.c
··· 375 375 kmsg->msg.msg_namelen = addr_len; 376 376 } 377 377 if (sr->flags & IORING_RECVSEND_FIXED_BUF) { 378 + if (sr->flags & IORING_SEND_VECTORIZED) 379 + return -EINVAL; 378 380 req->flags |= REQ_F_IMPORT_BUFFER; 379 381 return 0; 380 382 }
+5 -3
io_uring/zcrx.c
··· 837 837 if (ret) 838 838 goto netdev_put_unlock; 839 839 840 - mp_param.rx_page_size = 1U << ifq->niov_shift; 840 + if (reg.rx_buf_len) 841 + mp_param.rx_page_size = 1U << ifq->niov_shift; 841 842 mp_param.mp_ops = &io_uring_pp_zc_ops; 842 843 mp_param.mp_priv = ifq; 843 844 ret = __net_mp_open_rxq(ifq->netdev, reg.if_rxq, &mp_param, NULL); ··· 927 926 struct io_zcrx_ifq *ifq, 928 927 struct net_iov **ret_niov) 929 928 { 929 + __u64 off = READ_ONCE(rqe->off); 930 930 unsigned niov_idx, area_idx; 931 931 struct io_zcrx_area *area; 932 932 933 - area_idx = rqe->off >> IORING_ZCRX_AREA_SHIFT; 934 - niov_idx = (rqe->off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift; 933 + area_idx = off >> IORING_ZCRX_AREA_SHIFT; 934 + niov_idx = (off & ~IORING_ZCRX_AREA_MASK) >> ifq->niov_shift; 935 935 936 936 if (unlikely(rqe->__pad || area_idx)) 937 937 return false;