Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

selftests/net: Extract uring helpers to be reusable

Instead of defining basic io_uring functions in the test case, move them
to a common directory so that other tests can use them.

This simplifies the test code and reuses the common liburing
infrastructure. It is basically a copy of what we have in
io_uring_zerocopy_tx, with some minor improvements to make checkpatch
happy.

A follow-up test will use the same helpers in a BPF sockopt test.

Signed-off-by: Breno Leitao <leitao@debian.org>
Link: https://lore.kernel.org/r/20231016134750.1381153-8-leitao@debian.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Breno Leitao, committed by Jens Axboe
ba6e0e5c 7746a6ad

3 files changed: +285, -266
tools/include/io_uring/mini_liburing.h (new file, +282)

+/* SPDX-License-Identifier: MIT */
+
+#include <linux/io_uring.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+struct io_sq_ring {
+        unsigned int *head;
+        unsigned int *tail;
+        unsigned int *ring_mask;
+        unsigned int *ring_entries;
+        unsigned int *flags;
+        unsigned int *array;
+};
+
+struct io_cq_ring {
+        unsigned int *head;
+        unsigned int *tail;
+        unsigned int *ring_mask;
+        unsigned int *ring_entries;
+        struct io_uring_cqe *cqes;
+};
+
+struct io_uring_sq {
+        unsigned int *khead;
+        unsigned int *ktail;
+        unsigned int *kring_mask;
+        unsigned int *kring_entries;
+        unsigned int *kflags;
+        unsigned int *kdropped;
+        unsigned int *array;
+        struct io_uring_sqe *sqes;
+
+        unsigned int sqe_head;
+        unsigned int sqe_tail;
+
+        size_t ring_sz;
+};
+
+struct io_uring_cq {
+        unsigned int *khead;
+        unsigned int *ktail;
+        unsigned int *kring_mask;
+        unsigned int *kring_entries;
+        unsigned int *koverflow;
+        struct io_uring_cqe *cqes;
+
+        size_t ring_sz;
+};
+
+struct io_uring {
+        struct io_uring_sq sq;
+        struct io_uring_cq cq;
+        int ring_fd;
+};
+
+#if defined(__x86_64) || defined(__i386__)
+#define read_barrier()  __asm__ __volatile__("":::"memory")
+#define write_barrier() __asm__ __volatile__("":::"memory")
+#else
+#define read_barrier()  __sync_synchronize()
+#define write_barrier() __sync_synchronize()
+#endif
+
+static inline int io_uring_mmap(int fd, struct io_uring_params *p,
+                                struct io_uring_sq *sq, struct io_uring_cq *cq)
+{
+        size_t size;
+        void *ptr;
+        int ret;
+
+        sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned int);
+        ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
+                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
+        if (ptr == MAP_FAILED)
+                return -errno;
+        sq->khead = ptr + p->sq_off.head;
+        sq->ktail = ptr + p->sq_off.tail;
+        sq->kring_mask = ptr + p->sq_off.ring_mask;
+        sq->kring_entries = ptr + p->sq_off.ring_entries;
+        sq->kflags = ptr + p->sq_off.flags;
+        sq->kdropped = ptr + p->sq_off.dropped;
+        sq->array = ptr + p->sq_off.array;
+
+        size = p->sq_entries * sizeof(struct io_uring_sqe);
+        sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
+                        MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
+        if (sq->sqes == MAP_FAILED) {
+                ret = -errno;
+err:
+                munmap(sq->khead, sq->ring_sz);
+                return ret;
+        }
+
+        cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
+        ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
+                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
+        if (ptr == MAP_FAILED) {
+                ret = -errno;
+                munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
+                goto err;
+        }
+        cq->khead = ptr + p->cq_off.head;
+        cq->ktail = ptr + p->cq_off.tail;
+        cq->kring_mask = ptr + p->cq_off.ring_mask;
+        cq->kring_entries = ptr + p->cq_off.ring_entries;
+        cq->koverflow = ptr + p->cq_off.overflow;
+        cq->cqes = ptr + p->cq_off.cqes;
+        return 0;
+}
+
+static inline int io_uring_setup(unsigned int entries,
+                                 struct io_uring_params *p)
+{
+        return syscall(__NR_io_uring_setup, entries, p);
+}
+
+static inline int io_uring_enter(int fd, unsigned int to_submit,
+                                 unsigned int min_complete,
+                                 unsigned int flags, sigset_t *sig)
+{
+        return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
+                       flags, sig, _NSIG / 8);
+}
+
+static inline int io_uring_queue_init(unsigned int entries,
+                                      struct io_uring *ring,
+                                      unsigned int flags)
+{
+        struct io_uring_params p;
+        int fd, ret;
+
+        memset(ring, 0, sizeof(*ring));
+        memset(&p, 0, sizeof(p));
+        p.flags = flags;
+
+        fd = io_uring_setup(entries, &p);
+        if (fd < 0)
+                return fd;
+        ret = io_uring_mmap(fd, &p, &ring->sq, &ring->cq);
+        if (!ret)
+                ring->ring_fd = fd;
+        else
+                close(fd);
+        return ret;
+}
+
+/* Get a sqe */
+static inline struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
+{
+        struct io_uring_sq *sq = &ring->sq;
+
+        if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries)
+                return NULL;
+        return &sq->sqes[sq->sqe_tail++ & *sq->kring_mask];
+}
+
+static inline int io_uring_wait_cqe(struct io_uring *ring,
+                                    struct io_uring_cqe **cqe_ptr)
+{
+        struct io_uring_cq *cq = &ring->cq;
+        const unsigned int mask = *cq->kring_mask;
+        unsigned int head = *cq->khead;
+        int ret;
+
+        *cqe_ptr = NULL;
+        do {
+                read_barrier();
+                if (head != *cq->ktail) {
+                        *cqe_ptr = &cq->cqes[head & mask];
+                        break;
+                }
+                ret = io_uring_enter(ring->ring_fd, 0, 1,
+                                     IORING_ENTER_GETEVENTS, NULL);
+                if (ret < 0)
+                        return -errno;
+        } while (1);
+
+        return 0;
+}
+
+static inline int io_uring_submit(struct io_uring *ring)
+{
+        struct io_uring_sq *sq = &ring->sq;
+        const unsigned int mask = *sq->kring_mask;
+        unsigned int ktail, submitted, to_submit;
+        int ret;
+
+        read_barrier();
+        if (*sq->khead != *sq->ktail) {
+                submitted = *sq->kring_entries;
+                goto submit;
+        }
+        if (sq->sqe_head == sq->sqe_tail)
+                return 0;
+
+        ktail = *sq->ktail;
+        to_submit = sq->sqe_tail - sq->sqe_head;
+        for (submitted = 0; submitted < to_submit; submitted++) {
+                read_barrier();
+                sq->array[ktail++ & mask] = sq->sqe_head++ & mask;
+        }
+        if (!submitted)
+                return 0;
+
+        if (*sq->ktail != ktail) {
+                write_barrier();
+                *sq->ktail = ktail;
+                write_barrier();
+        }
+submit:
+        ret = io_uring_enter(ring->ring_fd, submitted, 0,
+                             IORING_ENTER_GETEVENTS, NULL);
+        return ret < 0 ? -errno : ret;
+}
+
+static inline void io_uring_queue_exit(struct io_uring *ring)
+{
+        struct io_uring_sq *sq = &ring->sq;
+
+        munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
+        munmap(sq->khead, sq->ring_sz);
+        close(ring->ring_fd);
+}
+
+/* Prepare and send the SQE */
+static inline void io_uring_prep_cmd(struct io_uring_sqe *sqe, int op,
+                                     int sockfd,
+                                     int level, int optname,
+                                     const void *optval,
+                                     int optlen)
+{
+        memset(sqe, 0, sizeof(*sqe));
+        sqe->opcode = (__u8)IORING_OP_URING_CMD;
+        sqe->fd = sockfd;
+        sqe->cmd_op = op;
+
+        sqe->level = level;
+        sqe->optname = optname;
+        sqe->optval = (unsigned long long)optval;
+        sqe->optlen = optlen;
+}
+
+static inline int io_uring_register_buffers(struct io_uring *ring,
+                                            const struct iovec *iovecs,
+                                            unsigned int nr_iovecs)
+{
+        int ret;
+
+        ret = syscall(__NR_io_uring_register, ring->ring_fd,
+                      IORING_REGISTER_BUFFERS, iovecs, nr_iovecs);
+        return (ret < 0) ? -errno : ret;
+}
+
+static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
+                                      const void *buf, size_t len, int flags)
+{
+        memset(sqe, 0, sizeof(*sqe));
+        sqe->opcode = (__u8)IORING_OP_SEND;
+        sqe->fd = sockfd;
+        sqe->addr = (unsigned long)buf;
+        sqe->len = len;
+        sqe->msg_flags = (__u32)flags;
+}
+
+static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
+                                        const void *buf, size_t len, int flags,
+                                        unsigned int zc_flags)
+{
+        io_uring_prep_send(sqe, sockfd, buf, len, flags);
+        sqe->opcode = (__u8)IORING_OP_SEND_ZC;
+        sqe->ioprio = zc_flags;
+}
+
+static inline void io_uring_cqe_seen(struct io_uring *ring)
+{
+        *(&ring->cq)->khead += 1;
+        write_barrier();
+}
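For orientation, here is a minimal sketch (not part of the commit) of how a test drives these helpers: initialize a ring, grab an SQE, prepare a send on an already-connected socket, submit, and reap the completion. The function name send_with_ring and the queue depth of 16 are made up for illustration; it compiles only with the include path added in the Makefile hunk below.

#include <io_uring/mini_liburing.h>

/* Sketch: send one buffer over an already-connected socket (sockfd
 * is assumed to exist) and wait for its completion.
 */
static int send_with_ring(int sockfd, const void *buf, size_t len)
{
        struct io_uring_cqe *cqe;
        struct io_uring_sqe *sqe;
        struct io_uring ring;
        int ret;

        ret = io_uring_queue_init(16, &ring, 0);
        if (ret)
                return ret;

        sqe = io_uring_get_sqe(&ring);  /* NULL when the SQ ring is full */
        if (!sqe) {
                ret = -1;
                goto out;
        }
        io_uring_prep_send(sqe, sockfd, buf, len, 0);

        ret = io_uring_submit(&ring);   /* number submitted, or -errno */
        if (ret < 0)
                goto out;

        ret = io_uring_wait_cqe(&ring, &cqe);
        if (!ret) {
                ret = cqe->res;         /* bytes sent, or -errno */
                io_uring_cqe_seen(&ring);
        }
out:
        io_uring_queue_exit(&ring);
        return ret;
}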
tools/testing/selftests/net/Makefile (+1)

···
 $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread -lcrypto
 $(OUTPUT)/tcp_inq: LDLIBS += -lpthread
 $(OUTPUT)/bind_bhash: LDLIBS += -lpthread
+$(OUTPUT)/io_uring_zerocopy_tx: CFLAGS += -I../../../include/

 # Rules to generate bpf obj nat6to4.o
 CLANG ?= clang
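Relative to tools/testing/selftests/net/, the added -I../../../include/ resolves to tools/include/, which is what lets the test pull in the helpers via #include <io_uring/mini_liburing.h> in the next hunk.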
tools/testing/selftests/net/io_uring_zerocopy_tx.c (+2, -266)

···
 #include <sys/un.h>
 #include <sys/wait.h>

+#include <io_uring/mini_liburing.h>
+
 #define NOTIF_TAG 0xfffffffULL
 #define NONZC_TAG 0
 #define ZC_TAG 1
···
 static struct sockaddr_storage cfg_dst_addr;

 static char payload[IP_MAXPACKET] __attribute__((aligned(4096)));
-
-struct io_sq_ring {
-        unsigned *head;
-        unsigned *tail;
-        unsigned *ring_mask;
-        unsigned *ring_entries;
-        unsigned *flags;
-        unsigned *array;
-};
-
-struct io_cq_ring {
-        unsigned *head;
-        unsigned *tail;
-        unsigned *ring_mask;
-        unsigned *ring_entries;
-        struct io_uring_cqe *cqes;
-};
-
-struct io_uring_sq {
-        unsigned *khead;
-        unsigned *ktail;
-        unsigned *kring_mask;
-        unsigned *kring_entries;
-        unsigned *kflags;
-        unsigned *kdropped;
-        unsigned *array;
-        struct io_uring_sqe *sqes;
-
-        unsigned sqe_head;
-        unsigned sqe_tail;
-
-        size_t ring_sz;
-};
-
-struct io_uring_cq {
-        unsigned *khead;
-        unsigned *ktail;
-        unsigned *kring_mask;
-        unsigned *kring_entries;
-        unsigned *koverflow;
-        struct io_uring_cqe *cqes;
-
-        size_t ring_sz;
-};
-
-struct io_uring {
-        struct io_uring_sq sq;
-        struct io_uring_cq cq;
-        int ring_fd;
-};
-
-#ifdef __alpha__
-# ifndef __NR_io_uring_setup
-# define __NR_io_uring_setup 535
-# endif
-# ifndef __NR_io_uring_enter
-# define __NR_io_uring_enter 536
-# endif
-# ifndef __NR_io_uring_register
-# define __NR_io_uring_register 537
-# endif
-#else /* !__alpha__ */
-# ifndef __NR_io_uring_setup
-# define __NR_io_uring_setup 425
-# endif
-# ifndef __NR_io_uring_enter
-# define __NR_io_uring_enter 426
-# endif
-# ifndef __NR_io_uring_register
-# define __NR_io_uring_register 427
-# endif
-#endif
-
-#if defined(__x86_64) || defined(__i386__)
-#define read_barrier()  __asm__ __volatile__("":::"memory")
-#define write_barrier() __asm__ __volatile__("":::"memory")
-#else
-
-#define read_barrier()  __sync_synchronize()
-#define write_barrier() __sync_synchronize()
-#endif
-
-static int io_uring_setup(unsigned int entries, struct io_uring_params *p)
-{
-        return syscall(__NR_io_uring_setup, entries, p);
-}
-
-static int io_uring_enter(int fd, unsigned int to_submit,
-                          unsigned int min_complete,
-                          unsigned int flags, sigset_t *sig)
-{
-        return syscall(__NR_io_uring_enter, fd, to_submit, min_complete,
-                       flags, sig, _NSIG / 8);
-}
-
-static int io_uring_register_buffers(struct io_uring *ring,
-                                     const struct iovec *iovecs,
-                                     unsigned nr_iovecs)
-{
-        int ret;
-
-        ret = syscall(__NR_io_uring_register, ring->ring_fd,
-                      IORING_REGISTER_BUFFERS, iovecs, nr_iovecs);
-        return (ret < 0) ? -errno : ret;
-}
-
-static int io_uring_mmap(int fd, struct io_uring_params *p,
-                         struct io_uring_sq *sq, struct io_uring_cq *cq)
-{
-        size_t size;
-        void *ptr;
-        int ret;
-
-        sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
-        ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
-                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQ_RING);
-        if (ptr == MAP_FAILED)
-                return -errno;
-        sq->khead = ptr + p->sq_off.head;
-        sq->ktail = ptr + p->sq_off.tail;
-        sq->kring_mask = ptr + p->sq_off.ring_mask;
-        sq->kring_entries = ptr + p->sq_off.ring_entries;
-        sq->kflags = ptr + p->sq_off.flags;
-        sq->kdropped = ptr + p->sq_off.dropped;
-        sq->array = ptr + p->sq_off.array;
-
-        size = p->sq_entries * sizeof(struct io_uring_sqe);
-        sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
-                        MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_SQES);
-        if (sq->sqes == MAP_FAILED) {
-                ret = -errno;
-err:
-                munmap(sq->khead, sq->ring_sz);
-                return ret;
-        }
-
-        cq->ring_sz = p->cq_off.cqes + p->cq_entries * sizeof(struct io_uring_cqe);
-        ptr = mmap(0, cq->ring_sz, PROT_READ | PROT_WRITE,
-                   MAP_SHARED | MAP_POPULATE, fd, IORING_OFF_CQ_RING);
-        if (ptr == MAP_FAILED) {
-                ret = -errno;
-                munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
-                goto err;
-        }
-        cq->khead = ptr + p->cq_off.head;
-        cq->ktail = ptr + p->cq_off.tail;
-        cq->kring_mask = ptr + p->cq_off.ring_mask;
-        cq->kring_entries = ptr + p->cq_off.ring_entries;
-        cq->koverflow = ptr + p->cq_off.overflow;
-        cq->cqes = ptr + p->cq_off.cqes;
-        return 0;
-}
-
-static int io_uring_queue_init(unsigned entries, struct io_uring *ring,
-                               unsigned flags)
-{
-        struct io_uring_params p;
-        int fd, ret;
-
-        memset(ring, 0, sizeof(*ring));
-        memset(&p, 0, sizeof(p));
-        p.flags = flags;
-
-        fd = io_uring_setup(entries, &p);
-        if (fd < 0)
-                return fd;
-        ret = io_uring_mmap(fd, &p, &ring->sq, &ring->cq);
-        if (!ret)
-                ring->ring_fd = fd;
-        else
-                close(fd);
-        return ret;
-}
-
-static int io_uring_submit(struct io_uring *ring)
-{
-        struct io_uring_sq *sq = &ring->sq;
-        const unsigned mask = *sq->kring_mask;
-        unsigned ktail, submitted, to_submit;
-        int ret;
-
-        read_barrier();
-        if (*sq->khead != *sq->ktail) {
-                submitted = *sq->kring_entries;
-                goto submit;
-        }
-        if (sq->sqe_head == sq->sqe_tail)
-                return 0;
-
-        ktail = *sq->ktail;
-        to_submit = sq->sqe_tail - sq->sqe_head;
-        for (submitted = 0; submitted < to_submit; submitted++) {
-                read_barrier();
-                sq->array[ktail++ & mask] = sq->sqe_head++ & mask;
-        }
-        if (!submitted)
-                return 0;
-
-        if (*sq->ktail != ktail) {
-                write_barrier();
-                *sq->ktail = ktail;
-                write_barrier();
-        }
-submit:
-        ret = io_uring_enter(ring->ring_fd, submitted, 0,
-                             IORING_ENTER_GETEVENTS, NULL);
-        return ret < 0 ? -errno : ret;
-}
-
-static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
-                                      const void *buf, size_t len, int flags)
-{
-        memset(sqe, 0, sizeof(*sqe));
-        sqe->opcode = (__u8) IORING_OP_SEND;
-        sqe->fd = sockfd;
-        sqe->addr = (unsigned long) buf;
-        sqe->len = len;
-        sqe->msg_flags = (__u32) flags;
-}
-
-static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
-                                        const void *buf, size_t len, int flags,
-                                        unsigned zc_flags)
-{
-        io_uring_prep_send(sqe, sockfd, buf, len, flags);
-        sqe->opcode = (__u8) IORING_OP_SEND_ZC;
-        sqe->ioprio = zc_flags;
-}
-
-static struct io_uring_sqe *io_uring_get_sqe(struct io_uring *ring)
-{
-        struct io_uring_sq *sq = &ring->sq;
-
-        if (sq->sqe_tail + 1 - sq->sqe_head > *sq->kring_entries)
-                return NULL;
-        return &sq->sqes[sq->sqe_tail++ & *sq->kring_mask];
-}
-
-static int io_uring_wait_cqe(struct io_uring *ring, struct io_uring_cqe **cqe_ptr)
-{
-        struct io_uring_cq *cq = &ring->cq;
-        const unsigned mask = *cq->kring_mask;
-        unsigned head = *cq->khead;
-        int ret;
-
-        *cqe_ptr = NULL;
-        do {
-                read_barrier();
-                if (head != *cq->ktail) {
-                        *cqe_ptr = &cq->cqes[head & mask];
-                        break;
-                }
-                ret = io_uring_enter(ring->ring_fd, 0, 1,
-                                     IORING_ENTER_GETEVENTS, NULL);
-                if (ret < 0)
-                        return -errno;
-        } while (1);
-
-        return 0;
-}
-
-static inline void io_uring_cqe_seen(struct io_uring *ring)
-{
-        *(&ring->cq)->khead += 1;
-        write_barrier();
-}

 static unsigned long gettimeofday_ms(void)
 {
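The one helper above with no caller in this patch is io_uring_prep_cmd(), which fills an IORING_OP_URING_CMD SQE with socket-option fields for the follow-up BPF sockopt test mentioned in the commit message. Below is a hypothetical sketch of such a caller; the cmd_op value SOCKET_URING_OP_GETSOCKOPT and its kernel-side support come from later patches in this series, not from this commit.

#include <sys/socket.h>
#include <io_uring/mini_liburing.h>

/* Hypothetical: fetch SO_REUSEADDR through an io_uring command SQE.
 * Assumes a kernel with SOCKET_URING_OP_GETSOCKOPT support.
 */
static int getsockopt_via_uring(struct io_uring *ring, int sockfd,
                                void *optval, int optlen)
{
        struct io_uring_cqe *cqe;
        struct io_uring_sqe *sqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -1;
        io_uring_prep_cmd(sqe, SOCKET_URING_OP_GETSOCKOPT, sockfd,
                          SOL_SOCKET, SO_REUSEADDR, optval, optlen);

        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret)
                return ret;
        ret = cqe->res;         /* negative errno on failure */
        io_uring_cqe_seen(ring);
        return ret;
}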