Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring/bpf_filter: move filter size and populate helper into struct

Rather than open-code this logic in io_uring_populate_bpf_ctx() with
a switch, move it to the issue side definitions. Outside of making this
easier to extend in the future, it's also a prep patch for using the
pdu size for a given opcode filter elsewhere.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

+18 -11
+6 -11
io_uring/bpf_filter.c
···
 26  26  static void io_uring_populate_bpf_ctx(struct io_uring_bpf_ctx *bctx,
 27  27  				      struct io_kiocb *req)
 28  28  {
     29 +	const struct io_issue_def *def = &io_issue_defs[req->opcode];
     30 +
 29  31  	bctx->opcode = req->opcode;
 30  32  	bctx->sqe_flags = (__force int) req->flags & SQE_VALID_FLAGS;
 31  33  	bctx->user_data = req->cqe.user_data;
···
 36  34  	       sizeof(*bctx) - offsetof(struct io_uring_bpf_ctx, pdu_size));
 37  35  
 38  36  	/*
 39     -	 * Opcodes can provide a handler fo populating more data into bctx,
     37 +	 * Opcodes can provide a handler for populating more data into bctx,
 40  38  	 * for filters to use.
 41  39  	 */
 42     -	switch (req->opcode) {
 43     -	case IORING_OP_SOCKET:
 44     -		bctx->pdu_size = sizeof(bctx->socket);
 45     -		io_socket_bpf_populate(bctx, req);
 46     -		break;
 47     -	case IORING_OP_OPENAT:
 48     -	case IORING_OP_OPENAT2:
 49     -		bctx->pdu_size = sizeof(bctx->open);
 50     -		io_openat_bpf_populate(bctx, req);
 51     -		break;
     40 +	if (def->filter_pdu_size) {
     41 +		bctx->pdu_size = def->filter_pdu_size;
     42 +		def->filter_populate(bctx, req);
 52  43  	}
 53  44  }
 54  45  
+6
io_uring/opdef.c
···
 221 221  		.issue = io_fallocate,
 222 222  	},
 223 223  	[IORING_OP_OPENAT] = {
     224 +		.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
 224 225  		.prep = io_openat_prep,
 225 226  		.issue = io_openat,
     227 +		.filter_populate = io_openat_bpf_populate,
 226 228  	},
 227 229  	[IORING_OP_CLOSE] = {
 228 230  		.prep = io_close_prep,
···
 311 309  #endif
 312 310  	},
 313 311  	[IORING_OP_OPENAT2] = {
     312 +		.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, open),
 314 313  		.prep = io_openat2_prep,
 315 314  		.issue = io_openat2,
     315 +		.filter_populate = io_openat_bpf_populate,
 316 316  	},
 317 317  	[IORING_OP_EPOLL_CTL] = {
 318 318  		.unbound_nonreg_file = 1,
···
 410 406  	[IORING_OP_SOCKET] = {
 411 407  		.audit_skip = 1,
 412 408  #if defined(CONFIG_NET)
     409 +		.filter_pdu_size = sizeof_field(struct io_uring_bpf_ctx, socket),
 413 410  		.prep = io_socket_prep,
 414 411  		.issue = io_socket,
     412 +		.filter_populate = io_socket_bpf_populate,
 415 413  #else
 416 414  		.prep = io_eopnotsupp_prep,
 417 415  #endif
+6
io_uring/opdef.h
···
  2   2  #ifndef IOU_OP_DEF_H
  3   3  #define IOU_OP_DEF_H
  4   4  
      5 +struct io_uring_bpf_ctx;
      6 +
  5   7  struct io_issue_def {
  6   8  	/* needs req->file assigned */
  7   9  	unsigned needs_file : 1;
···
 35  33  	/* size of async data needed, if any */
 36  34  	unsigned short async_size;
 37  35  
     36 +	/* bpf filter pdu size, if any */
     37 +	unsigned short filter_pdu_size;
     38 +
 38  39  	int (*issue)(struct io_kiocb *, unsigned int);
 39  40  	int (*prep)(struct io_kiocb *, const struct io_uring_sqe *);
     41 +	void (*filter_populate)(struct io_uring_bpf_ctx *, struct io_kiocb *);
 40  42  };
 41  43  
 42  44  struct io_cold_def {