Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring: temporarily disable registered waits

Disable wait argument registration as it'll be replaced with a more
generic feature. We'll still need IORING_ENTER_EXT_ARG_REG parsing
in a few commits so leave it be.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/70b1d1d218c41ba77a76d1789c8641dab0b0563e.1731689588.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Pavel Begunkov and committed by Jens Axboe
83e04152 3730aebb

-106
-10
include/linux/io_uring_types.h
··· 330 330 atomic_t cq_wait_nr; 331 331 atomic_t cq_timeouts; 332 332 struct wait_queue_head cq_wait; 333 - 334 - /* 335 - * If registered with IORING_REGISTER_CQWAIT_REG, a single 336 - * page holds N entries, mapped in cq_wait_arg. cq_wait_index 337 - * is the maximum allowable index. 338 - */ 339 - struct io_uring_reg_wait *cq_wait_arg; 340 - unsigned char cq_wait_index; 341 333 } ____cacheline_aligned_in_smp; 342 334 343 335 /* timeouts */ ··· 423 431 unsigned short n_sqe_pages; 424 432 struct page **ring_pages; 425 433 struct page **sqe_pages; 426 - 427 - struct page **cq_wait_page; 428 434 }; 429 435 430 436 struct io_tw_state {
-3
include/uapi/linux/io_uring.h
··· 627 627 /* resize CQ ring */ 628 628 IORING_REGISTER_RESIZE_RINGS = 33, 629 629 630 - /* register fixed io_uring_reg_wait arguments */ 631 - IORING_REGISTER_CQWAIT_REG = 34, 632 - 633 630 /* this goes last */ 634 631 IORING_REGISTER_LAST, 635 632
-10
io_uring/io_uring.c
··· 2709 2709 io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free); 2710 2710 io_futex_cache_free(ctx); 2711 2711 io_destroy_buffers(ctx); 2712 - io_unregister_cqwait_reg(ctx); 2713 2712 mutex_unlock(&ctx->uring_lock); 2714 2713 if (ctx->sq_creds) 2715 2714 put_cred(ctx->sq_creds); ··· 3194 3195 static struct io_uring_reg_wait *io_get_ext_arg_reg(struct io_ring_ctx *ctx, 3195 3196 const struct io_uring_getevents_arg __user *uarg) 3196 3197 { 3197 - struct io_uring_reg_wait *arg = READ_ONCE(ctx->cq_wait_arg); 3198 - 3199 - if (arg) { 3200 - unsigned int index = (unsigned int) (uintptr_t) uarg; 3201 - 3202 - if (index <= ctx->cq_wait_index) 3203 - return arg + index; 3204 - } 3205 - 3206 3198 return ERR_PTR(-EFAULT); 3207 3199 } 3208 3200
-82
io_uring/register.c
··· 570 570 return ret; 571 571 } 572 572 573 - void io_unregister_cqwait_reg(struct io_ring_ctx *ctx) 574 - { 575 - unsigned short npages = 1; 576 - 577 - if (!ctx->cq_wait_page) 578 - return; 579 - 580 - io_pages_unmap(ctx->cq_wait_arg, &ctx->cq_wait_page, &npages, true); 581 - ctx->cq_wait_arg = NULL; 582 - if (ctx->user) 583 - __io_unaccount_mem(ctx->user, 1); 584 - } 585 - 586 - /* 587 - * Register a page holding N entries of struct io_uring_reg_wait, which can 588 - * be used via io_uring_enter(2) if IORING_GETEVENTS_EXT_ARG_REG is set. 589 - * If that is set with IORING_GETEVENTS_EXT_ARG, then instead of passing 590 - * in a pointer for a struct io_uring_getevents_arg, an index into this 591 - * registered array is passed, avoiding two (arg + timeout) copies per 592 - * invocation. 593 - */ 594 - static int io_register_cqwait_reg(struct io_ring_ctx *ctx, void __user *uarg) 595 - { 596 - struct io_uring_cqwait_reg_arg arg; 597 - struct io_uring_reg_wait *reg; 598 - struct page **pages; 599 - unsigned long len; 600 - int nr_pages, poff; 601 - int ret; 602 - 603 - if (ctx->cq_wait_page || ctx->cq_wait_arg) 604 - return -EBUSY; 605 - if (copy_from_user(&arg, uarg, sizeof(arg))) 606 - return -EFAULT; 607 - if (!arg.nr_entries || arg.flags) 608 - return -EINVAL; 609 - if (arg.struct_size != sizeof(*reg)) 610 - return -EINVAL; 611 - if (check_mul_overflow(arg.struct_size, arg.nr_entries, &len)) 612 - return -EOVERFLOW; 613 - if (len > PAGE_SIZE) 614 - return -EINVAL; 615 - /* offset + len must fit within a page, and must be reg_wait aligned */ 616 - poff = arg.user_addr & ~PAGE_MASK; 617 - if (len + poff > PAGE_SIZE) 618 - return -EINVAL; 619 - if (poff % arg.struct_size) 620 - return -EINVAL; 621 - 622 - pages = io_pin_pages(arg.user_addr, len, &nr_pages); 623 - if (IS_ERR(pages)) 624 - return PTR_ERR(pages); 625 - ret = -EINVAL; 626 - if (nr_pages != 1) 627 - goto out_free; 628 - if (ctx->user) { 629 - ret = __io_account_mem(ctx->user, 1); 630 - if (ret) 631 - 
goto out_free; 632 - } 633 - 634 - reg = vmap(pages, 1, VM_MAP, PAGE_KERNEL); 635 - if (reg) { 636 - ctx->cq_wait_index = arg.nr_entries - 1; 637 - WRITE_ONCE(ctx->cq_wait_page, pages); 638 - WRITE_ONCE(ctx->cq_wait_arg, (void *) reg + poff); 639 - return 0; 640 - } 641 - ret = -ENOMEM; 642 - if (ctx->user) 643 - __io_unaccount_mem(ctx->user, 1); 644 - out_free: 645 - io_pages_free(&pages, nr_pages); 646 - return ret; 647 - } 648 - 649 573 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, 650 574 void __user *arg, unsigned nr_args) 651 575 __releases(ctx->uring_lock) ··· 763 839 if (!arg || nr_args != 1) 764 840 break; 765 841 ret = io_register_resize_rings(ctx, arg); 766 - break; 767 - case IORING_REGISTER_CQWAIT_REG: 768 - ret = -EINVAL; 769 - if (!arg || nr_args != 1) 770 - break; 771 - ret = io_register_cqwait_reg(ctx, arg); 772 842 break; 773 843 default: 774 844 ret = -EINVAL;
-1
io_uring/register.h
··· 5 5 int io_eventfd_unregister(struct io_ring_ctx *ctx); 6 6 int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id); 7 7 struct file *io_uring_register_get_file(unsigned int fd, bool registered); 8 - void io_unregister_cqwait_reg(struct io_ring_ctx *ctx); 9 8 10 9 #endif