Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

io_uring: add new helpers for posting overflows

Add two helpers, one for posting overflows for lockless_cq rings, and
one for non-lockless_cq rings. The former can allocate sanely with
GFP_KERNEL, but needs to grab the completion lock for posting, while the
latter must do non-sleeping allocs as it already holds the completion
lock.

While at it, mark the overflow handling functions as __cold as well, as
they should not generally be called during normal operations of the
ring.

Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
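
As a rough userspace sketch of the split described in the message above (not the kernel code itself): the names overflow_entry, ring_ctx, add_overflow, post_overflow and post_overflow_locked are made up, a pthread mutex stands in for ctx->completion_lock, and plain malloc stands in for the GFP_KERNEL vs GFP_ATOMIC distinction. The unlocked variant may block while allocating and only takes the lock around the queueing step; the locked variant runs with the lock already held, so in the kernel its allocation must not sleep.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for an overflow entry and the ring context. */
struct overflow_entry {
	unsigned long user_data;
	int res;
};

struct ring_ctx {
	pthread_mutex_t completion_lock;	/* plays the role of ctx->completion_lock */
	struct overflow_entry *overflow;	/* single-slot "overflow list" for the demo */
};

/* Queue an overflow entry; the caller must hold completion_lock. */
static bool add_overflow(struct ring_ctx *ctx, struct overflow_entry *e)
{
	if (!e)
		return false;
	free(ctx->overflow);			/* toy single-slot queue: drop the old entry */
	ctx->overflow = e;
	return true;
}

/*
 * Lock not yet held: the allocation may block (the GFP_KERNEL case) and
 * the lock is only taken around the brief queueing step.
 */
static void post_overflow(struct ring_ctx *ctx, unsigned long user_data, int res)
{
	struct overflow_entry *e = malloc(sizeof(*e));	/* may sleep */

	if (e) {
		e->user_data = user_data;
		e->res = res;
	}
	pthread_mutex_lock(&ctx->completion_lock);
	add_overflow(ctx, e);
	pthread_mutex_unlock(&ctx->completion_lock);
}

/*
 * Caller already holds completion_lock: in the kernel the allocation here
 * must be non-blocking (the GFP_ATOMIC case).
 */
static bool post_overflow_locked(struct ring_ctx *ctx, unsigned long user_data, int res)
{
	struct overflow_entry *e = malloc(sizeof(*e));	/* must not sleep in-kernel */

	if (e) {
		e->user_data = user_data;
		e->res = res;
	}
	return add_overflow(ctx, e);
}

int main(void)
{
	struct ring_ctx ctx = { .completion_lock = PTHREAD_MUTEX_INITIALIZER };

	post_overflow(&ctx, 1, -12);		/* caller does not hold the lock */

	pthread_mutex_lock(&ctx.completion_lock);
	post_overflow_locked(&ctx, 2, -12);	/* caller already holds the lock */
	pthread_mutex_unlock(&ctx.completion_lock);

	if (ctx.overflow)
		printf("last overflow: user_data=%lu res=%d\n",
		       ctx.overflow->user_data, ctx.overflow->res);
	free(ctx.overflow);
	return 0;
}

In the kernel the same choice is driven by ctx->lockless_cq, which is what the last hunk of the diff below switches on.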

+29 -21
io_uring/io_uring.c
···
 	}
 }
 
-static bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
-				   struct io_overflow_cqe *ocqe)
+static __cold bool io_cqring_add_overflow(struct io_ring_ctx *ctx,
+					  struct io_overflow_cqe *ocqe)
 {
 	lockdep_assert_held(&ctx->completion_lock);
 
···
 	return (struct io_cqe) { .user_data = user_data, .res = res, .flags = cflags };
 }
 
+static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe,
+				   struct io_big_cqe *big_cqe)
+{
+	struct io_overflow_cqe *ocqe;
+
+	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_KERNEL);
+	spin_lock(&ctx->completion_lock);
+	io_cqring_add_overflow(ctx, ocqe);
+	spin_unlock(&ctx->completion_lock);
+}
+
+static __cold bool io_cqe_overflow_locked(struct io_ring_ctx *ctx,
+					  struct io_cqe *cqe,
+					  struct io_big_cqe *big_cqe)
+{
+	struct io_overflow_cqe *ocqe;
+
+	ocqe = io_alloc_ocqe(ctx, cqe, big_cqe, GFP_ATOMIC);
+	return io_cqring_add_overflow(ctx, ocqe);
+}
+
 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
 {
 	bool filled;
···
 	io_cq_lock(ctx);
 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
 	if (unlikely(!filled)) {
-		struct io_overflow_cqe *ocqe;
 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, &cqe, NULL, GFP_ATOMIC);
-		filled = io_cqring_add_overflow(ctx, ocqe);
+		filled = io_cqe_overflow_locked(ctx, &cqe, NULL);
 	}
 	io_cq_unlock_post(ctx);
 	return filled;
···
 	lockdep_assert(ctx->lockless_cq);
 
 	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
-		struct io_overflow_cqe *ocqe;
 		struct io_cqe cqe = io_init_cqe(user_data, res, cflags);
 
-		ocqe = io_alloc_ocqe(ctx, &cqe, NULL, GFP_KERNEL);
-		spin_lock(&ctx->completion_lock);
-		io_cqring_add_overflow(ctx, ocqe);
-		spin_unlock(&ctx->completion_lock);
+		io_cqe_overflow(ctx, &cqe, NULL);
 	}
 	ctx->submit_state.cq_flush = true;
 }
···
 		 */
 		if (!(req->flags & (REQ_F_CQE_SKIP | REQ_F_REISSUE)) &&
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
-			gfp_t gfp = ctx->lockless_cq ? GFP_KERNEL : GFP_ATOMIC;
-			struct io_overflow_cqe *ocqe;
-
-			ocqe = io_alloc_ocqe(ctx, &req->cqe, &req->big_cqe, gfp);
-			if (ctx->lockless_cq) {
-				spin_lock(&ctx->completion_lock);
-				io_cqring_add_overflow(ctx, ocqe);
-				spin_unlock(&ctx->completion_lock);
-			} else {
-				io_cqring_add_overflow(ctx, ocqe);
-			}
+			if (ctx->lockless_cq)
+				io_cqe_overflow(ctx, &req->cqe, &req->big_cqe);
+			else
+				io_cqe_overflow_locked(ctx, &req->cqe, &req->big_cqe);
 		}
 	}
 	__io_cq_unlock_post(ctx);