Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

io_uring: move cancelations to be io_uring_task based

Right now the task_struct pointer is used as the key to match a task,
but in preparation for some io_kiocb changes, move it to using struct
io_uring_task instead. No functional changes intended in this patch.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
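
The heart of the patch, isolated: the cancelation match key moves from the task_struct pointer itself to the struct io_uring_task hanging one dereference off it. Below is a minimal standalone sketch of the before/after predicate, not kernel code: the structs are stubbed down to the only fields the comparison touches, and the per-request checks the real io_match_task_safe() performs after the cancel_all test are elided.

#include <stdbool.h>

struct io_uring_task;                           /* opaque per-task io_uring state */

struct task_struct {
        struct io_uring_task *io_uring;         /* set once a task uses io_uring */
};

struct io_kiocb {
        struct task_struct *task;               /* task that submitted the request */
};

/* Before: match requests by comparing task_struct pointers. */
static bool io_match_task_old(struct io_kiocb *head, struct task_struct *task,
                              bool cancel_all)
{
        if (task && head->task != task)
                return false;                   /* owned by a different task */
        if (cancel_all)
                return true;
        return false;                           /* real code inspects request state here */
}

/* After: match by io_uring_task, one dereference away. */
static bool io_match_task_new(struct io_kiocb *head, struct io_uring_task *tctx,
                              bool cancel_all)
{
        if (tctx && head->task->io_uring != tctx)
                return false;
        if (cancel_all)
                return true;
        return false;                           /* real code inspects request state here */
}

Either way, a NULL key matches every request, which is what the ring-wide cancelation paths rely on.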

12 files changed, +40 -40
io_uring/futex.c (+2 -2)

@@ -141,7 +141,7 @@
        return -ENOENT;
 }
 
-bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                         bool cancel_all)
 {
        struct hlist_node *tmp;
@@ -151,7 +151,7 @@
        lockdep_assert_held(&ctx->uring_lock);
 
        hlist_for_each_entry_safe(req, tmp, &ctx->futex_list, hash_node) {
-               if (!io_match_task_safe(req, task, cancel_all))
+               if (!io_match_task_safe(req, tctx, cancel_all))
                        continue;
                hlist_del_init(&req->hash_node);
                __io_futex_cancel(ctx, req);
io_uring/futex.h (+2 -2)

@@ -11,7 +11,7 @@
 #if defined(CONFIG_FUTEX)
 int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                    unsigned int issue_flags);
-bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_futex_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                         bool cancel_all);
 bool io_futex_cache_init(struct io_ring_ctx *ctx);
 void io_futex_cache_free(struct io_ring_ctx *ctx);
@@ -23,7 +23,7 @@
        return 0;
 }
 static inline bool io_futex_remove_all(struct io_ring_ctx *ctx,
-                                      struct task_struct *task, bool cancel_all)
+                                      struct io_uring_task *tctx, bool cancel_all)
 {
        return false;
 }
io_uring/io_uring.c (+21 -21)

@@ -142,7 +142,7 @@
 #define IO_CQ_WAKE_FORCE       (IO_CQ_WAKE_INIT >> 1)
 
 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-                                        struct task_struct *task,
+                                        struct io_uring_task *tctx,
                                         bool cancel_all);
 
 static void io_queue_sqe(struct io_kiocb *req);
@@ -201,12 +201,12 @@
  * As io_match_task() but protected against racing with linked timeouts.
  * User must not hold timeout_lock.
  */
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                        bool cancel_all)
 {
        bool matched;
 
-       if (task && head->task != task)
+       if (tctx && head->task->io_uring != tctx)
                return false;
        if (cancel_all)
                return true;
@@ -2987,7 +2987,7 @@
 }
 
 struct io_task_cancel {
-       struct task_struct *task;
+       struct io_uring_task *tctx;
        bool all;
 };
 
@@ -2996,11 +2996,11 @@
        struct io_kiocb *req = container_of(work, struct io_kiocb, work);
        struct io_task_cancel *cancel = data;
 
-       return io_match_task_safe(req, cancel->task, cancel->all);
+       return io_match_task_safe(req, cancel->tctx, cancel->all);
 }
 
 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
-                                        struct task_struct *task,
+                                        struct io_uring_task *tctx,
                                         bool cancel_all)
 {
        struct io_defer_entry *de;
@@ -3008,7 +3008,7 @@
 
        spin_lock(&ctx->completion_lock);
        list_for_each_entry_reverse(de, &ctx->defer_list, list) {
-               if (io_match_task_safe(de->req, task, cancel_all)) {
+               if (io_match_task_safe(de->req, tctx, cancel_all)) {
                        list_cut_position(&list, &ctx->defer_list, &de->list);
                        break;
                }
@@ -3051,10 +3051,9 @@
 }
 
 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
-                                               struct task_struct *task,
+                                               struct io_uring_task *tctx,
                                                bool cancel_all)
 {
-       struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
-       struct io_uring_task *tctx = task ? task->io_uring : NULL;
+       struct io_task_cancel cancel = { .tctx = tctx, .all = cancel_all, };
        enum io_wq_cancel cret;
        bool ret = false;
@@ -3068,9 +3069,9 @@
        if (!ctx->rings)
                return false;
 
-       if (!task) {
+       if (!tctx) {
                ret |= io_uring_try_cancel_iowq(ctx);
-       } else if (tctx && tctx->io_wq) {
+       } else if (tctx->io_wq) {
                /*
                 * Cancels requests of all rings, not only @ctx, but
                 * it's fine as the task is in exit/exec.
@@ -3093,15 +3094,15 @@
        if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
            io_allowed_defer_tw_run(ctx))
                ret |= io_run_local_work(ctx, INT_MAX) > 0;
-       ret |= io_cancel_defer_files(ctx, task, cancel_all);
+       ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
        mutex_lock(&ctx->uring_lock);
-       ret |= io_poll_remove_all(ctx, task, cancel_all);
-       ret |= io_waitid_remove_all(ctx, task, cancel_all);
-       ret |= io_futex_remove_all(ctx, task, cancel_all);
-       ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
+       ret |= io_poll_remove_all(ctx, tctx, cancel_all);
+       ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
+       ret |= io_futex_remove_all(ctx, tctx, cancel_all);
+       ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
        mutex_unlock(&ctx->uring_lock);
-       ret |= io_kill_timeouts(ctx, task, cancel_all);
-       if (task)
+       ret |= io_kill_timeouts(ctx, tctx, cancel_all);
+       if (tctx)
                ret |= io_run_task_work() > 0;
        else
                ret |= flush_delayed_work(&ctx->fallback_work);
@@ -3154,11 +3155,12 @@
                        if (node->ctx->sq_data)
                                continue;
                        loop |= io_uring_try_cancel_requests(node->ctx,
-                                                       current, cancel_all);
+                                                       current->io_uring,
+                                                       cancel_all);
                }
        } else {
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                        loop |= io_uring_try_cancel_requests(ctx,
-                                                       current,
+                                                       current->io_uring,
                                                        cancel_all);
 
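Note the knock-on effect in the last two hunks: io_uring_try_cancel_requests() no longer derives the key itself (the old "task ? task->io_uring : NULL" line is deleted), so the SQPOLL cancelation paths resolve it at the call site and pass current->io_uring directly. A NULL tctx keeps its previous meaning of matching requests regardless of the owning task.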
io_uring/io_uring.h (+1 -1)

@@ -115,7 +115,7 @@
 void io_task_refs_refill(struct io_uring_task *tctx);
 bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 
-bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                        bool cancel_all);
 
 void io_activate_pollwq(struct io_ring_ctx *ctx);
io_uring/poll.c (+2 -2)

@@ -714,7 +714,7 @@
 /*
  * Returns true if we found and killed one or more poll requests
  */
-__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                               bool cancel_all)
 {
        unsigned nr_buckets = 1U << ctx->cancel_table.hash_bits;
@@ -729,7 +729,7 @@
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
 
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
-                       if (io_match_task_safe(req, tsk, cancel_all)) {
+                       if (io_match_task_safe(req, tctx, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
io_uring/poll.h (+1 -1)

@@ -40,7 +40,7 @@
 int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags);
 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags);
-bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
+bool io_poll_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                        bool cancel_all);
 
 void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts);
io_uring/timeout.c (+4 -4)

@@ -637,13 +637,13 @@
        io_put_req(req);
 }
 
-static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
+static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
                          bool cancel_all)
        __must_hold(&head->ctx->timeout_lock)
 {
        struct io_kiocb *req;
 
-       if (task && head->task != task)
+       if (tctx && head->task->io_uring != tctx)
                return false;
        if (cancel_all)
                return true;
@@ -656,7 +656,7 @@
 }
 
 /* Returns true if we found and killed one or more timeouts */
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                             bool cancel_all)
 {
        struct io_timeout *timeout, *tmp;
@@ -671,7 +671,7 @@
        list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
-               if (io_match_task(req, tsk, cancel_all) &&
+               if (io_match_task(req, tctx, cancel_all) &&
                    io_kill_timeout(req, -ECANCELED))
                        canceled++;
        }
io_uring/timeout.h (+1 -1)

@@ -24,7 +24,7 @@
 __cold void io_flush_timeouts(struct io_ring_ctx *ctx);
 struct io_cancel_data;
 int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd);
-__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                             bool cancel_all);
 void io_queue_linked_timeout(struct io_kiocb *req);
 void io_disarm_next(struct io_kiocb *req);
io_uring/uring_cmd.c (+2 -2)

@@ -47,7 +47,7 @@
 }
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-                                  struct task_struct *task, bool cancel_all)
+                                  struct io_uring_task *tctx, bool cancel_all)
 {
        struct hlist_node *tmp;
        struct io_kiocb *req;
@@ -61,7 +61,7 @@
                                                   struct io_uring_cmd);
                struct file *file = req->file;
 
-               if (!cancel_all && req->task != task)
+               if (!cancel_all && req->task->io_uring != tctx)
                        continue;
 
                if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
io_uring/uring_cmd.h (+1 -1)

@@ -8,4 +8,4 @@
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-                                  struct task_struct *task, bool cancel_all);
+                                  struct io_uring_task *tctx, bool cancel_all);
io_uring/waitid.c (+2 -2)

@@ -184,7 +184,7 @@
        return -ENOENT;
 }
 
-bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                          bool cancel_all)
 {
        struct hlist_node *tmp;
@@ -194,7 +194,7 @@
        lockdep_assert_held(&ctx->uring_lock);
 
        hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
-               if (!io_match_task_safe(req, task, cancel_all))
+               if (!io_match_task_safe(req, tctx, cancel_all))
                        continue;
                hlist_del_init(&req->hash_node);
                __io_waitid_cancel(ctx, req);
io_uring/waitid.h (+1 -1)

@@ -11,5 +11,5 @@
 int io_waitid(struct io_kiocb *req, unsigned int issue_flags);
 int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                     unsigned int issue_flags);
-bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct task_struct *task,
+bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                          bool cancel_all);