Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

io_uring/napi: Use lock guards

Convert napi locks to use the shiny new Scope-Based Resource Management
machinery.

Signed-off-by: Olivier Langlois <olivier@trillion01.com>
Link: https://lore.kernel.org/r/2680ca47ee183cfdb89d1a40c84d349edeb620ab.1728828877.git.olivier@trillion01.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
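For context on the Scope-Based Resource Management helpers the patch switches to: <linux/cleanup.h> builds guard() and scoped_guard() on the compiler's __attribute__((cleanup)) extension, so a lock taken through a guard is released automatically on every path that leaves its scope. The sketch below is a minimal userspace model of that idea only, not kernel code; my_mutex_guard, MY_GUARD, and add_if_positive are illustrative names.

/*
 * Minimal userspace model of a scope-based lock guard, assuming only the
 * GCC/Clang __attribute__((cleanup)) extension that <linux/cleanup.h>
 * builds guard()/scoped_guard() on.  Names here are illustrative, not
 * kernel APIs.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

struct my_mutex_guard {
	pthread_mutex_t *m;
};

/* Constructor: take the lock and remember which one is held. */
static inline struct my_mutex_guard my_mutex_guard_init(pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	return (struct my_mutex_guard){ .m = m };
}

/* Destructor: runs automatically when the guard variable goes out of scope. */
static inline void my_mutex_guard_exit(struct my_mutex_guard *g)
{
	pthread_mutex_unlock(g->m);
}

#define MY_GUARD(m)							\
	struct my_mutex_guard __guard					\
		__attribute__((cleanup(my_mutex_guard_exit))) =		\
		my_mutex_guard_init(m)

static int counter;

static int add_if_positive(int value)
{
	MY_GUARD(&lock);	/* held from here to any exit of this function */

	if (value <= 0)
		return -1;	/* early return: the guard still unlocks */
	counter += value;
	return 0;
}

int main(void)
{
	add_if_positive(3);
	add_if_positive(-1);
	printf("counter = %d\n", counter);
	return 0;
}

The patch below uses the kernel's real helpers in the same way: guard(spinlock)(&ctx->napi_lock) replaces a spin_lock()/spin_unlock() pair that covers the rest of the function, and scoped_guard(rcu) replaces rcu_read_lock()/rcu_read_unlock() around a brace-delimited block.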

Authored by Olivier Langlois, committed by Jens Axboe
db1e1adf a5e26f49

+21 -19
io_uring/napi.c
···
 	hash_list = &ctx->napi_ht[hash_min(napi_id, HASH_BITS(ctx->napi_ht))];
 
-	rcu_read_lock();
-	e = io_napi_hash_find(hash_list, napi_id);
-	if (e) {
-		WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
-		rcu_read_unlock();
-		return -EEXIST;
+	scoped_guard(rcu) {
+		e = io_napi_hash_find(hash_list, napi_id);
+		if (e) {
+			WRITE_ONCE(e->timeout, jiffies + NAPI_TIMEOUT);
+			return -EEXIST;
+		}
 	}
-	rcu_read_unlock();
 
 	e = kmalloc(sizeof(*e), GFP_NOWAIT);
 	if (!e)
···
 	e->napi_id = napi_id;
 	e->timeout = jiffies + NAPI_TIMEOUT;
 
+	/*
+	 * guard(spinlock) is not used to manually unlock it before calling
+	 * kfree()
+	 */
 	spin_lock(&ctx->napi_lock);
 	if (unlikely(io_napi_hash_find(hash_list, napi_id))) {
 		spin_unlock(&ctx->napi_lock);
···
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	/*
 	 * list_for_each_entry_safe() is not required as long as:
 	 * 1. list_del_rcu() does not reset the deleted node next pointer
···
 			kfree_rcu(e, rcu);
 		}
 	}
-	spin_unlock(&ctx->napi_lock);
 }
 
 static inline void io_napi_remove_stale(struct io_ring_ctx *ctx, bool is_stale)
···
 	if (list_is_singular(&ctx->napi_list))
 		loop_end_arg = iowq;
 
-	rcu_read_lock();
-	do {
-		is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
-	} while (!io_napi_busy_loop_should_end(iowq, start_time) && !loop_end_arg);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		do {
+			is_stale = __io_napi_do_busy_loop(ctx, loop_end_arg);
+		} while (!io_napi_busy_loop_should_end(iowq, start_time) &&
+			 !loop_end_arg);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 }
···
 {
 	struct io_napi_entry *e;
 
-	spin_lock(&ctx->napi_lock);
+	guard(spinlock)(&ctx->napi_lock);
 	list_for_each_entry(e, &ctx->napi_list, list) {
 		hash_del_rcu(&e->node);
 		kfree_rcu(e, rcu);
 	}
 	INIT_LIST_HEAD_RCU(&ctx->napi_list);
-	spin_unlock(&ctx->napi_lock);
 }
 
 /*
···
 	if (list_empty_careful(&ctx->napi_list))
 		return 0;
 
-	rcu_read_lock();
-	is_stale = __io_napi_do_busy_loop(ctx, NULL);
-	rcu_read_unlock();
+	scoped_guard(rcu) {
+		is_stale = __io_napi_do_busy_loop(ctx, NULL);
+	}
 
 	io_napi_remove_stale(ctx, is_stale);
 	return 1;
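The scoped_guard(rcu) { ... } blocks above confine the read-side critical section to the attached braces rather than the remainder of the function. A rough userspace model of how such a block-scoped guard can be put together, again assuming only the cleanup attribute and using illustrative names (SCOPED_MY_GUARD, unlock_cleanup), not the kernel macro:

/*
 * Userspace model of a scoped_guard()-style block.  A one-iteration for
 * loop gives the macro its own scope: the lock is taken before the body
 * runs and the cleanup fires as soon as the body is left, including via
 * break or return.  SCOPED_MY_GUARD is an illustrative name only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_cleanup(pthread_mutex_t **held)
{
	pthread_mutex_unlock(*held);
}

#define SCOPED_MY_GUARD(m)						\
	for (pthread_mutex_t *__held					\
		__attribute__((cleanup(unlock_cleanup))) =		\
			(pthread_mutex_lock(m), (m)),			\
	     *__once = (m);						\
	     __once; __once = NULL)

int main(void)
{
	int counter = 0;

	SCOPED_MY_GUARD(&lock) {
		counter++;	/* the lock is held only inside this block */
	}

	printf("counter = %d\n", counter);	/* lock already released here */
	return 0;
}

The one spin_lock()/spin_unlock() pair left untouched in __io_napi_add() is kept deliberately: the entry has to be freed with kfree() only after the lock is dropped, which is why the comment added by the patch notes that guard(spinlock) is not used there.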