Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

io_uring: add alloc_cache.c

Avoid inlining each and every helper from alloc_cache.h and move the cold bits
into a new file.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Gabriel Krisman Bertazi <krisman@suse.de>
Link: https://lore.kernel.org/r/06984c6cd58e703f7cfae5ab3067912f9f635a06.1738087204.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>

authored by

Pavel Begunkov and committed by
Jens Axboe
d19af0e9 16ac51a0

+54 -36
+1 -1
io_uring/Makefile
··· 13 13 sync.o msg_ring.o advise.o openclose.o \ 14 14 epoll.o statx.o timeout.o fdinfo.o \ 15 15 cancel.o waitid.o register.o \ 16 - truncate.o memmap.o 16 + truncate.o memmap.o alloc_cache.o 17 17 obj-$(CONFIG_IO_WQ) += io-wq.o 18 18 obj-$(CONFIG_FUTEX) += futex.o 19 19 obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
+44
io_uring/alloc_cache.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "alloc_cache.h" 4 + 5 + void io_alloc_cache_free(struct io_alloc_cache *cache, 6 + void (*free)(const void *)) 7 + { 8 + void *entry; 9 + 10 + if (!cache->entries) 11 + return; 12 + 13 + while ((entry = io_alloc_cache_get(cache)) != NULL) 14 + free(entry); 15 + 16 + kvfree(cache->entries); 17 + cache->entries = NULL; 18 + } 19 + 20 + /* returns false if the cache was initialized properly */ 21 + bool io_alloc_cache_init(struct io_alloc_cache *cache, 22 + unsigned max_nr, unsigned int size, 23 + unsigned int init_bytes) 24 + { 25 + cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL); 26 + if (!cache->entries) 27 + return true; 28 + 29 + cache->nr_cached = 0; 30 + cache->max_cached = max_nr; 31 + cache->elem_size = size; 32 + cache->init_clear = init_bytes; 33 + return false; 34 + } 35 + 36 + void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp) 37 + { 38 + void *obj; 39 + 40 + obj = kmalloc(cache->elem_size, gfp); 41 + if (obj && cache->init_clear) 42 + memset(obj, 0, cache->init_clear); 43 + return obj; 44 + }
+9 -35
io_uring/alloc_cache.h
··· 8 8 */ 9 9 #define IO_ALLOC_CACHE_MAX 128 10 10 11 + void io_alloc_cache_free(struct io_alloc_cache *cache, 12 + void (*free)(const void *)); 13 + bool io_alloc_cache_init(struct io_alloc_cache *cache, 14 + unsigned max_nr, unsigned int size, 15 + unsigned int init_bytes); 16 + 17 + void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp); 18 + 11 19 static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr) 12 20 { 13 21 if (IS_ENABLED(CONFIG_KASAN)) { ··· 65 57 obj = io_alloc_cache_get(cache); 66 58 if (obj) 67 59 return obj; 68 - 69 - obj = kmalloc(cache->elem_size, gfp); 70 - if (obj && cache->init_clear) 71 - memset(obj, 0, cache->init_clear); 72 - return obj; 60 + return io_cache_alloc_new(cache, gfp); 73 61 } 74 62 75 - /* returns false if the cache was initialized properly */ 76 - static inline bool io_alloc_cache_init(struct io_alloc_cache *cache, 77 - unsigned max_nr, unsigned int size, 78 - unsigned int init_bytes) 79 - { 80 - cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL); 81 - if (cache->entries) { 82 - cache->nr_cached = 0; 83 - cache->max_cached = max_nr; 84 - cache->elem_size = size; 85 - cache->init_clear = init_bytes; 86 - return false; 87 - } 88 - return true; 89 - } 90 - 91 - static inline void io_alloc_cache_free(struct io_alloc_cache *cache, 92 - void (*free)(const void *)) 93 - { 94 - void *entry; 95 - 96 - if (!cache->entries) 97 - return; 98 - 99 - while ((entry = io_alloc_cache_get(cache)) != NULL) 100 - free(entry); 101 - 102 - kvfree(cache->entries); 103 - cache->entries = NULL; 104 - } 105 63 #endif