Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
mm/slab: move [__]ksize and slab_ksize() to mm/slub.c

So that ksize() can reach SLUB's internal implementation details beyond
cache flags, move __ksize(), ksize(), and slab_ksize() into mm/slub.c.

[vbabka@suse.cz: also make __ksize() static and move its kerneldoc to
ksize()]

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-9-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
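The kerneldoc moved by this patch (now attached to ksize() in mm/slub.c) warns that ksize() is not a way to discover the usable size of an allocation after the fact, and points at kmalloc_size_roundup() instead. A minimal sketch of a caller honouring that contract; the helper name and buffer are hypothetical, not part of this patch:

#include <linux/slab.h>

/*
 * Hypothetical caller, not part of this patch: size the request up
 * front with kmalloc_size_roundup() instead of allocating small and
 * "discovering" the spare room with ksize() afterwards.
 */
static void *alloc_msg_buf(size_t payload, size_t *avail)
{
        size_t sz = kmalloc_size_roundup(payload);
        void *buf = kmalloc(sz, GFP_KERNEL);

        if (!buf)
                return NULL;
        /*
         * The whole rounded-up block was requested, so using all of
         * *avail bytes is legitimate. Writing into ksize(buf) bytes of
         * a kmalloc(payload, ...) allocation instead may trip KASAN,
         * UBSAN_BOUNDS or FORTIFY_SOURCE, as the kerneldoc warns.
         */
        *avail = sz;
        return buf;
}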

Authored by Harry Yoo and committed by Vlastimil Babka
fab06946 70089d01

+86 -89
include/linux/slab.h (-1)

@@ -509,7 +509,6 @@
 void kfree(const void *objp);
 void kfree_nolock(const void *objp);
 void kfree_sensitive(const void *objp);
-size_t __ksize(const void *objp);
 
 DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
 DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
mm/slab.h (-27)

@@ -661,33 +661,6 @@
 
 void kvfree_rcu_cb(struct rcu_head *head);
 
-size_t __ksize(const void *objp);
-
-static inline size_t slab_ksize(const struct kmem_cache *s)
-{
-#ifdef CONFIG_SLUB_DEBUG
-        /*
-         * Debugging requires use of the padding between object
-         * and whatever may come after it.
-         */
-        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-                return s->object_size;
-#endif
-        if (s->flags & SLAB_KASAN)
-                return s->object_size;
-        /*
-         * If we have the need to store the freelist pointer
-         * back there or track user information then we can
-         * only use the space before that information.
-         */
-        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
-                return s->inuse;
-        /*
-         * Else we can use all the padding etc for the allocation
-         */
-        return s->size;
-}
-
 static inline unsigned int large_kmalloc_order(const struct page *page)
 {
         return page[1].flags.f & 0xff;
mm/slab_common.c (-61)

@@ -1021,43 +1021,6 @@
                         0, SLAB_NO_MERGE, NULL);
 }
 
-/**
- * __ksize -- Report full size of underlying allocation
- * @object: pointer to the object
- *
- * This should only be used internally to query the true size of allocations.
- * It is not meant to be a way to discover the usable size of an allocation
- * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
- * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
- * and/or FORTIFY_SOURCE.
- *
- * Return: size of the actual memory used by @object in bytes
- */
-size_t __ksize(const void *object)
-{
-        const struct page *page;
-        const struct slab *slab;
-
-        if (unlikely(object == ZERO_SIZE_PTR))
-                return 0;
-
-        page = virt_to_page(object);
-
-        if (unlikely(PageLargeKmalloc(page)))
-                return large_kmalloc_size(page);
-
-        slab = page_slab(page);
-        /* Delete this after we're sure there are no users */
-        if (WARN_ON(!slab))
-                return page_size(page);
-
-#ifdef CONFIG_SLUB_DEBUG
-        skip_orig_size_check(slab->slab_cache, object);
-#endif
-
-        return slab_ksize(slab->slab_cache);
-}
-
 gfp_t kmalloc_fix_flags(gfp_t flags)
 {
         gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

@@ -1235,30 +1272,6 @@
         kfree(mem);
 }
 EXPORT_SYMBOL(kfree_sensitive);
-
-size_t ksize(const void *objp)
-{
-        /*
-         * We need to first check that the pointer to the object is valid.
-         * The KASAN report printed from ksize() is more useful, then when
-         * it's printed later when the behaviour could be undefined due to
-         * a potential use-after-free or double-free.
-         *
-         * We use kasan_check_byte(), which is supported for the hardware
-         * tag-based KASAN mode, unlike kasan_check_read/write().
-         *
-         * If the pointed to memory is invalid, we return 0 to avoid users of
-         * ksize() writing to and potentially corrupting the memory region.
-         *
-         * We want to perform the check before __ksize(), to avoid potentially
-         * crashing in __ksize() due to accessing invalid metadata.
-         */
-        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
-                return 0;
-
-        return kfence_ksize(objp) ?: __ksize(objp);
-}
-EXPORT_SYMBOL(ksize);
 
 #ifdef CONFIG_BPF_SYSCALL
 #include <linux/btf.h>
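The long comment removed here (and re-added verbatim under ksize() in mm/slub.c below) is the caller-visible contract: invalid or freed pointers yield 0 rather than a size that could be written through. A hedged sketch of code leaning on that contract; the helper is hypothetical, not from this patch:

#include <linux/slab.h>

/*
 * Hypothetical helper, not part of this patch: decide whether an
 * existing allocation can absorb a larger payload in place.
 */
static bool can_reuse_buf(const void *buf, size_t need)
{
        /*
         * ksize() validates the pointer (kasan_check_byte()) before
         * reading slab metadata and returns 0 for NULL, ZERO_SIZE_PTR
         * or an invalid object, so a zero result safely means
         * "do not write here".
         */
        return ksize(buf) >= need;
}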
mm/slub.c (+86)

@@ -7028,6 +7028,92 @@
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+        /*
+         * Debugging requires use of the padding between object
+         * and whatever may come after it.
+         */
+        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+                return s->object_size;
+#endif
+        if (s->flags & SLAB_KASAN)
+                return s->object_size;
+        /*
+         * If we have the need to store the freelist pointer
+         * back there or track user information then we can
+         * only use the space before that information.
+         */
+        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
+                return s->inuse;
+        /*
+         * Else we can use all the padding etc for the allocation
+         */
+        return s->size;
+}
+
+static size_t __ksize(const void *object)
+{
+        const struct page *page;
+        const struct slab *slab;
+
+        if (unlikely(object == ZERO_SIZE_PTR))
+                return 0;
+
+        page = virt_to_page(object);
+
+        if (unlikely(PageLargeKmalloc(page)))
+                return large_kmalloc_size(page);
+
+        slab = page_slab(page);
+        /* Delete this after we're sure there are no users */
+        if (WARN_ON(!slab))
+                return page_size(page);
+
+#ifdef CONFIG_SLUB_DEBUG
+        skip_orig_size_check(slab->slab_cache, object);
+#endif
+
+        return slab_ksize(slab->slab_cache);
+}
+
+/**
+ * ksize -- Report full size of underlying allocation
+ * @objp: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @objp in bytes
+ */
+size_t ksize(const void *objp)
+{
+        /*
+         * We need to first check that the pointer to the object is valid.
+         * The KASAN report printed from ksize() is more useful, then when
+         * it's printed later when the behaviour could be undefined due to
+         * a potential use-after-free or double-free.
+         *
+         * We use kasan_check_byte(), which is supported for the hardware
+         * tag-based KASAN mode, unlike kasan_check_read/write().
+         *
+         * If the pointed to memory is invalid, we return 0 to avoid users of
+         * ksize() writing to and potentially corrupting the memory region.
+         *
+         * We want to perform the check before __ksize(), to avoid potentially
+         * crashing in __ksize() due to accessing invalid metadata.
+         */
+        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
+                return 0;
+
+        return kfence_ksize(objp) ?: __ksize(objp);
+}
+EXPORT_SYMBOL(ksize);
+
 static void free_large_kmalloc(struct page *page, void *object)
 {
         unsigned int order = compound_order(page);
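With __ksize() now static to mm/slub.c, code elsewhere has a single entry point that also folds in the KFENCE case via kfence_ksize() and the KASAN validity check. A small hypothetical sketch of such a caller after this patch; the function name is illustrative only:

#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical diagnostic, not part of this patch. */
static void report_backing_size(const void *objp)
{
        /*
         * The exported ksize() is the only remaining query; the raw
         * __ksize() helper is no longer declared in any header.
         */
        size_t sz = ksize(objp);

        if (sz)
                pr_info("object %p backed by %zu bytes\n", objp, sz);
}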