Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

slab: handle kmalloc sheaves bootstrap

Enable sheaves for kmalloc caches. For types other than KMALLOC_NORMAL,
we can simply allow them in calculate_sizes(), as those caches are
created later than the KMALLOC_NORMAL caches and can therefore allocate
their sheaves and barns from those.

For KMALLOC_NORMAL caches we perform an additional step after first
creating them without sheaves. Then bootstrap_cache_sheaves() simply
allocates and initializes the barns and sheaves, and finally sets
s->sheaf_capacity to make them actually used.

Afterwards the only caches left without sheaves (unless SLUB_TINY or
debugging is enabled) are kmem_cache and kmem_cache_node. These are only
used when creating or destroying other kmem_caches. Thus they are not
performance critical and we can simply leave it that way.

Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

+84 -4
+84 -4
mm/slub.c
··· 2593 2593 return object; 2594 2594 } 2595 2595 2596 - static struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp) 2596 + static struct slab_sheaf *__alloc_empty_sheaf(struct kmem_cache *s, gfp_t gfp, 2597 + unsigned int capacity) 2597 2598 { 2598 2599 struct slab_sheaf *sheaf; 2599 2600 size_t sheaf_size; ··· 2612 2611 if (s->flags & SLAB_KMALLOC) 2613 2612 gfp |= __GFP_NO_OBJ_EXT; 2614 2613 2615 - sheaf_size = struct_size(sheaf, objects, s->sheaf_capacity); 2614 + sheaf_size = struct_size(sheaf, objects, capacity); 2616 2615 sheaf = kzalloc(sheaf_size, gfp); 2617 2616 2618 2617 if (unlikely(!sheaf)) ··· 2623 2622 stat(s, SHEAF_ALLOC); 2624 2623 2625 2624 return sheaf; 2625 + } 2626 + 2627 + static inline struct slab_sheaf *alloc_empty_sheaf(struct kmem_cache *s, 2628 + gfp_t gfp) 2629 + { 2630 + return __alloc_empty_sheaf(s, gfp, s->sheaf_capacity); 2626 2631 } 2627 2632 2628 2633 static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf) ··· 8164 8157 if (s->flags & SLAB_RECLAIM_ACCOUNT) 8165 8158 s->allocflags |= __GFP_RECLAIMABLE; 8166 8159 8167 - /* kmalloc caches need extra care to support sheaves */ 8168 - if (!is_kmalloc_cache(s)) 8160 + /* 8161 + * For KMALLOC_NORMAL caches we enable sheaves later by 8162 + * bootstrap_kmalloc_sheaves() to avoid recursion 8163 + */ 8164 + if (!is_kmalloc_normal(s)) 8169 8165 s->sheaf_capacity = calculate_sheaf_capacity(s, args); 8170 8166 8171 8167 /* ··· 8663 8653 return s; 8664 8654 } 8665 8655 8656 + /* 8657 + * Finish the sheaves initialization done normally by init_percpu_sheaves() and 8658 + * init_kmem_cache_nodes(). For normal kmalloc caches we have to bootstrap it 8659 + * since sheaves and barns are allocated by kmalloc. 
8660 + */ 8661 + static void __init bootstrap_cache_sheaves(struct kmem_cache *s) 8662 + { 8663 + struct kmem_cache_args empty_args = {}; 8664 + unsigned int capacity; 8665 + bool failed = false; 8666 + int node, cpu; 8667 + 8668 + capacity = calculate_sheaf_capacity(s, &empty_args); 8669 + 8670 + /* capacity can be 0 due to debugging or SLUB_TINY */ 8671 + if (!capacity) 8672 + return; 8673 + 8674 + for_each_node_mask(node, slab_nodes) { 8675 + struct node_barn *barn; 8676 + 8677 + barn = kmalloc_node(sizeof(*barn), GFP_KERNEL, node); 8678 + 8679 + if (!barn) { 8680 + failed = true; 8681 + goto out; 8682 + } 8683 + 8684 + barn_init(barn); 8685 + get_node(s, node)->barn = barn; 8686 + } 8687 + 8688 + for_each_possible_cpu(cpu) { 8689 + struct slub_percpu_sheaves *pcs; 8690 + 8691 + pcs = per_cpu_ptr(s->cpu_sheaves, cpu); 8692 + 8693 + pcs->main = __alloc_empty_sheaf(s, GFP_KERNEL, capacity); 8694 + 8695 + if (!pcs->main) { 8696 + failed = true; 8697 + break; 8698 + } 8699 + } 8700 + 8701 + out: 8702 + /* 8703 + * It's still early in boot so treat this like same as a failure to 8704 + * create the kmalloc cache in the first place 8705 + */ 8706 + if (failed) 8707 + panic("Out of memory when creating kmem_cache %s\n", s->name); 8708 + 8709 + s->sheaf_capacity = capacity; 8710 + } 8711 + 8712 + static void __init bootstrap_kmalloc_sheaves(void) 8713 + { 8714 + enum kmalloc_cache_type type; 8715 + 8716 + for (type = KMALLOC_NORMAL; type <= KMALLOC_RANDOM_END; type++) { 8717 + for (int idx = 0; idx < KMALLOC_SHIFT_HIGH + 1; idx++) { 8718 + if (kmalloc_caches[type][idx]) 8719 + bootstrap_cache_sheaves(kmalloc_caches[type][idx]); 8720 + } 8721 + } 8722 + } 8723 + 8666 8724 void __init kmem_cache_init(void) 8667 8725 { 8668 8726 static __initdata struct kmem_cache boot_kmem_cache, ··· 8773 8695 /* Now we can use the kmem_cache to allocate kmalloc slabs */ 8774 8696 setup_kmalloc_cache_index_table(); 8775 8697 create_kmalloc_caches(); 8698 + 8699 + 
bootstrap_kmalloc_sheaves(); 8776 8700 8777 8701 /* Setup random freelists for each cache */ 8778 8702 init_freelist_randomization();