Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


slab: remove defer_deactivate_slab()

There are no more cpu slabs, so we don't need their deferred
deactivation. The function is now only used from places where we
allocate a new slab but then can't spin on the node's list_lock to put
it on the partial list. Instead of the deferred action, we can free the
slab directly via __free_slab(); we just need to tell it to use the
_nolock() freeing of the underlying pages and take care of the
accounting.
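
Condensed from the mm/slub.c hunk below, the trylock-failure path in the
allocation slow paths now discards the fresh slab immediately (the locals
n, flags and allow_spin come from the surrounding functions):

	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
		/* Unlucky, discard newly allocated slab. */
		free_new_slab_nolock(s, slab);
		return NULL;
	}

where free_new_slab_nolock() is just __free_slab(s, slab, false), i.e. a
free with the new allow_spin argument cleared.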

Since a free_frozen_pages_nolock() variant does not yet exist for code
outside of the page allocator, create it as a trivial wrapper around
__free_frozen_pages(..., FPI_TRYLOCK).
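
As the mm/internal.h and mm/page_alloc.c hunks below show, this is a
declaration plus a one-line body:

	void free_frozen_pages_nolock(struct page *page, unsigned int order)
	{
		/* FPI_TRYLOCK: only trylock the page allocator's internal locks */
		__free_frozen_pages(page, order, FPI_TRYLOCK);
	}

__free_slab() can then pick between free_frozen_pages() and this variant
based on its new allow_spin argument.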

Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

+28 -44
mm/internal.h  +1
···
  struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
  #define alloc_frozen_pages_nolock(...) \
  	alloc_hooks(alloc_frozen_pages_nolock_noprof(__VA_ARGS__))
+ void free_frozen_pages_nolock(struct page *page, unsigned int order);
  
  extern void zone_pcp_reset(struct zone *zone);
  extern void zone_pcp_disable(struct zone *zone);
mm/page_alloc.c  +5
···
  	__free_frozen_pages(page, order, FPI_NONE);
  }
  
+ void free_frozen_pages_nolock(struct page *page, unsigned int order)
+ {
+ 	__free_frozen_pages(page, order, FPI_TRYLOCK);
+ }
+ 
  /*
   * Free a batch of folios
   */
mm/slab.h  +1 -7
···
  	struct kmem_cache *slab_cache;
  	union {
  		struct {
- 			union {
- 				struct list_head slab_list;
- 				struct { /* For deferred deactivate_slab() */
- 					struct llist_node llnode;
- 					void *flush_freelist;
- 				};
- 			};
+ 			struct list_head slab_list;
  			/* Double-word boundary */
  			struct freelist_counters;
  		};
mm/slub.c  +21 -37
···
  		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
  }
  
- static void __free_slab(struct kmem_cache *s, struct slab *slab)
+ static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
  {
  	struct page *page = slab_page(slab);
  	int order = compound_order(page);
···
  	__ClearPageSlab(page);
  	mm_account_reclaimed_pages(pages);
  	unaccount_slab(slab, order, s);
- 	free_frozen_pages(page, order);
+ 	if (allow_spin)
+ 		free_frozen_pages(page, order);
+ 	else
+ 		free_frozen_pages_nolock(page, order);
+ }
+ 
+ static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
+ {
+ 	/*
+ 	 * Since it was just allocated, we can skip the actions in
+ 	 * discard_slab() and free_slab().
+ 	 */
+ 	__free_slab(s, slab, false);
  }
  
  static void rcu_free_slab(struct rcu_head *h)
  {
  	struct slab *slab = container_of(h, struct slab, rcu_head);
  
- 	__free_slab(slab->slab_cache, slab);
+ 	__free_slab(slab->slab_cache, slab, true);
  }
  
  static void free_slab(struct kmem_cache *s, struct slab *slab)
···
  	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
  		call_rcu(&slab->rcu_head, rcu_free_slab);
  	else
- 		__free_slab(s, slab);
+ 		__free_slab(s, slab, true);
  }
  
  static void discard_slab(struct kmem_cache *s, struct slab *slab)
···
  	return object;
  }
  
- static void defer_deactivate_slab(struct slab *slab, void *flush_freelist);
- 
  /*
   * Called only for kmem_cache_debug() caches to allocate from a freshly
   * allocated slab. Allocate a single object instead of whole freelist
···
  	void *object;
  
  	if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) {
- 		/* Unlucky, discard newly allocated slab */
- 		defer_deactivate_slab(slab, NULL);
+ 		/* Unlucky, discard newly allocated slab. */
+ 		free_new_slab_nolock(s, slab);
  		return NULL;
  	}
  
···
  
  	if (!spin_trylock_irqsave(&n->list_lock, flags)) {
  		/* Unlucky, discard newly allocated slab */
- 		defer_deactivate_slab(slab, NULL);
+ 		free_new_slab_nolock(s, slab);
  		return 0;
  	}
  }
···
  
  struct defer_free {
  	struct llist_head objects;
- 	struct llist_head slabs;
  	struct irq_work work;
  };
  
···
  
  static DEFINE_PER_CPU(struct defer_free, defer_free_objects) = {
  	.objects = LLIST_HEAD_INIT(objects),
- 	.slabs	 = LLIST_HEAD_INIT(slabs),
  	.work	 = IRQ_WORK_INIT(free_deferred_objects),
  };
  
  /*
   * In PREEMPT_RT irq_work runs in per-cpu kthread, so it's safe
-  * to take sleeping spin_locks from __slab_free() and deactivate_slab().
+  * to take sleeping spin_locks from __slab_free().
   * In !PREEMPT_RT irq_work will run after local_unlock_irqrestore().
   */
  static void free_deferred_objects(struct irq_work *work)
  {
  	struct defer_free *df = container_of(work, struct defer_free, work);
  	struct llist_head *objs = &df->objects;
- 	struct llist_head *slabs = &df->slabs;
  	struct llist_node *llnode, *pos, *t;
  
- 	if (llist_empty(objs) && llist_empty(slabs))
+ 	if (llist_empty(objs))
  		return;
  
  	llnode = llist_del_all(objs);
···
  
  		__slab_free(s, slab, x, x, 1, _THIS_IP_);
  	}
- 
- 	llnode = llist_del_all(slabs);
- 	llist_for_each_safe(pos, t, llnode) {
- 		struct slab *slab = container_of(pos, struct slab, llnode);
- 
- 		if (slab->frozen)
- 			deactivate_slab(slab->slab_cache, slab, slab->flush_freelist);
- 		else
- 			free_slab(slab->slab_cache, slab);
- 	}
  }
  
  static void defer_free(struct kmem_cache *s, void *head)
···
  
  	df = this_cpu_ptr(&defer_free_objects);
  	if (llist_add(head + s->offset, &df->objects))
- 		irq_work_queue(&df->work);
- }
- 
- static void defer_deactivate_slab(struct slab *slab, void *flush_freelist)
- {
- 	struct defer_free *df;
- 
- 	slab->flush_freelist = flush_freelist;
- 
- 	guard(preempt)();
- 
- 	df = this_cpu_ptr(&defer_free_objects);
- 	if (llist_add(&slab->llnode, &df->slabs))
  		irq_work_queue(&df->work);
  }