Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm/slub: remove DEACTIVATE_TO_* stat items

The cpu slabs and their deactivations were removed, so remove the
now-unused stat items. Weirdly enough, the values were also used to
control whether __add_partial() adds to the head or the tail of the
list, so replace them with a new enum add_mode, which is cleaner.
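For illustration, a minimal standalone sketch (simplified list type and
helpers, not mm/slub.c itself) of why a dedicated enum is cleaner than
reusing stat-item values as a flag: the mode is self-documenting at every
call site and cannot silently accept an unrelated integer.

/* Sketch only: contrasts a dedicated enum with a bare-int flag.
 * The list type and names are simplified stand-ins, not kernel code. */
#include <stdio.h>

enum add_mode {
	ADD_TO_HEAD,
	ADD_TO_TAIL,
};

struct node {
	int val;
	struct node *next;
};

/* With enum add_mode the intent is visible at the call site, and the
 * compiler can warn about unhandled enumerators in a switch. */
static void add_partial(struct node **list, struct node *n, enum add_mode mode)
{
	if (mode == ADD_TO_TAIL) {
		struct node **p = list;

		while (*p)
			p = &(*p)->next;
		n->next = NULL;
		*p = n;
	} else {
		n->next = *list;
		*list = n;
	}
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL }, *list = NULL;

	add_partial(&list, &a, ADD_TO_HEAD);
	add_partial(&list, &b, ADD_TO_TAIL);	/* list: 1 -> 2 */
	for (struct node *p = list; p; p = p->next)
		printf("%d\n", p->val);
	return 0;
}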

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

+15 -16
mm/slub.c
···
 static inline void debugfs_slab_add(struct kmem_cache *s) { }
 #endif
 
+enum add_mode {
+	ADD_TO_HEAD,
+	ADD_TO_TAIL,
+};
+
 enum stat_item {
 	ALLOC_PCS,		/* Allocation from percpu sheaf */
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
···
 	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
 	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
 	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
-	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
-	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
 	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
 	DEACTIVATE_BYPASS,	/* Implicit deactivation */
 	ORDER_FALLBACK,		/* Number of times fallback was necessary */
···
  * Management of partially allocated slabs.
  */
 static inline void
-__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
+__add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
 {
 	n->nr_partial++;
-	if (tail == DEACTIVATE_TO_TAIL)
+	if (mode == ADD_TO_TAIL)
 		list_add_tail(&slab->slab_list, &n->partial);
 	else
 		list_add(&slab->slab_list, &n->partial);
···
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
-			       struct slab *slab, int tail)
+			       struct slab *slab, enum add_mode mode)
 {
 	lockdep_assert_held(&n->list_lock);
-	__add_partial(n, slab, tail);
+	__add_partial(n, slab, mode);
 }
 
 static inline void remove_partial(struct kmem_cache_node *n,
···
 	if (slab->inuse == slab->objects)
 		add_full(s, n, slab);
 	else
-		add_partial(n, slab, DEACTIVATE_TO_HEAD);
+		add_partial(n, slab, ADD_TO_HEAD);
 
 	inc_slabs_node(s, nid, slab->objects);
 	spin_unlock_irqrestore(&n->list_lock, flags);
···
 		n = get_node(s, slab_nid(slab));
 		spin_lock_irqsave(&n->list_lock, flags);
 	}
-	add_partial(n, slab, DEACTIVATE_TO_HEAD);
+	add_partial(n, slab, ADD_TO_HEAD);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 }
···
 		/* was on full list */
 		remove_full(s, n, slab);
 		if (!slab_free) {
-			add_partial(n, slab, DEACTIVATE_TO_TAIL);
+			add_partial(n, slab, ADD_TO_TAIL);
 			stat(s, FREE_ADD_PARTIAL);
 		}
 	} else if (slab_free) {
···
 	 * then add it.
 	 */
 	if (unlikely(was_full)) {
-		add_partial(n, slab, DEACTIVATE_TO_TAIL);
+		add_partial(n, slab, ADD_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
 	spin_unlock_irqrestore(&n->list_lock, flags);
···
 			continue;
 
 		list_del(&slab->slab_list);
-		add_partial(n, slab, DEACTIVATE_TO_HEAD);
+		add_partial(n, slab, ADD_TO_HEAD);
 	}
 
 	spin_unlock_irqrestore(&n->list_lock, flags);
···
 	 * No locks need to be taken here as it has just been
 	 * initialized and there is no concurrent access.
 	 */
-	__add_partial(n, slab, DEACTIVATE_TO_HEAD);
+	__add_partial(n, slab, ADD_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
···
 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
-STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
-STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
···
 	&cpuslab_flush_attr.attr,
 	&deactivate_full_attr.attr,
 	&deactivate_empty_attr.attr,
-	&deactivate_to_head_attr.attr,
-	&deactivate_to_tail_attr.attr,
 	&deactivate_remote_frees_attr.attr,
 	&deactivate_bypass_attr.attr,
 	&order_fallback_attr.attr,
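A likely user-visible side effect, assuming these stat attributes are
built under CONFIG_SLUB_STATS as the STAT_ATTR() hunks suggest: the
per-cache sysfs files deactivate_to_head and deactivate_to_tail under
/sys/kernel/slab/<cache>/ disappear together with the stat items, while
deactivate_full, deactivate_empty and the remaining deactivation
counters stay available.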