Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

slab: remove frozen slab checks from __slab_free()

Currently slabs are only frozen after consistency checks failed. This
can happen only in caches with debugging enabled, and those use
free_to_partial_list() for freeing. The non-debug operation of
__slab_free() can thus stop considering the frozen field, and we can
remove the FREE_FROZEN stat.

Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Hao Li <hao.li@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

+4 -18
mm/slub.c
@@ -338,7 +338,6 @@
 	FREE_RCU_SHEAF_FAIL,	/* Failed to free to a rcu_free sheaf */
 	FREE_FASTPATH,		/* Free to cpu slab */
 	FREE_SLOWPATH,		/* Freeing not to cpu slab */
-	FREE_FROZEN,		/* Freeing to frozen slab */
 	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
 	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
 	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
@@ -5108,7 +5109,7 @@
 			unsigned long addr)
 {
-	bool was_frozen, was_full;
+	bool was_full;
 	struct freelist_counters old, new;
 	struct kmem_cache_node *n = NULL;
 	unsigned long flags;
@@ -5131,7 +5132,6 @@
 		old.counters = slab->counters;
 
 		was_full = (old.freelist == NULL);
-		was_frozen = old.frozen;
 
 		set_freepointer(s, tail, old.freelist);
 
@@ -5143,7 +5145,7 @@
 		 * to (due to not being full anymore) the partial list.
 		 * Unless it's frozen.
 		 */
-		if ((!new.inuse || was_full) && !was_frozen) {
+		if (!new.inuse || was_full) {
 
 			n = get_node(s, slab_nid(slab));
 			/*
@@ -5162,20 +5164,10 @@
 	} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
 
 	if (likely(!n)) {
-
-		if (likely(was_frozen)) {
-			/*
-			 * The list lock was not taken therefore no list
-			 * activity can be necessary.
-			 */
-			stat(s, FREE_FROZEN);
-		}
-
 		/*
-		 * In other cases we didn't take the list_lock because the slab
-		 * was already on the partial list and will remain there.
+		 * We didn't take the list_lock because the slab was already on
+		 * the partial list and will remain there.
 		 */
-
 		return;
 	}
@@ -8754,7 +8766,6 @@
 STAT_ATTR(FREE_RCU_SHEAF_FAIL, free_rcu_sheaf_fail);
 STAT_ATTR(FREE_FASTPATH, free_fastpath);
 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
-STAT_ATTR(FREE_FROZEN, free_frozen);
 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
@@ -8858,7 +8871,6 @@
 	&free_rcu_sheaf_fail_attr.attr,
 	&free_fastpath_attr.attr,
 	&free_slowpath_attr.attr,
-	&free_frozen_attr.attr,
 	&free_add_partial_attr.attr,
 	&free_remove_partial_attr.attr,
 	&alloc_from_partial_attr.attr,