Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm/slab: abstract slabobj_ext access via new slab_obj_ext() helper

Currently, the slab allocator assumes that slab->obj_exts is a pointer
to an array of struct slabobj_ext objects. However, to support storage
methods where struct slabobj_ext is embedded within objects, the slab
allocator should not make this assumption. Instead of directly
dereferencing the slabobj_exts array, abstract access to
struct slabobj_ext via helper functions.

Introduce a new API for slabobj_ext metadata access:

slab_obj_ext(slab, obj_exts, index) - returns a pointer to the
struct slabobj_ext element at the given index.

Directly dereferencing the return value of slab_obj_exts() is no longer
allowed. Instead, slab_obj_ext() must always be used to access
individual struct slabobj_ext objects.

Convert all users to use this API.
No functional changes intended.

Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-5-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

authored by

Harry Yoo and committed by
Vlastimil Babka
52f1ca8a 43d9bb42

+79 -32
+16 -7
mm/memcontrol.c
··· 2596 2596 * Memcg membership data for each individual object is saved in 2597 2597 * slab->obj_exts. 2598 2598 */ 2599 - struct slabobj_ext *obj_exts; 2599 + unsigned long obj_exts; 2600 + struct slabobj_ext *obj_ext; 2600 2601 unsigned int off; 2601 2602 2602 2603 obj_exts = slab_obj_exts(slab); ··· 2605 2604 return NULL; 2606 2605 2607 2606 off = obj_to_index(slab->slab_cache, slab, p); 2608 - if (obj_exts[off].objcg) 2609 - return obj_cgroup_memcg(obj_exts[off].objcg); 2607 + obj_ext = slab_obj_ext(slab, obj_exts, off); 2608 + if (obj_ext->objcg) 2609 + return obj_cgroup_memcg(obj_ext->objcg); 2610 2610 2611 2611 return NULL; 2612 2612 } ··· 3193 3191 } 3194 3192 3195 3193 for (i = 0; i < size; i++) { 3194 + unsigned long obj_exts; 3195 + struct slabobj_ext *obj_ext; 3196 + 3196 3197 slab = virt_to_slab(p[i]); 3197 3198 3198 3199 if (!slab_obj_exts(slab) && ··· 3218 3213 slab_pgdat(slab), cache_vmstat_idx(s))) 3219 3214 return false; 3220 3215 3216 + obj_exts = slab_obj_exts(slab); 3221 3217 off = obj_to_index(s, slab, p[i]); 3218 + obj_ext = slab_obj_ext(slab, obj_exts, off); 3222 3219 obj_cgroup_get(objcg); 3223 - slab_obj_exts(slab)[off].objcg = objcg; 3220 + obj_ext->objcg = objcg; 3224 3221 } 3225 3222 3226 3223 return true; 3227 3224 } 3228 3225 3229 3226 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 3230 - void **p, int objects, struct slabobj_ext *obj_exts) 3227 + void **p, int objects, unsigned long obj_exts) 3231 3228 { 3232 3229 size_t obj_size = obj_full_size(s); 3233 3230 3234 3231 for (int i = 0; i < objects; i++) { 3235 3232 struct obj_cgroup *objcg; 3233 + struct slabobj_ext *obj_ext; 3236 3234 unsigned int off; 3237 3235 3238 3236 off = obj_to_index(s, slab, p[i]); 3239 - objcg = obj_exts[off].objcg; 3237 + obj_ext = slab_obj_ext(slab, obj_exts, off); 3238 + objcg = obj_ext->objcg; 3240 3239 if (!objcg) 3241 3240 continue; 3242 3241 3243 - obj_exts[off].objcg = NULL; 3242 + obj_ext->objcg = NULL; 3244 3243 
refill_obj_stock(objcg, obj_size, true, -obj_size, 3245 3244 slab_pgdat(slab), cache_vmstat_idx(s)); 3246 3245 obj_cgroup_put(objcg);
+37 -6
mm/slab.h
··· 507 507 * associated with a slab. 508 508 * @slab: a pointer to the slab struct 509 509 * 510 - * Returns a pointer to the object extension vector associated with the slab, 511 - * or NULL if no such vector has been associated yet. 510 + * Returns the address of the object extension vector associated with the slab, 511 + * or zero if no such vector has been associated yet. 512 + * Do not dereference the return value directly; use slab_obj_ext() to access 513 + * its elements. 512 514 */ 513 - static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) 515 + static inline unsigned long slab_obj_exts(struct slab *slab) 514 516 { 515 517 unsigned long obj_exts = READ_ONCE(slab->obj_exts); 516 518 ··· 525 523 obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab)); 526 524 VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab)); 527 525 #endif 528 - return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK); 526 + 527 + return obj_exts & ~OBJEXTS_FLAGS_MASK; 528 + } 529 + 530 + /* 531 + * slab_obj_ext - get the pointer to the slab object extension metadata 532 + * associated with an object in a slab. 533 + * @slab: a pointer to the slab struct 534 + * @obj_exts: a pointer to the object extension vector 535 + * @index: an index of the object 536 + * 537 + * Returns a pointer to the object extension associated with the object. 
538 + */ 539 + static inline struct slabobj_ext *slab_obj_ext(struct slab *slab, 540 + unsigned long obj_exts, 541 + unsigned int index) 542 + { 543 + struct slabobj_ext *obj_ext; 544 + 545 + VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab)); 546 + 547 + obj_ext = (struct slabobj_ext *)obj_exts; 548 + return &obj_ext[index]; 529 549 } 530 550 531 551 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, ··· 555 531 556 532 #else /* CONFIG_SLAB_OBJ_EXT */ 557 533 558 - static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) 534 + static inline unsigned long slab_obj_exts(struct slab *slab) 535 + { 536 + return 0; 537 + } 538 + 539 + static inline struct slabobj_ext *slab_obj_ext(struct slab *slab, 540 + unsigned long obj_exts, 541 + unsigned int index) 559 542 { 560 543 return NULL; 561 544 } ··· 579 548 bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru, 580 549 gfp_t flags, size_t size, void **p); 581 550 void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, 582 - void **p, int objects, struct slabobj_ext *obj_exts); 551 + void **p, int objects, unsigned long obj_exts); 583 552 #endif 584 553 585 554 void kvfree_rcu_cb(struct rcu_head *head);
+26 -19
mm/slub.c
··· 2055 2055 2056 2056 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 2057 2057 { 2058 - struct slabobj_ext *slab_exts; 2058 + unsigned long slab_exts; 2059 2059 struct slab *obj_exts_slab; 2060 2060 2061 2061 obj_exts_slab = virt_to_slab(obj_exts); ··· 2063 2063 if (slab_exts) { 2064 2064 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 2065 2065 obj_exts_slab, obj_exts); 2066 + struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab, 2067 + slab_exts, offs); 2066 2068 2067 - if (unlikely(is_codetag_empty(&slab_exts[offs].ref))) 2069 + if (unlikely(is_codetag_empty(&ext->ref))) 2068 2070 return; 2069 2071 2070 2072 /* codetag should be NULL here */ 2071 - WARN_ON(slab_exts[offs].ref.ct); 2072 - set_codetag_empty(&slab_exts[offs].ref); 2073 + WARN_ON(ext->ref.ct); 2074 + set_codetag_empty(&ext->ref); 2073 2075 } 2074 2076 } 2075 2077 ··· 2239 2237 { 2240 2238 struct slabobj_ext *obj_exts; 2241 2239 2242 - obj_exts = slab_obj_exts(slab); 2240 + obj_exts = (struct slabobj_ext *)slab_obj_exts(slab); 2243 2241 if (!obj_exts) { 2244 2242 /* 2245 2243 * If obj_exts allocation failed, slab->obj_exts is set to ··· 2286 2284 #ifdef CONFIG_MEM_ALLOC_PROFILING 2287 2285 2288 2286 static inline struct slabobj_ext * 2289 - prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p) 2287 + prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p) 2290 2288 { 2291 2289 struct slab *slab; 2290 + unsigned long obj_exts; 2292 2291 2293 2292 slab = virt_to_slab(p); 2294 - if (!slab_obj_exts(slab) && 2293 + obj_exts = slab_obj_exts(slab); 2294 + if (!obj_exts && 2295 2295 alloc_slab_obj_exts(slab, s, flags, false)) { 2296 2296 pr_warn_once("%s, %s: Failed to create slab extension vector!\n", 2297 2297 __func__, s->name); 2298 2298 return NULL; 2299 2299 } 2300 2300 2301 - return slab_obj_exts(slab) + obj_to_index(s, slab, p); 2301 + obj_exts = slab_obj_exts(slab); 2302 + return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p)); 
2302 2303 } 2303 2304 2304 2305 /* Should be called only if mem_alloc_profiling_enabled() */ 2305 2306 static noinline void 2306 2307 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2307 2308 { 2308 - struct slabobj_ext *obj_exts; 2309 + struct slabobj_ext *obj_ext; 2309 2310 2310 2311 if (!object) 2311 2312 return; ··· 2319 2314 if (flags & __GFP_NO_OBJ_EXT) 2320 2315 return; 2321 2316 2322 - obj_exts = prepare_slab_obj_exts_hook(s, flags, object); 2317 + obj_ext = prepare_slab_obj_ext_hook(s, flags, object); 2323 2318 /* 2324 2319 * Currently obj_exts is used only for allocation profiling. 2325 2320 * If other users appear then mem_alloc_profiling_enabled() 2326 2321 * check should be added before alloc_tag_add(). 2327 2322 */ 2328 - if (likely(obj_exts)) 2329 - alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size); 2323 + if (likely(obj_ext)) 2324 + alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size); 2330 2325 else 2331 2326 alloc_tag_set_inaccurate(current->alloc_tag); 2332 2327 } ··· 2343 2338 __alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2344 2339 int objects) 2345 2340 { 2346 - struct slabobj_ext *obj_exts; 2347 2341 int i; 2342 + unsigned long obj_exts; 2348 2343 2349 2344 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. 
*/ 2350 2345 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE)) ··· 2357 2352 for (i = 0; i < objects; i++) { 2358 2353 unsigned int off = obj_to_index(s, slab, p[i]); 2359 2354 2360 - alloc_tag_sub(&obj_exts[off].ref, s->size); 2355 + alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size); 2361 2356 } 2362 2357 } 2363 2358 ··· 2416 2411 void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, 2417 2412 int objects) 2418 2413 { 2419 - struct slabobj_ext *obj_exts; 2414 + unsigned long obj_exts; 2420 2415 2421 2416 if (!memcg_kmem_online()) 2422 2417 return; ··· 2431 2426 static __fastpath_inline 2432 2427 bool memcg_slab_post_charge(void *p, gfp_t flags) 2433 2428 { 2434 - struct slabobj_ext *slab_exts; 2429 + unsigned long obj_exts; 2430 + struct slabobj_ext *obj_ext; 2435 2431 struct kmem_cache *s; 2436 2432 struct page *page; 2437 2433 struct slab *slab; ··· 2473 2467 return true; 2474 2468 2475 2469 /* Ignore already charged objects. */ 2476 - slab_exts = slab_obj_exts(slab); 2477 - if (slab_exts) { 2470 + obj_exts = slab_obj_exts(slab); 2471 + if (obj_exts) { 2478 2472 off = obj_to_index(s, slab, p); 2479 - if (unlikely(slab_exts[off].objcg)) 2473 + obj_ext = slab_obj_ext(slab, obj_exts, off); 2474 + if (unlikely(obj_ext->objcg)) 2480 2475 return true; 2481 2476 } 2482 2477