Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm/memcontrol,alloc_tag: handle slabobj_ext access under KASAN poison

In the near future, slabobj_ext may reside outside the allocated slab
object range within a slab, which could be reported as an out-of-bounds
access by KASAN.

As suggested by Andrey Konovalov [1], explicitly disable KASAN and KMSAN
checks when accessing slabobj_ext within slab allocator, memory profiling,
and memory cgroup code. While an alternative approach could be to unpoison
slabobj_ext, out-of-bounds accesses outside the slab allocator are
generally more common.

Move metadata_access_enable()/disable() helpers to mm/slab.h so that
they can be used outside mm/slub.c. However, as suggested by Suren
Baghdasaryan [2], instead of calling them directly from mm code (which is
more prone to errors), change users to access slabobj_ext via get/put
APIs:

- Users should call get_slab_obj_exts() to access slabobj_ext metadata
and call put_slab_obj_exts() when they are done.

- From now on, accessing it outside the section covered by
get_slab_obj_exts() ~ put_slab_obj_exts() is illegal.
This ensures that accesses to slabobj_ext metadata won't be reported
as access violations.

Call kasan_reset_tag() in slab_obj_ext() before returning the address to
prevent SW or HW tag-based KASAN from reporting false positives.

Suggested-by: Andrey Konovalov <andreyknvl@gmail.com>
Suggested-by: Suren Baghdasaryan <surenb@google.com>
Link: https://lore.kernel.org/linux-mm/CA+fCnZezoWn40BaS3cgmCeLwjT+5AndzcQLc=wH3BjMCu6_YCw@mail.gmail.com [1]
Link: https://lore.kernel.org/linux-mm/CAJuCfpG=Lb4WhYuPkSpdNO4Ehtjm1YcEEK0OM=3g9i=LxmpHSQ@mail.gmail.com [2]
Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
Link: https://patch.msgid.link/20260113061845.159790-7-harry.yoo@oracle.com
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>

authored by

Harry Yoo and committed by
Vlastimil Babka
4b1530f8 7a8e71bc

+95 -40
+10 -2
mm/memcontrol.c
··· 2604 2604 if (!obj_exts) 2605 2605 return NULL; 2606 2606 2607 + get_slab_obj_exts(obj_exts); 2607 2608 off = obj_to_index(slab->slab_cache, slab, p); 2608 2609 obj_ext = slab_obj_ext(slab, obj_exts, off); 2609 - if (obj_ext->objcg) 2610 - return obj_cgroup_memcg(obj_ext->objcg); 2610 + if (obj_ext->objcg) { 2611 + struct obj_cgroup *objcg = obj_ext->objcg; 2612 + 2613 + put_slab_obj_exts(obj_exts); 2614 + return obj_cgroup_memcg(objcg); 2615 + } 2616 + put_slab_obj_exts(obj_exts); 2611 2617 2612 2618 return NULL; 2613 2619 } ··· 3225 3219 return false; 3226 3220 3227 3221 obj_exts = slab_obj_exts(slab); 3222 + get_slab_obj_exts(obj_exts); 3228 3223 off = obj_to_index(s, slab, p[i]); 3229 3224 obj_ext = slab_obj_ext(slab, obj_exts, off); 3230 3225 obj_cgroup_get(objcg); 3231 3226 obj_ext->objcg = objcg; 3227 + put_slab_obj_exts(obj_exts); 3232 3228 } 3233 3229 3234 3230 return true;
+51 -3
mm/slab.h
··· 508 508 static inline bool slab_in_kunit_test(void) { return false; } 509 509 #endif 510 510 511 + /* 512 + * slub is about to manipulate internal object metadata. This memory lies 513 + * outside the range of the allocated object, so accessing it would normally 514 + * be reported by kasan as a bounds error. metadata_access_enable() is used 515 + * to tell kasan that these accesses are OK. 516 + */ 517 + static inline void metadata_access_enable(void) 518 + { 519 + kasan_disable_current(); 520 + kmsan_disable_current(); 521 + } 522 + 523 + static inline void metadata_access_disable(void) 524 + { 525 + kmsan_enable_current(); 526 + kasan_enable_current(); 527 + } 528 + 511 529 #ifdef CONFIG_SLAB_OBJ_EXT 512 530 513 531 /* ··· 535 517 * 536 518 * Returns the address of the object extension vector associated with the slab, 537 519 * or zero if no such vector has been associated yet. 538 - * Do not dereference the return value directly; use slab_obj_ext() to access 539 - * its elements. 520 + * Do not dereference the return value directly; use get/put_slab_obj_exts() 521 + * pair and slab_obj_ext() to access individual elements. 522 + * 523 + * Example usage: 524 + * 525 + * obj_exts = slab_obj_exts(slab); 526 + * if (obj_exts) { 527 + * get_slab_obj_exts(obj_exts); 528 + * obj_ext = slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, obj)); 529 + * // do something with obj_ext 530 + * put_slab_obj_exts(obj_exts); 531 + * } 532 + * 533 + * Note that the get/put semantics does not involve reference counting. 534 + * Instead, it updates kasan/kmsan depth so that accesses to slabobj_ext 535 + * won't be reported as access violations. 
540 536 */ 541 537 static inline unsigned long slab_obj_exts(struct slab *slab) 542 538 { ··· 567 535 #endif 568 536 569 537 return obj_exts & ~OBJEXTS_FLAGS_MASK; 538 + } 539 + 540 + static inline void get_slab_obj_exts(unsigned long obj_exts) 541 + { 542 + VM_WARN_ON_ONCE(!obj_exts); 543 + metadata_access_enable(); 544 + } 545 + 546 + static inline void put_slab_obj_exts(unsigned long obj_exts) 547 + { 548 + metadata_access_disable(); 570 549 } 571 550 572 551 #ifdef CONFIG_64BIT ··· 608 565 * @index: an index of the object 609 566 * 610 567 * Returns a pointer to the object extension associated with the object. 568 + * Must be called within a section covered by get/put_slab_obj_exts(). 611 569 */ 612 570 static inline struct slabobj_ext *slab_obj_ext(struct slab *slab, 613 571 unsigned long obj_exts, 614 572 unsigned int index) 615 573 { 574 + struct slabobj_ext *obj_ext; 575 + 616 576 VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab)); 617 577 618 - return (struct slabobj_ext *)(obj_exts + slab_get_stride(slab) * index); 578 + obj_ext = (struct slabobj_ext *)(obj_exts + 579 + slab_get_stride(slab) * index); 580 + return kasan_reset_tag(obj_ext); 619 581 } 620 582 621 583 int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
+34 -35
mm/slub.c
··· 973 973 static int disable_higher_order_debug; 974 974 975 975 /* 976 - * slub is about to manipulate internal object metadata. This memory lies 977 - * outside the range of the allocated object, so accessing it would normally 978 - * be reported by kasan as a bounds error. metadata_access_enable() is used 979 - * to tell kasan that these accesses are OK. 980 - */ 981 - static inline void metadata_access_enable(void) 982 - { 983 - kasan_disable_current(); 984 - kmsan_disable_current(); 985 - } 986 - 987 - static inline void metadata_access_disable(void) 988 - { 989 - kmsan_enable_current(); 990 - kasan_enable_current(); 991 - } 992 - 993 - /* 994 976 * Object debugging 995 977 */ 996 978 ··· 2037 2055 2038 2056 static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) 2039 2057 { 2040 - unsigned long slab_exts; 2041 2058 struct slab *obj_exts_slab; 2059 + unsigned long slab_exts; 2042 2060 2043 2061 obj_exts_slab = virt_to_slab(obj_exts); 2044 2062 slab_exts = slab_obj_exts(obj_exts_slab); 2045 2063 if (slab_exts) { 2064 + get_slab_obj_exts(slab_exts); 2046 2065 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, 2047 2066 obj_exts_slab, obj_exts); 2048 2067 struct slabobj_ext *ext = slab_obj_ext(obj_exts_slab, 2049 2068 slab_exts, offs); 2050 2069 2051 - if (unlikely(is_codetag_empty(&ext->ref))) 2070 + if (unlikely(is_codetag_empty(&ext->ref))) { 2071 + put_slab_obj_exts(slab_exts); 2052 2072 return; 2073 + } 2053 2074 2054 2075 /* codetag should be NULL here */ 2055 2076 WARN_ON(ext->ref.ct); 2056 2077 set_codetag_empty(&ext->ref); 2078 + put_slab_obj_exts(slab_exts); 2057 2079 } 2058 2080 } 2059 2081 ··· 2273 2287 2274 2288 #ifdef CONFIG_MEM_ALLOC_PROFILING 2275 2289 2276 - static inline struct slabobj_ext * 2277 - prepare_slab_obj_ext_hook(struct kmem_cache *s, gfp_t flags, void *p) 2290 + static inline unsigned long 2291 + prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab, 2292 + gfp_t flags, void *p) 2278 2293 { 2279 - 
struct slab *slab; 2280 - unsigned long obj_exts; 2281 - 2282 - slab = virt_to_slab(p); 2283 - obj_exts = slab_obj_exts(slab); 2284 - if (!obj_exts && 2294 + if (!slab_obj_exts(slab) && 2285 2295 alloc_slab_obj_exts(slab, s, flags, false)) { 2286 2296 pr_warn_once("%s, %s: Failed to create slab extension vector!\n", 2287 2297 __func__, s->name); 2288 - return NULL; 2298 + return 0; 2289 2299 } 2290 2300 2291 - obj_exts = slab_obj_exts(slab); 2292 - return slab_obj_ext(slab, obj_exts, obj_to_index(s, slab, p)); 2301 + return slab_obj_exts(slab); 2293 2302 } 2303 + 2294 2304 2295 2305 /* Should be called only if mem_alloc_profiling_enabled() */ 2296 2306 static noinline void 2297 2307 __alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags) 2298 2308 { 2309 + unsigned long obj_exts; 2299 2310 struct slabobj_ext *obj_ext; 2311 + struct slab *slab; 2300 2312 2301 2313 if (!object) 2302 2314 return; ··· 2305 2321 if (flags & __GFP_NO_OBJ_EXT) 2306 2322 return; 2307 2323 2308 - obj_ext = prepare_slab_obj_ext_hook(s, flags, object); 2324 + slab = virt_to_slab(object); 2325 + obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object); 2309 2326 /* 2310 2327 * Currently obj_exts is used only for allocation profiling. 2311 2328 * If other users appear then mem_alloc_profiling_enabled() 2312 2329 * check should be added before alloc_tag_add(). 
2313 2330 */ 2314 - if (likely(obj_ext)) 2331 + if (obj_exts) { 2332 + unsigned int obj_idx = obj_to_index(s, slab, object); 2333 + 2334 + get_slab_obj_exts(obj_exts); 2335 + obj_ext = slab_obj_ext(slab, obj_exts, obj_idx); 2315 2336 alloc_tag_add(&obj_ext->ref, current->alloc_tag, s->size); 2316 - else 2337 + put_slab_obj_exts(obj_exts); 2338 + } else { 2317 2339 alloc_tag_set_inaccurate(current->alloc_tag); 2340 + } 2318 2341 } 2319 2342 2320 2343 static inline void ··· 2347 2356 if (!obj_exts) 2348 2357 return; 2349 2358 2359 + get_slab_obj_exts(obj_exts); 2350 2360 for (i = 0; i < objects; i++) { 2351 2361 unsigned int off = obj_to_index(s, slab, p[i]); 2352 2362 2353 2363 alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size); 2354 2364 } 2365 + put_slab_obj_exts(obj_exts); 2355 2366 } 2356 2367 2357 2368 static inline void ··· 2420 2427 if (likely(!obj_exts)) 2421 2428 return; 2422 2429 2430 + get_slab_obj_exts(obj_exts); 2423 2431 __memcg_slab_free_hook(s, slab, p, objects, obj_exts); 2432 + put_slab_obj_exts(obj_exts); 2424 2433 } 2425 2434 2426 2435 static __fastpath_inline ··· 2472 2477 /* Ignore already charged objects. */ 2473 2478 obj_exts = slab_obj_exts(slab); 2474 2479 if (obj_exts) { 2480 + get_slab_obj_exts(obj_exts); 2475 2481 off = obj_to_index(s, slab, p); 2476 2482 obj_ext = slab_obj_ext(slab, obj_exts, off); 2477 - if (unlikely(obj_ext->objcg)) 2483 + if (unlikely(obj_ext->objcg)) { 2484 + put_slab_obj_exts(obj_exts); 2478 2485 return true; 2486 + } 2487 + put_slab_obj_exts(obj_exts); 2479 2488 } 2480 2489 2481 2490 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);