1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
4 *
5 * (C) SGI 2006, Christoph Lameter
6 * Cleaned up and restructured to ease the addition of alternative
7 * implementations of SLAB allocators.
8 * (C) Linux Foundation 2008-2013
9 * Unified interface for all slab allocators
10 */
11
12#ifndef _LINUX_SLAB_H
13#define _LINUX_SLAB_H
14
15#include <linux/bug.h>
16#include <linux/cache.h>
17#include <linux/gfp.h>
18#include <linux/overflow.h>
19#include <linux/types.h>
20#include <linux/rcupdate.h>
21#include <linux/workqueue.h>
22#include <linux/percpu-refcount.h>
23#include <linux/cleanup.h>
24#include <linux/hash.h>
25
26enum _slab_flag_bits {
27 _SLAB_CONSISTENCY_CHECKS,
28 _SLAB_RED_ZONE,
29 _SLAB_POISON,
30 _SLAB_KMALLOC,
31 _SLAB_HWCACHE_ALIGN,
32 _SLAB_CACHE_DMA,
33 _SLAB_CACHE_DMA32,
34 _SLAB_STORE_USER,
35 _SLAB_PANIC,
36 _SLAB_TYPESAFE_BY_RCU,
37 _SLAB_TRACE,
38#ifdef CONFIG_DEBUG_OBJECTS
39 _SLAB_DEBUG_OBJECTS,
40#endif
41 _SLAB_NOLEAKTRACE,
42 _SLAB_NO_MERGE,
43#ifdef CONFIG_FAILSLAB
44 _SLAB_FAILSLAB,
45#endif
46#ifdef CONFIG_MEMCG
47 _SLAB_ACCOUNT,
48#endif
49#ifdef CONFIG_KASAN_GENERIC
50 _SLAB_KASAN,
51#endif
52 _SLAB_NO_USER_FLAGS,
53#ifdef CONFIG_KFENCE
54 _SLAB_SKIP_KFENCE,
55#endif
56#ifndef CONFIG_SLUB_TINY
57 _SLAB_RECLAIM_ACCOUNT,
58#endif
59 _SLAB_OBJECT_POISON,
60 _SLAB_CMPXCHG_DOUBLE,
61 _SLAB_NO_OBJ_EXT,
62#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
63 _SLAB_OBJ_EXT_IN_OBJ,
64#endif
65 _SLAB_FLAGS_LAST_BIT
66};
67
68#define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
69#define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U))
70
71/*
72 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops
74 */
75/* DEBUG: Perform (expensive) checks on alloc/free */
76#define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
77/* DEBUG: Red zone objs in a cache */
78#define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE)
79/* DEBUG: Poison objects */
80#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
81/* Indicate a kmalloc slab */
82#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
83/**
84 * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries.
85 *
86 * Sufficiently large objects are aligned on cache line boundary. For object
87 * size smaller than a half of cache line size, the alignment is on the half of
88 * cache line size. In general, if object size is smaller than 1/2^n of cache
89 * line size, the alignment is adjusted to 1/2^n.
90 *
91 * If explicit alignment is also requested by the respective
 * &struct kmem_cache_args field, the greater of the two alignments is applied.
93 */
94#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
95/* Use GFP_DMA memory */
96#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
97/* Use GFP_DMA32 memory */
98#define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
99/* DEBUG: Store the last owner for bug hunting */
100#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
101/* Panic if kmem_cache_create() fails */
102#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
103/**
104 * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
105 *
106 * This delays freeing the SLAB page by a grace period, it does _NOT_
107 * delay object freeing. This means that if you do kmem_cache_free()
108 * that memory location is free to be reused at any time. Thus it may
109 * be possible to see another object there in the same RCU grace period.
110 *
111 * This feature only ensures the memory location backing the object
112 * stays valid, the trick to using this is relying on an independent
113 * object validation pass. Something like:
114 *
115 * ::
116 *
 *  begin:
 *   rcu_read_lock();
 *   obj = lockless_lookup(key);
 *   if (obj) {
 *     if (!try_get_ref(obj)) { // might fail for free objects
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *
 *     if (obj->key != key) { // not the object we expected
 *       put_ref(obj);
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *   }
 *   rcu_read_unlock();
132 *
133 * This is useful if we need to approach a kernel structure obliquely,
134 * from its address obtained without the usual locking. We can lock
135 * the structure to stabilize it and check it's still at the given address,
136 * only if we can be sure that the memory has not been meanwhile reused
137 * for some other kind of object (which our subsystem's lock might corrupt).
138 *
 * Call rcu_read_lock() before reading the address, then rcu_read_unlock() after
 * taking the spinlock within the structure expected at that address.
141 *
142 * Note that object identity check has to be done *after* acquiring a
143 * reference, therefore user has to ensure proper ordering for loads.
144 * Similarly, when initializing objects allocated with SLAB_TYPESAFE_BY_RCU,
145 * the newly allocated object has to be fully initialized *before* its
146 * refcount gets initialized and proper ordering for stores is required.
147 * refcount_{add|inc}_not_zero_acquire() and refcount_set_release() are
148 * designed with the proper fences required for reference counting objects
149 * allocated with SLAB_TYPESAFE_BY_RCU.
150 *
151 * Note that it is not possible to acquire a lock within a structure
152 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
153 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the caller, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
156 * Alternatively, make the ctor passed to kmem_cache_create() initialize
157 * the locks at page-allocation time, as is done in __i915_request_ctor(),
158 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
159 * to safely acquire those ctor-initialized locks under rcu_read_lock()
160 * protection.
161 *
162 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
163 */
164#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
165/* Trace allocations and frees */
166#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
167
168/* Flag to prevent checks on free */
169#ifdef CONFIG_DEBUG_OBJECTS
170# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
171#else
172# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED
173#endif
174
175/* Avoid kmemleak tracing */
176#define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
177
178/*
179 * Prevent merging with compatible kmem caches. This flag should be used
180 * cautiously. Valid use cases:
181 *
182 * - caches created for self-tests (e.g. kunit)
183 * - general caches created and used by a subsystem, only when a
184 * (subsystem-specific) debug option is enabled
 * - performance critical caches; these should be very rare, discussed with the
 *   slab maintainers first, and not used together with CONFIG_SLUB_TINY
187 */
188#define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE)
189
190/* Fault injection mark */
191#ifdef CONFIG_FAILSLAB
192# define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB)
193#else
194# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
195#endif
196/**
197 * define SLAB_ACCOUNT - Account allocations to memcg.
198 *
 * All object allocations from this cache will be memcg accounted, regardless
 * of whether __GFP_ACCOUNT is passed to individual allocations.
201 */
202#ifdef CONFIG_MEMCG
203# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
204#else
205# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
206#endif
207
208#ifdef CONFIG_KASAN_GENERIC
209#define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN)
210#else
211#define SLAB_KASAN __SLAB_FLAG_UNUSED
212#endif
213
214/*
215 * Ignore user specified debugging flags.
216 * Intended for caches created for self-tests so they have only flags
217 * specified in the code and other flags are ignored.
218 */
219#define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
220
221#ifdef CONFIG_KFENCE
222#define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
223#else
224#define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED
225#endif
226
227/* The following flags affect the page allocator grouping pages by mobility */
228/**
229 * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable.
230 *
231 * Use this flag for caches that have an associated shrinker. As a result, slab
232 * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by
 * mobility, and are accounted in the SReclaimable counter in /proc/meminfo.
234 */
235#ifndef CONFIG_SLUB_TINY
236#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
237#else
238#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED
239#endif
240#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
241
242/* Slab created using create_boot_cache */
243#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
244
245#if defined(CONFIG_SLAB_OBJ_EXT) && defined(CONFIG_64BIT)
246#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_BIT(_SLAB_OBJ_EXT_IN_OBJ)
247#else
248#define SLAB_OBJ_EXT_IN_OBJ __SLAB_FLAG_UNUSED
249#endif
250
251/*
252 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
253 *
254 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
255 *
256 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
257 * Both make kfree a no-op.
258 */
259#define ZERO_SIZE_PTR ((void *)16)
260
261#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
262 (unsigned long)ZERO_SIZE_PTR)
263
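/*
 * Example (illustrative sketch, not part of the original header; the local
 * variable p is hypothetical): zero-sized allocations return ZERO_SIZE_PTR
 * rather than NULL, and kfree() accepts both.
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		;				// must not be dereferenced
 *
 *	kfree(p);				// no-op, same as kfree(NULL)
 */
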
264#include <linux/kasan.h>
265
266struct list_lru;
267struct mem_cgroup;
268/*
269 * struct kmem_cache related prototypes
270 */
271bool slab_is_available(void);
272
273/**
274 * struct kmem_cache_args - Less common arguments for kmem_cache_create()
275 *
276 * Any uninitialized fields of the structure are interpreted as unused. The
277 * exception is @freeptr_offset where %0 is a valid value, so
278 * @use_freeptr_offset must be also set to %true in order to interpret the field
279 * as used. For @useroffset %0 is also valid, but only with non-%0
280 * @usersize.
281 *
282 * When %NULL args is passed to kmem_cache_create(), it is equivalent to all
 * fields being unused.
284 */
285struct kmem_cache_args {
286 /**
287 * @align: The required alignment for the objects.
288 *
289 * %0 means no specific alignment is requested.
290 */
291 unsigned int align;
292 /**
293 * @useroffset: Usercopy region offset.
294 *
295 * %0 is a valid offset, when @usersize is non-%0
296 */
297 unsigned int useroffset;
298 /**
299 * @usersize: Usercopy region size.
300 *
301 * %0 means no usercopy region is specified.
302 */
303 unsigned int usersize;
304 /**
305 * @freeptr_offset: Custom offset for the free pointer
306 * in caches with &SLAB_TYPESAFE_BY_RCU or @ctor
307 *
308 * By default, &SLAB_TYPESAFE_BY_RCU and @ctor caches place the free
309 * pointer outside of the object. This might cause the object to grow
310 * in size. Cache creators that have a reason to avoid this can specify
311 * a custom free pointer offset in their data structure where the free
312 * pointer will be placed.
313 *
314 * For caches with &SLAB_TYPESAFE_BY_RCU, the caller must ensure that
315 * the free pointer does not overlay fields required to guard against
316 * object recycling (See &SLAB_TYPESAFE_BY_RCU for details).
317 *
318 * For caches with @ctor, the caller must ensure that the free pointer
319 * does not overlay fields initialized by the constructor.
320 *
321 * Currently, only caches with &SLAB_TYPESAFE_BY_RCU or @ctor
322 * may specify @freeptr_offset.
323 *
324 * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset
325 * is specified, @use_freeptr_offset must be set %true.
326 */
327 unsigned int freeptr_offset;
328 /**
329 * @use_freeptr_offset: Whether a @freeptr_offset is used.
330 */
331 bool use_freeptr_offset;
332 /**
333 * @ctor: A constructor for the objects.
334 *
335 * The constructor is invoked for each object in a newly allocated slab
 * page. It is the cache user's responsibility to free the object in the
337 * same state as after calling the constructor, or deal appropriately
338 * with any differences between a freshly constructed and a reallocated
339 * object.
340 *
341 * %NULL means no constructor.
342 */
343 void (*ctor)(void *);
344 /**
345 * @sheaf_capacity: Enable sheaves of given capacity for the cache.
346 *
347 * With a non-zero value, allocations from the cache go through caching
348 * arrays called sheaves. Each cpu has a main sheaf that's always
 * present, and a spare sheaf that may not be present. When both become
350 * empty, there's an attempt to replace an empty sheaf with a full sheaf
351 * from the per-node barn.
352 *
353 * When no full sheaf is available, and gfp flags allow blocking, a
354 * sheaf is allocated and filled from slab(s) using bulk allocation.
355 * Otherwise the allocation falls back to the normal operation
356 * allocating a single object from a slab.
357 *
 * Analogously, when freeing and both percpu sheaves are full, the barn
 * may replace one of them with an empty sheaf, unless the barn is over
 * capacity. In that case a sheaf is bulk freed to slab pages.
361 *
362 * The sheaves do not enforce NUMA placement of objects, so allocations
363 * via kmem_cache_alloc_node() with a node specified other than
364 * NUMA_NO_NODE will bypass them.
365 *
366 * Bulk allocation and free operations also try to use the cpu sheaves
 * and barn, but fall back to using slab pages directly.
368 *
369 * When slub_debug is enabled for the cache, the sheaf_capacity argument
370 * is ignored.
371 *
372 * %0 means no sheaves will be created.
373 */
374 unsigned int sheaf_capacity;
375};
376
377struct kmem_cache *__kmem_cache_create_args(const char *name,
378 unsigned int object_size,
379 struct kmem_cache_args *args,
380 slab_flags_t flags);
381static inline struct kmem_cache *
382__kmem_cache_create(const char *name, unsigned int size, unsigned int align,
383 slab_flags_t flags, void (*ctor)(void *))
384{
385 struct kmem_cache_args kmem_args = {
386 .align = align,
387 .ctor = ctor,
388 };
389
390 return __kmem_cache_create_args(name, size, &kmem_args, flags);
391}
392
393/**
394 * kmem_cache_create_usercopy - Create a kmem cache with a region suitable
395 * for copying to userspace.
396 * @name: A string which is used in /proc/slabinfo to identify this cache.
397 * @size: The size of objects to be created in this cache.
398 * @align: The required alignment for the objects.
399 * @flags: SLAB flags
400 * @useroffset: Usercopy region offset
401 * @usersize: Usercopy region size
402 * @ctor: A constructor for the objects, or %NULL.
403 *
 * This is a legacy wrapper; new code should use either KMEM_CACHE_USERCOPY()
 * if whitelisting a single field is sufficient, or kmem_cache_create() with
 * the necessary parameters passed via the args parameter (see
 * &struct kmem_cache_args).
408 *
409 * Return: a pointer to the cache on success, NULL on failure.
410 */
411static inline struct kmem_cache *
412kmem_cache_create_usercopy(const char *name, unsigned int size,
413 unsigned int align, slab_flags_t flags,
414 unsigned int useroffset, unsigned int usersize,
415 void (*ctor)(void *))
416{
417 struct kmem_cache_args kmem_args = {
418 .align = align,
419 .ctor = ctor,
420 .useroffset = useroffset,
421 .usersize = usersize,
422 };
423
424 return __kmem_cache_create_args(name, size, &kmem_args, flags);
425}
426
427/* If NULL is passed for @args, use this variant with default arguments. */
428static inline struct kmem_cache *
429__kmem_cache_default_args(const char *name, unsigned int size,
430 struct kmem_cache_args *args,
431 slab_flags_t flags)
432{
433 struct kmem_cache_args kmem_default_args = {};
434
435 /* Make sure we don't get passed garbage. */
436 if (WARN_ON_ONCE(args))
437 return ERR_PTR(-EINVAL);
438
439 return __kmem_cache_create_args(name, size, &kmem_default_args, flags);
440}
441
442/**
443 * kmem_cache_create - Create a kmem cache.
444 * @__name: A string which is used in /proc/slabinfo to identify this cache.
445 * @__object_size: The size of objects to be created in this cache.
446 * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL
447 * means defaults will be used for all the arguments.
448 *
449 * This is currently implemented as a macro using ``_Generic()`` to call
450 * either the new variant of the function, or a legacy one.
451 *
452 * The new variant has 4 parameters:
453 * ``kmem_cache_create(name, object_size, args, flags)``
454 *
455 * See __kmem_cache_create_args() which implements this.
456 *
457 * The legacy variant has 5 parameters:
458 * ``kmem_cache_create(name, object_size, align, flags, ctor)``
459 *
460 * The align and ctor parameters map to the respective fields of
461 * &struct kmem_cache_args
462 *
 * Context: Cannot be called within an interrupt, but can be interrupted.
464 *
465 * Return: a pointer to the cache on success, NULL on failure.
466 */
467#define kmem_cache_create(__name, __object_size, __args, ...) \
468 _Generic((__args), \
469 struct kmem_cache_args *: __kmem_cache_create_args, \
470 void *: __kmem_cache_default_args, \
471 default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__)
472
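/*
 * Example (illustrative sketch, not from the original header): creating a
 * cache with the new args-based variant and with the legacy variant. The
 * struct foo type and foo_ctor() below are hypothetical.
 *
 *	static void foo_ctor(void *obj) { memset(obj, 0, sizeof(struct foo)); }
 *
 *	struct kmem_cache_args args = {
 *		.align	= __alignof__(struct foo),
 *		.ctor	= foo_ctor,
 *	};
 *	struct kmem_cache *c;
 *
 *	// New variant: an args struct followed by the flags
 *	c = kmem_cache_create("foo", sizeof(struct foo), &args, SLAB_HWCACHE_ALIGN);
 *
 *	// Legacy variant: align and ctor passed directly
 *	c = kmem_cache_create("foo", sizeof(struct foo),
 *			      __alignof__(struct foo), SLAB_HWCACHE_ALIGN, foo_ctor);
 *	...
 *	kmem_cache_destroy(c);
 */
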
473void kmem_cache_destroy(struct kmem_cache *s);
474int kmem_cache_shrink(struct kmem_cache *s);
475
476/*
477 * Please use this macro to create slab caches. Simply specify the
478 * name of the structure and maybe some flags that are listed above.
479 *
480 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
482 * then the objects will be properly aligned in SMP configurations.
483 */
484#define KMEM_CACHE(__struct, __flags) \
485 __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
486 &(struct kmem_cache_args) { \
487 .align = __alignof__(struct __struct), \
488 }, (__flags))
489
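/*
 * Example (illustrative sketch; struct foo and foo_cache are hypothetical):
 *
 *	struct foo {
 *		spinlock_t	lock;
 *		struct list_head list;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 */
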
490/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
493 */
494#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \
495 __kmem_cache_create_args(#__struct, sizeof(struct __struct), \
496 &(struct kmem_cache_args) { \
497 .align = __alignof__(struct __struct), \
498 .useroffset = offsetof(struct __struct, __field), \
499 .usersize = sizeof_field(struct __struct, __field), \
500 }, (__flags))
501
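/*
 * Example (illustrative sketch; struct foo, foo_cache and the data[] field are
 * hypothetical): whitelist only the field that is copied to/from userspace.
 *
 *	struct foo {
 *		refcount_t	ref;
 *		char		data[64];	// copied to/from userspace
 *	};
 *
 *	foo_cache = KMEM_CACHE_USERCOPY(foo, SLAB_ACCOUNT, data);
 */
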
502/*
503 * Common kmalloc functions provided by all allocators
504 */
505void * __must_check krealloc_node_align_noprof(const void *objp, size_t new_size,
506 unsigned long align,
507 gfp_t flags, int nid) __realloc_size(2);
508#define krealloc_noprof(_o, _s, _f) krealloc_node_align_noprof(_o, _s, 1, _f, NUMA_NO_NODE)
509#define krealloc_node_align(...) alloc_hooks(krealloc_node_align_noprof(__VA_ARGS__))
510#define krealloc_node(_o, _s, _f, _n) krealloc_node_align(_o, _s, 1, _f, _n)
511#define krealloc(...) krealloc_node(__VA_ARGS__, NUMA_NO_NODE)
512
513void kfree(const void *objp);
514void kfree_nolock(const void *objp);
515void kfree_sensitive(const void *objp);
516
517DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))
518DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T))
519
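/*
 * Example (illustrative sketch): the DEFINE_FREE(kfree, ...) declaration above
 * enables scope-based cleanup via __free() from <linux/cleanup.h>. The names
 * buf, len and setup_fails below are hypothetical; the buffer is freed
 * automatically on scope exit unless ownership is transferred.
 *
 *	void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *	if (!buf)
 *		return NULL;
 *	if (setup_fails)
 *		return NULL;		// buf is kfree()d automatically here
 *	return_ptr(buf);		// transfer ownership to the caller
 */
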
520size_t ksize(const void *objp);
521
522#ifdef CONFIG_PRINTK
523bool kmem_dump_obj(void *object);
524#else
525static inline bool kmem_dump_obj(void *object) { return false; }
526#endif
527
528/*
529 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
530 * alignment larger than the alignment of a 64-bit integer.
531 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
532 */
533#ifdef ARCH_HAS_DMA_MINALIGN
534#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
535#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
536#endif
537#endif
538
539#ifndef ARCH_KMALLOC_MINALIGN
540#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
541#elif ARCH_KMALLOC_MINALIGN > 8
542#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
543#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
544#endif
545
546/*
547 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
548 * Intended for arches that get misalignment faults even for 64 bit integer
549 * aligned buffers.
550 */
551#ifndef ARCH_SLAB_MINALIGN
552#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
553#endif
554
555/*
556 * Arches can define this function if they want to decide the minimum slab
557 * alignment at runtime. The value returned by the function must be a power
558 * of two and >= ARCH_SLAB_MINALIGN.
559 */
560#ifndef arch_slab_minalign
561static inline unsigned int arch_slab_minalign(void)
562{
563 return ARCH_SLAB_MINALIGN;
564}
565#endif
566
567/*
568 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
569 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
570 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
571 */
572#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
573#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
574#define __assume_page_alignment __assume_aligned(PAGE_SIZE)
575
576/*
577 * Kmalloc array related definitions
578 */
579
580/*
 * SLUB directly allocates requests fitting into an order-1 page
582 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
583 */
584#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
585#define KMALLOC_SHIFT_MAX (MAX_PAGE_ORDER + PAGE_SHIFT)
586#ifndef KMALLOC_SHIFT_LOW
587#define KMALLOC_SHIFT_LOW 3
588#endif
589
590/* Maximum allocatable size */
591#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_MAX)
592/* Maximum size for which we actually use a slab cache */
593#define KMALLOC_MAX_CACHE_SIZE (1UL << KMALLOC_SHIFT_HIGH)
594/* Maximum order allocatable via the slab allocator */
595#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_MAX - PAGE_SHIFT)
596
597/*
598 * Kmalloc subsystem.
599 */
600#ifndef KMALLOC_MIN_SIZE
601#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
602#endif
603
604/*
 * This restriction comes from the byte-sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte-sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum object
 * size and give up on using a byte-sized index.
611 */
612#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
613 (KMALLOC_MIN_SIZE) : 16)
614
615#ifdef CONFIG_RANDOM_KMALLOC_CACHES
616#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
617#else
618#define RANDOM_KMALLOC_CACHES_NR 0
619#endif
620
621/*
 * Whenever changing this, take care that kmalloc_type() and
623 * create_kmalloc_caches() still work as intended.
624 *
625 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
626 * is for accounted but unreclaimable and non-dma objects. All the other
627 * kmem caches can have both accounted and unaccounted objects.
628 */
629enum kmalloc_cache_type {
630 KMALLOC_NORMAL = 0,
631#ifndef CONFIG_ZONE_DMA
632 KMALLOC_DMA = KMALLOC_NORMAL,
633#endif
634#ifndef CONFIG_MEMCG
635 KMALLOC_CGROUP = KMALLOC_NORMAL,
636#endif
637 KMALLOC_RANDOM_START = KMALLOC_NORMAL,
638 KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
639#ifdef CONFIG_SLUB_TINY
640 KMALLOC_RECLAIM = KMALLOC_NORMAL,
641#else
642 KMALLOC_RECLAIM,
643#endif
644#ifdef CONFIG_ZONE_DMA
645 KMALLOC_DMA,
646#endif
647#ifdef CONFIG_MEMCG
648 KMALLOC_CGROUP,
649#endif
650 NR_KMALLOC_TYPES
651};
652
653typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];
654
655extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];
656
657/*
658 * Define gfp bits that should not be set for KMALLOC_NORMAL.
659 */
660#define KMALLOC_NOT_NORMAL_BITS \
661 (__GFP_RECLAIMABLE | \
662 (IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
663 (IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))
664
665extern unsigned long random_kmalloc_seed;
666
667static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
668{
669 /*
670 * The most common case is KMALLOC_NORMAL, so test for it
671 * with a single branch for all the relevant flags.
672 */
673 if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
674#ifdef CONFIG_RANDOM_KMALLOC_CACHES
675 /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
676 return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
677 ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
678#else
679 return KMALLOC_NORMAL;
680#endif
681
682 /*
683 * At least one of the flags has to be set. Their priorities in
684 * decreasing order are:
685 * 1) __GFP_DMA
686 * 2) __GFP_RECLAIMABLE
687 * 3) __GFP_ACCOUNT
688 */
689 if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
690 return KMALLOC_DMA;
691 if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
692 return KMALLOC_RECLAIM;
693 else
694 return KMALLOC_CGROUP;
695}
696
697/*
698 * Figure out which kmalloc slab an allocation of a certain size
699 * belongs to.
700 * 0 = zero alloc
701 * 1 = 65 .. 96 bytes
702 * 2 = 129 .. 192 bytes
703 * n = 2^(n-1)+1 .. 2^n
704 *
705 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
706 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
707 * Callers where !size_is_constant should only be test modules, where runtime
708 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
709 */
710static __always_inline unsigned int __kmalloc_index(size_t size,
711 bool size_is_constant)
712{
713 if (!size)
714 return 0;
715
716 if (size <= KMALLOC_MIN_SIZE)
717 return KMALLOC_SHIFT_LOW;
718
719 if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
720 return 1;
721 if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
722 return 2;
723 if (size <= 8) return 3;
724 if (size <= 16) return 4;
725 if (size <= 32) return 5;
726 if (size <= 64) return 6;
727 if (size <= 128) return 7;
728 if (size <= 256) return 8;
729 if (size <= 512) return 9;
730 if (size <= 1024) return 10;
731 if (size <= 2 * 1024) return 11;
732 if (size <= 4 * 1024) return 12;
733 if (size <= 8 * 1024) return 13;
734 if (size <= 16 * 1024) return 14;
735 if (size <= 32 * 1024) return 15;
736 if (size <= 64 * 1024) return 16;
737 if (size <= 128 * 1024) return 17;
738 if (size <= 256 * 1024) return 18;
739 if (size <= 512 * 1024) return 19;
740 if (size <= 1024 * 1024) return 20;
741 if (size <= 2 * 1024 * 1024) return 21;
742
743 if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
744 BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
745 else
746 BUG();
747
748 /* Will never be reached. Needed because the compiler may complain */
749 return -1;
750}
751static_assert(PAGE_SHIFT <= 20);
752#define kmalloc_index(s) __kmalloc_index(s, true)
753
754#include <linux/alloc_tag.h>
755
756/**
757 * kmem_cache_alloc - Allocate an object
758 * @cachep: The cache to allocate from.
759 * @flags: See kmalloc().
760 *
761 * Allocate an object from this cache.
762 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
763 *
764 * Return: pointer to the new object or %NULL in case of error
765 */
766void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
767 gfp_t flags) __assume_slab_alignment __malloc;
768#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
769
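/*
 * Example (illustrative sketch; struct foo and foo_cache are hypothetical,
 * with foo_cache created via KMEM_CACHE() or kmem_cache_create()):
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, f);
 */
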
770void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
771 gfp_t gfpflags) __assume_slab_alignment __malloc;
772#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
773
774/**
 * kmem_cache_charge - memcg charge an already allocated slab object
776 * @objp: address of the slab object to memcg charge
777 * @gfpflags: describe the allocation context
778 *
779 * kmem_cache_charge allows charging a slab object to the current memcg,
780 * primarily in cases where charging at allocation time might not be possible
 * because the target memcg is not known (e.g. in softirq context).
782 *
 * The objp should be a pointer returned by the slab allocator functions like
784 * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge
785 * behavior can be controlled through gfpflags parameter, which affects how the
786 * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes
787 * that overcharging is requested instead of failure, but is not applied for the
788 * internal metadata allocation.
789 *
 * There are several cases where it will return true even if the charging was
 * not done. More specifically:
793 *
794 * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems.
795 * 2. Already charged slab objects.
796 * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc()
797 * without __GFP_ACCOUNT
798 * 4. Allocating internal metadata has failed
799 *
800 * Return: true if charge was successful otherwise false.
801 */
802bool kmem_cache_charge(void *objp, gfp_t gfpflags);
803void kmem_cache_free(struct kmem_cache *s, void *objp);
804
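/*
 * Example (illustrative sketch; obj, obj_cache and drop_object() are
 * hypothetical): charge an object after the fact, e.g. when it was allocated
 * in softirq context where the target memcg was not known.
 *
 *	// softirq context: allocate without memcg charging
 *	obj = kmem_cache_alloc(obj_cache, GFP_ATOMIC);
 *
 *	// later, in task context with the intended memcg current:
 *	if (!kmem_cache_charge(obj, GFP_KERNEL))
 *		drop_object(obj);	// hypothetical fallback when charging fails
 */
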
805kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
806 unsigned int useroffset, unsigned int usersize,
807 void (*ctor)(void *));
808
809/*
810 * Bulk allocation and freeing operations. These are accelerated in an
811 * allocator specific way to avoid taking locks repeatedly or building
812 * metadata structures unnecessarily.
813 *
814 * Note that interrupts must be enabled when calling these functions.
815 */
816void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
817
818int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
819#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
820
821static __always_inline void kfree_bulk(size_t size, void **p)
822{
823 kmem_cache_free_bulk(NULL, size, p);
824}
825
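/*
 * Example (illustrative sketch; foo_cache and the on-stack array are
 * hypothetical): allocate and free a batch of objects with one call each.
 *
 *	void *objs[16];
 *	int allocated;
 *
 *	allocated = kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL,
 *					  ARRAY_SIZE(objs), objs);
 *	if (!allocated)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(foo_cache, allocated, objs);
 */
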
826void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
827 int node) __assume_slab_alignment __malloc;
828#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
829
830struct slab_sheaf *
831kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t gfp, unsigned int size);
832
833int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp_t gfp,
834 struct slab_sheaf **sheafp, unsigned int size);
835
836void kmem_cache_return_sheaf(struct kmem_cache *s, gfp_t gfp,
837 struct slab_sheaf *sheaf);
838
839void *kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache *cachep, gfp_t gfp,
840 struct slab_sheaf *sheaf) __assume_slab_alignment __malloc;
841#define kmem_cache_alloc_from_sheaf(...) \
842 alloc_hooks(kmem_cache_alloc_from_sheaf_noprof(__VA_ARGS__))
843
844unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf);
845
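/*
 * Example (illustrative, heavily simplified sketch of the prefilled sheaf API
 * declared above; foo_cache and obj are hypothetical and error handling is
 * elided): prefill a sheaf so a bounded number of subsequent allocations can
 * be served from it, then return the (possibly partially used) sheaf.
 *
 *	struct slab_sheaf *sheaf;
 *
 *	sheaf = kmem_cache_prefill_sheaf(foo_cache, GFP_KERNEL, 8);
 *	// check for failure per kmem_cache_prefill_sheaf()'s return convention
 *
 *	obj = kmem_cache_alloc_from_sheaf(foo_cache, GFP_NOWAIT, sheaf);
 *	...
 *	kmem_cache_return_sheaf(foo_cache, GFP_KERNEL, sheaf);
 */
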
846/*
847 * These macros allow declaring a kmem_buckets * parameter alongside size, which
848 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
849 * sites don't have to pass NULL.
850 */
851#ifdef CONFIG_SLAB_BUCKETS
852#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size), kmem_buckets *(_b)
853#define PASS_BUCKET_PARAMS(_size, _b) (_size), (_b)
854#define PASS_BUCKET_PARAM(_b) (_b)
855#else
856#define DECL_BUCKET_PARAMS(_size, _b) size_t (_size)
857#define PASS_BUCKET_PARAMS(_size, _b) (_size)
858#define PASS_BUCKET_PARAM(_b) NULL
859#endif
860
861/*
862 * The following functions are not to be used directly and are intended only
863 * for internal use from kmalloc() and kmalloc_node()
864 * with the exception of kunit tests
865 */
866
867void *__kmalloc_noprof(size_t size, gfp_t flags)
868 __assume_kmalloc_alignment __alloc_size(1);
869
870void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
871 __assume_kmalloc_alignment __alloc_size(1);
872
873void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
874 __assume_kmalloc_alignment __alloc_size(3);
875
876void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
877 int node, size_t size)
878 __assume_kmalloc_alignment __alloc_size(4);
879
880void *__kmalloc_large_noprof(size_t size, gfp_t flags)
881 __assume_page_alignment __alloc_size(1);
882
883void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
884 __assume_page_alignment __alloc_size(1);
885
886/**
887 * kmalloc - allocate kernel memory
888 * @size: how many bytes of memory are required.
889 * @flags: describe the allocation context
890 *
891 * kmalloc is the normal method of allocating memory
892 * for objects smaller than page size in the kernel.
893 *
894 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For a power-of-two @size, the alignment is also guaranteed to be
 * at least the size. For other sizes, the alignment is guaranteed to
897 * be at least the largest power-of-two divisor of @size.
898 *
899 * The @flags argument may be one of the GFP flags defined at
900 * include/linux/gfp_types.h and described at
901 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
902 *
903 * The recommended usage of the @flags is described at
904 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
905 *
906 * Below is a brief outline of the most useful GFP flags
907 *
908 * %GFP_KERNEL
 * Allocate normal kernel RAM. May sleep.
910 *
911 * %GFP_NOWAIT
912 * Allocation will not sleep.
913 *
914 * %GFP_ATOMIC
915 * Allocation will not sleep. May use emergency pools.
916 *
917 * Also it is possible to set different flags by OR'ing
918 * in one or more of the following additional @flags:
919 *
920 * %__GFP_ZERO
921 * Zero the allocated memory before returning. Also see kzalloc().
922 *
923 * %__GFP_HIGH
924 * This allocation has high priority and may use emergency pools.
925 *
926 * %__GFP_NOFAIL
927 * Indicate that this allocation is in no way allowed to fail
928 * (think twice before using).
929 *
930 * %__GFP_NORETRY
931 * If memory is not immediately available,
932 * then give up at once.
933 *
934 * %__GFP_NOWARN
935 * If allocation fails, don't issue any warnings.
936 *
937 * %__GFP_RETRY_MAYFAIL
 * Try really hard to satisfy the allocation, but eventually
 * fail.
940 */
941static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
942{
943 if (__builtin_constant_p(size) && size) {
944 unsigned int index;
945
946 if (size > KMALLOC_MAX_CACHE_SIZE)
947 return __kmalloc_large_noprof(size, flags);
948
949 index = kmalloc_index(size);
950 return __kmalloc_cache_noprof(
951 kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
952 flags, size);
953 }
954 return __kmalloc_noprof(size, flags);
955}
956#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__))
957
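/*
 * Example (illustrative sketch; struct bar, buf and len are hypothetical):
 *
 *	struct bar *b;
 *	char *buf;
 *
 *	b = kzalloc(sizeof(*b), GFP_KERNEL);		// zeroed, may sleep
 *	buf = kmalloc(len, GFP_ATOMIC | __GFP_NOWARN);	// atomic context
 *	...
 *	kfree(buf);
 *	kfree(b);
 */
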
958void *kmalloc_nolock_noprof(size_t size, gfp_t gfp_flags, int node);
959#define kmalloc_nolock(...) alloc_hooks(kmalloc_nolock_noprof(__VA_ARGS__))
960
961/**
 * __alloc_objs - Allocate objects of a given type using a given kmalloc wrapper
963 * @KMALLOC: which size-based kmalloc wrapper to allocate with.
964 * @GFP: GFP flags for the allocation.
965 * @TYPE: type to allocate space for.
966 * @COUNT: how many @TYPE objects to allocate.
967 *
 * Returns: Newly allocated pointer to the first of @COUNT-many allocated
 * @TYPE objects, or NULL on failure.
970 */
971#define __alloc_objs(KMALLOC, GFP, TYPE, COUNT) \
972({ \
973 const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \
974 (TYPE *)KMALLOC(__obj_size, GFP); \
975})
976
977/**
978 * __alloc_flex - Allocate an object that has a trailing flexible array
979 * @KMALLOC: kmalloc wrapper function to use for allocation.
980 * @GFP: GFP flags for the allocation.
981 * @TYPE: type of structure to allocate space for.
982 * @FAM: The name of the flexible array member of @TYPE structure.
983 * @COUNT: how many @FAM elements to allocate space for.
984 *
985 * Returns: Newly allocated pointer to @TYPE with @COUNT-many trailing
986 * @FAM elements, or NULL on failure or if @COUNT cannot be represented
987 * by the member of @TYPE that counts the @FAM elements (annotated via
988 * __counted_by()).
989 */
990#define __alloc_flex(KMALLOC, GFP, TYPE, FAM, COUNT) \
991({ \
992 const size_t __count = (COUNT); \
993 const size_t __obj_size = struct_size_t(TYPE, FAM, __count); \
994 TYPE *__obj_ptr = KMALLOC(__obj_size, GFP); \
995 if (__obj_ptr) \
996 __set_flex_counter(__obj_ptr->FAM, __count); \
997 __obj_ptr; \
998})
999
1000/**
1001 * kmalloc_obj - Allocate a single instance of the given type
1002 * @VAR_OR_TYPE: Variable or type to allocate.
1003 * @GFP: GFP flags for the allocation.
1004 *
1005 * Returns: newly allocated pointer to a @VAR_OR_TYPE on success, or NULL
1006 * on failure.
1007 */
1008#define kmalloc_obj(VAR_OR_TYPE, ...) \
1009 __alloc_objs(kmalloc, default_gfp(__VA_ARGS__), typeof(VAR_OR_TYPE), 1)
1010
1011/**
1012 * kmalloc_objs - Allocate an array of the given type
1013 * @VAR_OR_TYPE: Variable or type to allocate an array of.
1014 * @COUNT: How many elements in the array.
1015 * @GFP: GFP flags for the allocation.
1016 *
1017 * Returns: newly allocated pointer to array of @VAR_OR_TYPE on success,
1018 * or NULL on failure.
1019 */
1020#define kmalloc_objs(VAR_OR_TYPE, COUNT, ...) \
1021 __alloc_objs(kmalloc, default_gfp(__VA_ARGS__), typeof(VAR_OR_TYPE), COUNT)
1022
1023/**
1024 * kmalloc_flex - Allocate a single instance of the given flexible structure
1025 * @VAR_OR_TYPE: Variable or type to allocate (with its flex array).
1026 * @FAM: The name of the flexible array member of the structure.
1027 * @COUNT: How many flexible array member elements are desired.
1028 * @GFP: GFP flags for the allocation.
1029 *
1030 * Returns: newly allocated pointer to @VAR_OR_TYPE on success, NULL on
1031 * failure. If @FAM has been annotated with __counted_by(), the allocation
1032 * will immediately fail if @COUNT is larger than what the type of the
1033 * struct's counter variable can represent.
1034 */
1035#define kmalloc_flex(VAR_OR_TYPE, FAM, COUNT, ...) \
1036 __alloc_flex(kmalloc, default_gfp(__VA_ARGS__), typeof(VAR_OR_TYPE), FAM, COUNT)
1037
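/*
 * Example (illustrative sketch of the flexible-array helpers above; struct foo
 * and n are hypothetical): per the documentation above, with a __counted_by()
 * annotation the counter field is set by the allocation macro, and a @COUNT
 * that the counter type cannot represent makes the allocation fail.
 *
 *	struct foo {
 *		u8	nr_items;
 *		u32	items[] __counted_by(nr_items);
 *	};
 *
 *	struct foo *f = kmalloc_flex(struct foo, items, n, GFP_KERNEL);
 *
 *	if (!f)
 *		return -ENOMEM;
 *	// f->nr_items == n already, set via __set_flex_counter()
 */
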
1038/* All kzalloc aliases for kmalloc_(obj|objs|flex). */
1039#define kzalloc_obj(P, ...) \
1040 __alloc_objs(kzalloc, default_gfp(__VA_ARGS__), typeof(P), 1)
1041#define kzalloc_objs(P, COUNT, ...) \
1042 __alloc_objs(kzalloc, default_gfp(__VA_ARGS__), typeof(P), COUNT)
1043#define kzalloc_flex(P, FAM, COUNT, ...) \
1044 __alloc_flex(kzalloc, default_gfp(__VA_ARGS__), typeof(P), FAM, COUNT)
1045
1046/* All kvmalloc aliases for kmalloc_(obj|objs|flex). */
1047#define kvmalloc_obj(P, ...) \
1048 __alloc_objs(kvmalloc, default_gfp(__VA_ARGS__), typeof(P), 1)
1049#define kvmalloc_objs(P, COUNT, ...) \
1050 __alloc_objs(kvmalloc, default_gfp(__VA_ARGS__), typeof(P), COUNT)
1051#define kvmalloc_flex(P, FAM, COUNT, ...) \
1052 __alloc_flex(kvmalloc, default_gfp(__VA_ARGS__), typeof(P), FAM, COUNT)
1053
1054/* All kvzalloc aliases for kmalloc_(obj|objs|flex). */
1055#define kvzalloc_obj(P, ...) \
1056 __alloc_objs(kvzalloc, default_gfp(__VA_ARGS__), typeof(P), 1)
1057#define kvzalloc_objs(P, COUNT, ...) \
1058 __alloc_objs(kvzalloc, default_gfp(__VA_ARGS__), typeof(P), COUNT)
1059#define kvzalloc_flex(P, FAM, COUNT, ...) \
1060 __alloc_flex(kvzalloc, default_gfp(__VA_ARGS__), typeof(P), FAM, COUNT)
1061
1062#define kmem_buckets_alloc(_b, _size, _flags) \
1063 alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))
1064
1065#define kmem_buckets_alloc_track_caller(_b, _size, _flags) \
1066 alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))
1067
1068static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
1069{
1070 if (__builtin_constant_p(size) && size) {
1071 unsigned int index;
1072
1073 if (size > KMALLOC_MAX_CACHE_SIZE)
1074 return __kmalloc_large_node_noprof(size, flags, node);
1075
1076 index = kmalloc_index(size);
1077 return __kmalloc_cache_node_noprof(
1078 kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
1079 flags, node, size);
1080 }
1081 return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
1082}
1083#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
1084
1085/**
1086 * kmalloc_array - allocate memory for an array.
1087 * @n: number of elements.
1088 * @size: element size.
1089 * @flags: the type of memory to allocate (see kmalloc).
1090 */
1091static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
1092{
1093 size_t bytes;
1094
1095 if (unlikely(check_mul_overflow(n, size, &bytes)))
1096 return NULL;
1097 return kmalloc_noprof(bytes, flags);
1098}
1099#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
1100
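/*
 * Example (illustrative sketch; struct bar and nr are hypothetical): prefer
 * kmalloc_array()/kcalloc() over an open-coded multiplication so that an
 * overflowing nr * size returns NULL instead of a too-small buffer.
 *
 *	struct bar *tab = kmalloc_array(nr, sizeof(*tab), GFP_KERNEL);
 *
 *	if (!tab)
 *		return -ENOMEM;
 *	...
 *	kfree(tab);
 *
 * kcalloc(nr, sizeof(*tab), GFP_KERNEL) additionally zeroes the array.
 */
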
1101/**
1102 * krealloc_array - reallocate memory for an array.
1103 * @p: pointer to the memory chunk to reallocate
1104 * @new_n: new number of elements to alloc
1105 * @new_size: new size of a single member of the array
1106 * @flags: the type of memory to allocate (see kmalloc)
1107 *
1108 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
1109 * initial memory allocation, every subsequent call to this API for the same
1110 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
1111 * __GFP_ZERO is not fully honored by this API.
1112 *
1113 * See krealloc_noprof() for further details.
1114 *
1115 * In any case, the contents of the object pointed to are preserved up to the
1116 * lesser of the new and old sizes.
1117 */
1118static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
1119 size_t new_n,
1120 size_t new_size,
1121 gfp_t flags)
1122{
1123 size_t bytes;
1124
1125 if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
1126 return NULL;
1127
1128 return krealloc_noprof(p, bytes, flags);
1129}
1130#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
1131
1132/**
1133 * kcalloc - allocate memory for an array. The memory is set to zero.
1134 * @n: number of elements.
1135 * @size: element size.
1136 * @flags: the type of memory to allocate (see kmalloc).
1137 */
1138#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO)
1139
1140void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
1141 unsigned long caller) __alloc_size(1);
1142#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
1143 __kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
1144#define kmalloc_node_track_caller(...) \
1145 alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
1146
1147/*
1148 * kmalloc_track_caller is a special version of kmalloc that records the
1149 * calling function of the routine calling it for slab leak tracking instead
1150 * of just the calling function (confusing, eh?).
1151 * It's useful when the call to kmalloc comes from a widely-used standard
1152 * allocator where we care about the real place the memory allocation
1153 * request comes from.
1154 */
1155#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
1156
1157#define kmalloc_track_caller_noprof(...) \
1158 kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)
1159
1160static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
1161 int node)
1162{
1163 size_t bytes;
1164
1165 if (unlikely(check_mul_overflow(n, size, &bytes)))
1166 return NULL;
1167 if (__builtin_constant_p(n) && __builtin_constant_p(size))
1168 return kmalloc_node_noprof(bytes, flags, node);
1169 return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
1170}
1171#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
1172
1173#define kcalloc_node(_n, _size, _flags, _node) \
1174 kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
1175
1176/*
1177 * Shortcuts
1178 */
1179#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
1180
1181/**
1182 * kzalloc - allocate memory. The memory is set to zero.
1183 * @size: how many bytes of memory are required.
1184 * @flags: the type of memory to allocate (see kmalloc).
1185 */
1186static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
1187{
1188 return kmalloc_noprof(size, flags | __GFP_ZERO);
1189}
1190#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__))
1191#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
1192
1193void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), unsigned long align,
1194 gfp_t flags, int node) __alloc_size(1);
1195#define kvmalloc_node_align_noprof(_size, _align, _flags, _node) \
1196 __kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, NULL), _align, _flags, _node)
1197#define kvmalloc_node_align(...) \
1198 alloc_hooks(kvmalloc_node_align_noprof(__VA_ARGS__))
1199#define kvmalloc_node(_s, _f, _n) kvmalloc_node_align(_s, 1, _f, _n)
1200#define kvmalloc(...) kvmalloc_node(__VA_ARGS__, NUMA_NO_NODE)
1201#define kvzalloc(_size, _flags) kvmalloc(_size, (_flags)|__GFP_ZERO)
1202
1203#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
1204
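/*
 * Example (illustrative sketch; tbl and nbytes are hypothetical): kvmalloc()
 * attempts a kmalloc() allocation first and falls back to vmalloc() for larger
 * sizes, so the result must be released with kvfree() rather than kfree().
 *
 *	void *tbl = kvzalloc(nbytes, GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */
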
1205#define kmem_buckets_valloc(_b, _size, _flags) \
1206 alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), 1, _flags, NUMA_NO_NODE))
1207
1208static inline __alloc_size(1, 2) void *
1209kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
1210{
1211 size_t bytes;
1212
1213 if (unlikely(check_mul_overflow(n, size, &bytes)))
1214 return NULL;
1215
1216 return kvmalloc_node_align_noprof(bytes, 1, flags, node);
1217}
1218
1219#define kvmalloc_array_noprof(...) kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
1220#define kvcalloc_node_noprof(_n,_s,_f,_node) kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
1221#define kvcalloc_noprof(...) kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
1222
1223#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
1224#define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
1225#define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__))
1226
1227void *kvrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
1228 gfp_t flags, int nid) __realloc_size(2);
1229#define kvrealloc_node_align(...) \
1230 alloc_hooks(kvrealloc_node_align_noprof(__VA_ARGS__))
1231#define kvrealloc_node(_p, _s, _f, _n) kvrealloc_node_align(_p, _s, 1, _f, _n)
1232#define kvrealloc(...) kvrealloc_node(__VA_ARGS__, NUMA_NO_NODE)
1233
1234extern void kvfree(const void *addr);
1235DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))
1236
1237extern void kvfree_sensitive(const void *addr, size_t len);
1238
1239unsigned int kmem_cache_size(struct kmem_cache *s);
1240
1241#ifndef CONFIG_KVFREE_RCU_BATCHED
1242static inline void kvfree_rcu_barrier(void)
1243{
1244 rcu_barrier();
1245}
1246
1247static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s)
1248{
1249 rcu_barrier();
1250}
1251
1252static inline void kfree_rcu_scheduler_running(void) { }
1253#else
1254void kvfree_rcu_barrier(void);
1255
1256void kvfree_rcu_barrier_on_cache(struct kmem_cache *s);
1257
1258void kfree_rcu_scheduler_running(void);
1259#endif
1260
1261/**
1262 * kmalloc_size_roundup - Report allocation bucket size for the given size
1263 *
1264 * @size: Number of bytes to round up from.
1265 *
1266 * This returns the number of bytes that would be available in a kmalloc()
1267 * allocation of @size bytes. For example, a 126 byte request would be
1268 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
1269 * for the general-purpose kmalloc()-based allocations, and is not for the
1270 * pre-sized kmem_cache_alloc()-based allocations.)
1271 *
1272 * Use this to kmalloc() the full bucket size ahead of time instead of using
1273 * ksize() to query the size after an allocation.
1274 */
1275size_t kmalloc_size_roundup(size_t size);
1276
1277void __init kmem_cache_init_late(void);
1278void __init kvfree_rcu_init(void);
1279
1280#endif /* _LINUX_SLAB_H */