Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm/vmalloc: allow to set node and align in vrealloc

Patch series "support large align and nid in Rust allocators", v15.

The series provides the ability for Rust allocators to set NUMA node and
large alignment.


This patch (of 4):

Reimplement vrealloc() to be able to set node and alignment should a user
need to do so. Rename the function to vrealloc_node_align() to better
match what it actually does now and introduce macros for vrealloc() and
friends for backward compatibility.

With that change we also provide the ability for the Rust part of the
kernel to set node and alignment in its allocations.

Link: https://lkml.kernel.org/r/20250806124034.1724515-1-vitaly.wool@konsulko.se
Link: https://lkml.kernel.org/r/20250806124108.1724561-1-vitaly.wool@konsulko.se
Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.se>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jann Horn <jannh@google.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Vitaly Wool and committed by Andrew Morton (commits 4c5d3365, 61dc4358).

+35 -9
+9 -3
include/linux/vmalloc.h
@@ include/linux/vmalloc.h @@
 extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
 #define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))

-void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
-		__realloc_size(2);
-#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+void *__must_check vrealloc_node_align_noprof(const void *p, size_t size,
+		unsigned long align, gfp_t flags, int nid) __realloc_size(2);
+#define vrealloc_node_noprof(_p, _s, _f, _nid)	\
+	vrealloc_node_align_noprof(_p, _s, 1, _f, _nid)
+#define vrealloc_noprof(_p, _s, _f)		\
+	vrealloc_node_align_noprof(_p, _s, 1, _f, NUMA_NO_NODE)
+#define vrealloc_node_align(...) alloc_hooks(vrealloc_node_align_noprof(__VA_ARGS__))
+#define vrealloc_node(...) alloc_hooks(vrealloc_node_noprof(__VA_ARGS__))
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))

 extern void vfree(const void *addr);
 extern void vfree_atomic(const void *addr);
+2 -1
mm/nommu.c
@@ mm/nommu.c @@
 }
 EXPORT_SYMBOL(__vmalloc_noprof);

-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
+		gfp_t flags, int node)
 {
 	return krealloc_noprof(p, size, (flags | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
+24 -5
mm/vmalloc.c
@@ mm/vmalloc.c @@
 EXPORT_SYMBOL(vzalloc_node_noprof);

 /**
- * vrealloc - reallocate virtually contiguous memory; contents remain unchanged
+ * vrealloc_node_align_noprof - reallocate virtually contiguous memory; contents
+ * remain unchanged
  * @p: object to reallocate memory for
  * @size: the size to reallocate
+ * @align: requested alignment
  * @flags: the flags for the page level allocator
+ * @nid: node number of the target node
  *
- * If @p is %NULL, vrealloc() behaves exactly like vmalloc(). If @size is 0 and
- * @p is not a %NULL pointer, the object pointed to is freed.
+ * If @p is %NULL, vrealloc_XXX() behaves exactly like vmalloc_XXX(). If @size
+ * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If the caller wants the new memory to be on specific node *only*,
+ * __GFP_THISNODE flag should be set, otherwise the function will try to avoid
+ * reallocation and possibly disregard the specified @nid.
  *
  * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
  * initial memory allocation, every subsequent call to this API for the same
  * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
  * __GFP_ZERO is not fully honored by this API.
+ *
+ * Requesting an alignment that is bigger than the alignment of the existing
+ * allocation will fail.
  *
  * In any case, the contents of the object pointed to are preserved up to the
  * lesser of the new and old sizes.
@@ mm/vmalloc.c (continued) @@
  * Return: pointer to the allocated memory; %NULL if @size is zero or in case of
  * failure
  */
-void *vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align,
+		gfp_t flags, int nid)
 {
 	struct vm_struct *vm = NULL;
 	size_t alloced_size = 0;
···
 		if (WARN(alloced_size < old_size,
 			 "vrealloc() has mismatched area vs requested sizes (%p)\n", p))
 			return NULL;
+		if (WARN(!IS_ALIGNED((unsigned long)p, align),
+			 "will not reallocate with a bigger alignment (0x%lx)\n", align))
+			return NULL;
+		if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
+		    nid != page_to_nid(vmalloc_to_page(p)))
+			goto need_realloc;
 	}

 	/*
···
 		return (void *)p;
 	}

+need_realloc:
 	/* TODO: Grow the vm_area, i.e. allocate and map additional pages. */
-	n = __vmalloc_noprof(size, flags);
+	n = __vmalloc_node_noprof(size, align, flags, nid, __builtin_return_address(0));
+
 	if (!n)
 		return NULL;