/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H

/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <asm/dma.h>

extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn;

/*
 * highest page
 */
extern unsigned long max_pfn;
/*
 * highest possible page
 */
extern unsigned long long max_possible_pfn;

/**
 * enum memblock_flags - definition of memory region attributes
 * @MEMBLOCK_NONE: no special request
 * @MEMBLOCK_HOTPLUG: memory region indicated in the firmware-provided memory
 * map during early boot as hot(un)pluggable system RAM (e.g., memory range
 * that might get hotunplugged later). With "movable_node" set on the kernel
 * commandline, try keeping this memory region hotunpluggable. Does not apply
 * to memblocks added ("hotplugged") after early boot.
 * @MEMBLOCK_MIRROR: mirrored region
 * @MEMBLOCK_NOMAP: don't add to kernel direct mapping and treat as
 * reserved in the memory map; refer to memblock_mark_nomap() description
 * for further details
 * @MEMBLOCK_DRIVER_MANAGED: memory region that is always detected and added
 * via a driver, and never indicated in the firmware-provided memory map as
 * system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
 * kernel resource tree.
 * @MEMBLOCK_RSRV_NOINIT: reserved memory region for which struct pages are not
 * fully initialized. Users of this flag are responsible for properly
 * initializing struct pages of this region.
 * @MEMBLOCK_RSRV_KERN: memory region that is reserved for kernel use,
 * either explicitly with memblock_reserve_kern() or via memblock
 * allocation APIs. All memblock allocations set this flag.
 * @MEMBLOCK_KHO_SCRATCH: memory region that kexec can pass to the next
 * kernel in handover mode. During early boot, we do not know about all
 * memory reservations yet, so we get scratch memory from the previous
 * kernel that we know is good to use. It is the only memory that
 * allocations may happen from in this phase.
 */
enum memblock_flags {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
	MEMBLOCK_DRIVER_MANAGED = 0x8,	/* always detected via a driver */
	MEMBLOCK_RSRV_NOINIT	= 0x10,	/* don't initialize struct pages */
	MEMBLOCK_RSRV_KERN	= 0x20,	/* memory reserved for kernel use */
	MEMBLOCK_KHO_SCRATCH	= 0x40,	/* scratch memory for kexec handover */
};

/**
 * struct memblock_region - represents a memory region
 * @base: base address of the region
 * @size: size of the region
 * @flags: memory region attributes
 * @nid: NUMA node id
 */
struct memblock_region {
	phys_addr_t base;
	phys_addr_t size;
	enum memblock_flags flags;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct memblock_type - collection of memory regions of certain type
 * @cnt: number of regions
 * @max: size of the allocated array
 * @total_size: size of all regions
 * @regions: array of regions
 * @name: the memory type symbolic name
 */
struct memblock_type {
	unsigned long cnt;
	unsigned long max;
	phys_addr_t total_size;
	struct memblock_region *regions;
	char *name;
};

/**
 * struct memblock - memblock allocator metadata
 * @bottom_up: is bottom up direction?
 * @current_limit: physical address of the current allocation limit
 * @memory: usable memory regions
 * @reserved: reserved memory regions
 */
struct memblock {
	bool bottom_up;  /* is bottom up direction? */
	phys_addr_t current_limit;
	struct memblock_type memory;
	struct memblock_type reserved;
};

extern struct memblock memblock;

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
static inline void memblock_discard(void) {}
#endif

void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid,
		      enum memblock_flags flags);
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_phys_free(phys_addr_t base, phys_addr_t size);
int __memblock_reserve(phys_addr_t base, phys_addr_t size, int nid,
		       enum memblock_flags flags);

static __always_inline int memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return __memblock_reserve(base, size, NUMA_NO_NODE, 0);
}

static __always_inline int memblock_reserve_kern(phys_addr_t base, phys_addr_t size)
{
	return __memblock_reserve(base, size, NUMA_NO_NODE, MEMBLOCK_RSRV_KERN);
}

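/*
 * Illustrative sketch (not part of the mainline header): platform code
 * typically reserves firmware-owned ranges with memblock_reserve() before
 * any early allocations happen. "fw_base" and "fw_size" are hypothetical
 * values taken from a firmware table:
 *
 *	if (memblock_reserve(fw_base, fw_size))
 *		pr_warn("memblock: could not reserve firmware range\n");
 */
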
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
#endif
void memblock_trim_memory(phys_addr_t align);
unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				     phys_addr_t base2, phys_addr_t size2);
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
int memblock_reserved_mark_kern(phys_addr_t base, phys_addr_t size);
int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size);
int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size);

void memblock_free(void *ptr, size_t size);
void reset_all_zones_managed_pages(void);

/* Low level functions */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
			  struct memblock_type *type_a,
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
					phys_addr_t *out_start,
					phys_addr_t *out_end)
{
	extern struct memblock_type physmem;

	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
			 out_start, out_end, NULL);
}

/**
 * for_each_physmem_range - iterate through physmem areas not included in type.
 * @i: u64 used as loop variable
 * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_physmem_range(i, type, p_start, p_end)			\
	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_physmem_range(&i, type, p_start, p_end))
#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
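
/*
 * Illustrative sketch, assuming CONFIG_HAVE_MEMBLOCK_PHYS_MAP is enabled
 * (e.g. on s390): walk the raw physical memory map without excluding any
 * type by passing %NULL:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_physmem_range(i, NULL, &start, &end)
 *		pr_info("physmem: [%pa-%pa]\n", &start, &end);
 */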

/**
 * __for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
			     p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * __for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
				 p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		__next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_mem_range - iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range(i, p_start, p_end)				\
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
			     p_start, p_end, NULL)
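
/*
 * Illustrative sketch: print every usable memory range known to memblock;
 * the end address reported by the iterator is exclusive:
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_mem_range(i, &start, &end)
 *		pr_info("memory: [%pa-%pa]\n", &start, &end);
 */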

/**
 * for_each_mem_range_rev - reverse iterate through memory areas.
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, p_start, p_end)			\
	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
				 MEMBLOCK_HOTPLUG | MEMBLOCK_DRIVER_MANAGED, \
				 p_start, p_end, NULL)

/**
 * for_each_reserved_mem_range - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_range(i, p_start, p_end)			\
	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
			     MEMBLOCK_NONE, p_start, p_end, NULL)
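
/*
 * Illustrative sketch: summing the reserved ranges gives a rough view of
 * early boot memory consumption:
 *
 *	u64 i;
 *	phys_addr_t start, end, total = 0;
 *
 *	for_each_reserved_mem_range(i, &start, &end)
 *		total += end - start;
 */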

static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_RSRV_NOINIT;
}

static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_DRIVER_MANAGED;
}

static inline bool memblock_is_kho_scratch(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_KHO_SCRATCH;
}

int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

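/*
 * Illustrative sketch: count the pages present on a node, similar in
 * spirit to what the zone init code does ("nid" is assumed to be a valid
 * node id here):
 *
 *	unsigned long start_pfn, end_pfn, nr_pages = 0;
 *	int i;
 *
 *	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
 *		nr_pages += end_pfn - start_pfn;
 */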

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock. Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			     nid, flags, p_start, p_end, p_nid)

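/*
 * Illustrative sketch: total up free (memory && !reserved) bytes across
 * all nodes:
 *
 *	u64 i;
 *	phys_addr_t start, end, free = 0;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		free += end - start;
 */
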
/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order. Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
				 nid, flags, p_start, p_end, p_nid)

int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

#ifdef CONFIG_NUMA
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_NUMA */

/* Flags for memblock allocation APIs */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0
/*
 * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies
 * MEMBLOCK_ALLOC_ACCESSIBLE
 */
#define MEMBLOCK_ALLOC_NOLEAKTRACE	1

/* We are using top down, so it is safe to use 0 here */
#define MEMBLOCK_LOW_LIMIT 0

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
#endif

phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
				      phys_addr_t start, phys_addr_t end);
phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
				     phys_addr_t align, phys_addr_t start,
				     phys_addr_t end, int nid, bool exact_nid);
phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
							phys_addr_t align)
{
	return memblock_phys_alloc_range(size, align, 0,
					 MEMBLOCK_ALLOC_ACCESSIBLE);
}

void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
				   phys_addr_t min_addr, phys_addr_t max_addr,
				   int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
				 phys_addr_t min_addr, phys_addr_t max_addr,
				 int nid);
void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
			     phys_addr_t min_addr, phys_addr_t max_addr,
			     int nid);

static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

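/*
 * Illustrative sketch ("struct foo" is a hypothetical type): before the
 * buddy allocator is up, boot code allocates from memblock.
 * memblock_alloc() returns zeroed memory, or NULL on failure:
 *
 *	struct foo *table;
 *
 *	table = memblock_alloc(sizeof(*table), SMP_CACHE_BYTES);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */
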
void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
				const char *func);

#define memblock_alloc_or_panic(size, align)	\
	__memblock_alloc_or_panic(size, align, __func__)

static inline void *memblock_alloc_raw(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
					  MEMBLOCK_ALLOC_ACCESSIBLE,
					  NUMA_NO_NODE);
}

static __always_inline void *memblock_alloc_from(phys_addr_t size,
						 phys_addr_t align,
						 phys_addr_t min_addr)
{
	return memblock_alloc_try_nid(size, align, min_addr,
				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
}

static inline void *memblock_alloc_low(phys_addr_t size,
				       phys_addr_t align)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
}

static inline void *memblock_alloc_node(phys_addr_t size,
					phys_addr_t align, int nid)
{
	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

/*
 * Set the allocation direction to bottom-up or top-down.
 */
static inline __init_memblock void memblock_set_bottom_up(bool enable)
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * If this returns true, memblock allocates memory in the
 * bottom-up direction.
 */
static inline __init_memblock bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}

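/*
 * Illustrative sketch: architecture setup code may flip the allocation
 * direction early in boot; x86, for instance, goes bottom-up when
 * "movable_node" is in effect. movable_node_is_enabled() is the helper
 * declared in <linux/memory_hotplug.h>; whether this applies is
 * architecture-specific:
 *
 *	if (movable_node_is_enabled())
 *		memblock_set_bottom_up(true);
 */
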
phys_addr_t memblock_phys_mem_size(void);
phys_addr_t memblock_reserved_size(void);
phys_addr_t memblock_reserved_kern_size(phys_addr_t limit, int nid);
unsigned long memblock_estimated_nr_free_pages(void);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);

void memblock_dump_all(void);

/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                              limiting allocations to what is currently
 *                              accessible during boot
 * @limit: New limit value (physical address)
 */
void memblock_set_current_limit(phys_addr_t limit);

phys_addr_t memblock_get_current_limit(void);

/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. These accessors attempt to provide a clear
 * idea of what they return for such non-aligned MEMBLOCKs.
 */

/**
 * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the memory region
 */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base);
}

/**
 * memblock_region_memory_end_pfn - get the end pfn of the memory region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the memory region
 */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base + reg->size);
}

/**
 * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the lowest pfn intersecting with the reserved region
 */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
	return PFN_DOWN(reg->base);
}

/**
 * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
 * @reg: memblock_region structure
 *
 * Return: the end_pfn of the reserved region
 */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
	return PFN_UP(reg->base + reg->size);
}

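/*
 * Worked example of the rounding above, assuming 4K pages: a reserved
 * region [0x1100, 0x3400) yields memblock_region_reserved_base_pfn() ==
 * PFN_DOWN(0x1100) == 1 and memblock_region_reserved_end_pfn() ==
 * PFN_UP(0x3400) == 4, i.e. pfns [1, 4) cover the region. The memory
 * accessors round the other way, so only fully contained pages count.
 */
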
/**
 * for_each_mem_region - iterate over memory regions
 * @region: loop variable
 */
#define for_each_mem_region(region)					\
	for (region = memblock.memory.regions;				\
	     region < (memblock.memory.regions + memblock.memory.cnt);	\
	     region++)

/**
 * for_each_reserved_mem_region - iterate over reserved memory regions
 * @region: loop variable
 */
#define for_each_reserved_mem_region(region)				\
	for (region = memblock.reserved.regions;			\
	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
	     region++)

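/*
 * Illustrative sketch: dump each memory region with its attributes,
 * roughly what memblock_dump_all() prints:
 *
 *	struct memblock_region *r;
 *
 *	for_each_mem_region(r)
 *		pr_info("base=%pa size=%pa flags=%#x\n",
 *			&r->base, &r->size, r->flags);
 */
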
extern void *alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit);

#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
#define HASH_ZERO	0x00000002	/* Zero allocated hash table */

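/*
 * Illustrative sketch, modeled on how the inode/dentry caches size their
 * tables; "example_shift" and the scale value 17 are assumptions, not a
 * prescribed configuration:
 *
 *	unsigned int example_shift;
 *	struct hlist_head *table;
 *
 *	table = alloc_large_system_hash("example", sizeof(*table), 0, 17,
 *					HASH_EARLY | HASH_ZERO,
 *					&example_shift, NULL, 0, 0);
 */
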
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
 * sufficient vmalloc space.
 */
#ifdef CONFIG_NUMA
#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
extern bool hashdist;		/* Distribute hashes across NUMA nodes? */
#else
#define hashdist (false)
#endif

#ifdef CONFIG_MEMTEST
void early_memtest(phys_addr_t start, phys_addr_t end);
void memtest_report_meminfo(struct seq_file *m);
#else
static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif

#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
void memblock_set_kho_scratch_only(void);
void memblock_clear_kho_scratch_only(void);
void memmap_init_kho_scratch_pages(void);
#else
static inline void memblock_set_kho_scratch_only(void) { }
static inline void memblock_clear_kho_scratch_only(void) { }
static inline void memmap_init_kho_scratch_pages(void) {}
#endif

#endif /* _LINUX_MEMBLOCK_H */