Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/linux/gfp.h at commit d986ba0329dcca102e227995371135c9bbcefb6b (472 lines, 17 kB)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/gfp_types.h>

#include <linux/mmzone.h>
#include <linux/topology.h>
#include <linux/alloc_tag.h>
#include <linux/cleanup.h>
#include <linux/sched.h>

struct vm_area_struct;
struct mempolicy;

/* Helper macro to avoid passing gfp flags when the default (GFP_KERNEL) is wanted */
#define __default_gfp(a,b,...) b
#define default_gfp(...) __default_gfp(,##__VA_ARGS__,GFP_KERNEL)

/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT 3

static inline int gfp_migratetype(const gfp_t gfp_flags)
{
	VM_WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
	BUILD_BUG_ON((1UL << GFP_MOVABLE_SHIFT) != ___GFP_MOVABLE);
	BUILD_BUG_ON((___GFP_MOVABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_MOVABLE);
	BUILD_BUG_ON((___GFP_RECLAIMABLE >> GFP_MOVABLE_SHIFT) != MIGRATE_RECLAIMABLE);
	BUILD_BUG_ON(((___GFP_MOVABLE | ___GFP_RECLAIMABLE) >>
		      GFP_MOVABLE_SHIFT) != MIGRATE_HIGHATOMIC);

	if (unlikely(page_group_by_mobility_disabled))
		return MIGRATE_UNMOVABLE;

	/* Group based on mobility */
	return (__force unsigned long)(gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

static inline bool gfpflags_allow_spinning(const gfp_t gfp_flags)
{
	/*
	 * !__GFP_DIRECT_RECLAIM -> direct reclaim is not allowed.
	 * !__GFP_KSWAPD_RECLAIM -> it's not safe to wake up kswapd.
	 * All GFP_* flags including GFP_NOWAIT use one or both flags.
	 * alloc_pages_nolock() is the only API that doesn't specify either flag.
	 *
	 * This is stronger than GFP_NOWAIT or GFP_ATOMIC: those only
	 * guarantee that the allocation never blocks on a sleeping lock.
	 * Here we are enforcing that the allocation doesn't ever spin
	 * on any locks (i.e. only trylocks). There is no high level
	 * GFP_$FOO flag for this use in alloc_pages_nolock() as the
	 * regular page allocator doesn't fully support this
	 * allocation mode.
	 */
	return !!(gfp_flags & __GFP_RECLAIM);
}
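
/*
 * Example (illustrative sketch, assuming the standard GFP_* composites from
 * <linux/gfp_types.h>): how the two predicates above are typically consulted.
 *
 *	gfpflags_allow_blocking(GFP_KERNEL);	// true:  __GFP_DIRECT_RECLAIM is set
 *	gfpflags_allow_blocking(GFP_ATOMIC);	// false: only kswapd reclaim is allowed
 *	gfpflags_allow_spinning(GFP_NOWAIT);	// true:  __GFP_KSWAPD_RECLAIM is set
 *	gfpflags_allow_spinning(0);		// false: the alloc_pages_nolock() style mask
 */
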
#ifdef CONFIG_HIGHMEM
#define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
#else
#define OPT_ZONE_HIGHMEM ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA
#define OPT_ZONE_DMA ZONE_DMA
#else
#define OPT_ZONE_DMA ZONE_NORMAL
#endif

#ifdef CONFIG_ZONE_DMA32
#define OPT_ZONE_DMA32 ZONE_DMA32
#else
#define OPT_ZONE_DMA32 ZONE_NORMAL
#endif

/*
 * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
 * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
 * bits long and there are 16 of them to cover all possible combinations of
 * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
 *
 * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
 * But GFP_MOVABLE is not only a zone specifier but also an allocation
 * policy. Therefore __GFP_MOVABLE plus another zone selector is valid.
 * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1".
 *
 * bit    result
 * =================
 * 0x0 => NORMAL
 * 0x1 => DMA or NORMAL
 * 0x2 => HIGHMEM or NORMAL
 * 0x3 => BAD (DMA+HIGHMEM)
 * 0x4 => DMA32 or NORMAL
 * 0x5 => BAD (DMA+DMA32)
 * 0x6 => BAD (HIGHMEM+DMA32)
 * 0x7 => BAD (HIGHMEM+DMA32+DMA)
 * 0x8 => NORMAL (MOVABLE+0)
 * 0x9 => DMA or NORMAL (MOVABLE+DMA)
 * 0xa => MOVABLE (Movable is valid only if HIGHMEM is set too)
 * 0xb => BAD (MOVABLE+HIGHMEM+DMA)
 * 0xc => DMA32 or NORMAL (MOVABLE+DMA32)
 * 0xd => BAD (MOVABLE+DMA32+DMA)
 * 0xe => BAD (MOVABLE+DMA32+HIGHMEM)
 * 0xf => BAD (MOVABLE+DMA32+HIGHMEM+DMA)
 *
 * GFP_ZONES_SHIFT must be <= 2 on 32 bit platforms.
 */

#if defined(CONFIG_ZONE_DEVICE) && (MAX_NR_ZONES-1) <= 4
/* ZONE_DEVICE is not a valid GFP zone specifier */
#define GFP_ZONES_SHIFT 2
#else
#define GFP_ZONES_SHIFT ZONES_SHIFT
#endif

#if 16 * GFP_ZONES_SHIFT > BITS_PER_LONG
#error GFP_ZONES_SHIFT too large to create GFP_ZONE_TABLE integer
#endif

#define GFP_ZONE_TABLE ( \
	(ZONE_NORMAL << 0 * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << ___GFP_DMA * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_HIGHMEM << ___GFP_HIGHMEM * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << ___GFP_DMA32 * GFP_ZONES_SHIFT) \
	| (ZONE_NORMAL << ___GFP_MOVABLE * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA << (___GFP_MOVABLE | ___GFP_DMA) * GFP_ZONES_SHIFT) \
	| (ZONE_MOVABLE << (___GFP_MOVABLE | ___GFP_HIGHMEM) * GFP_ZONES_SHIFT) \
	| (OPT_ZONE_DMA32 << (___GFP_MOVABLE | ___GFP_DMA32) * GFP_ZONES_SHIFT) \
)

/*
 * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32
 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per
 * entry starting with bit 0. Bit is set if the combination is not
 * allowed.
 */
#define GFP_ZONE_BAD ( \
	1 << (___GFP_DMA | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32) \
	| 1 << (___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_DMA | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_HIGHMEM | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_HIGHMEM) \
	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM) \
)

static inline enum zone_type gfp_zone(gfp_t flags)
{
	enum zone_type z;
	int bit = (__force int) (flags & GFP_ZONEMASK);

	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
		((1 << GFP_ZONES_SHIFT) - 1);
	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
	return z;
}
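
/*
 * Example (illustrative sketch, assuming a configuration where ZONE_DMA and
 * ZONE_DMA32 exist; with those zones compiled out, the table above falls
 * back to ZONE_NORMAL instead):
 *
 *	gfp_zone(GFP_KERNEL);			// ZONE_NORMAL, no zone bits set
 *	gfp_zone(GFP_KERNEL | __GFP_DMA);	// ZONE_DMA
 *	gfp_zone(GFP_HIGHUSER_MOVABLE);		// ZONE_MOVABLE (__GFP_HIGHMEM | __GFP_MOVABLE)
 *	gfp_zone(__GFP_DMA | __GFP_HIGHMEM);	// invalid: trips the GFP_ZONE_BAD check
 *						// (VM_BUG_ON with CONFIG_DEBUG_VM)
 */
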
/*
 * There is only one page-allocator function, and two main namespaces to
 * it. The alloc_page*() variants return 'struct page *' and as such
 * can allocate highmem pages; the *get*page*() variants return
 * virtual kernel addresses to the allocated page(s).
 */

static inline int gfp_zonelist(gfp_t flags)
{
#ifdef CONFIG_NUMA
	if (unlikely(flags & __GFP_THISNODE))
		return ZONELIST_NOFALLBACK;
#endif
	return ZONELIST_FALLBACK;
}

/*
 * gfp flag masking for nested internal allocations.
 *
 * For code that needs to do allocations inside the public allocation API (e.g.
 * memory allocation tracking code) the allocations need to obey the caller's
 * allocation context constraints to prevent allocation context mismatches (e.g.
 * GFP_KERNEL allocations in GFP_NOFS contexts) that could lead to deadlocks.
 *
 * It is also assumed that these nested allocations are for internal kernel
 * object storage purposes only and are not going to be used for DMA, etc. Hence
 * we strip out all the zone information and leave just the context information
 * intact.
 *
 * Further, internal allocations must fail before the higher level allocation
 * can fail, so we must make them fail faster and fail silently. We also don't
 * want them to deplete emergency reserves. Hence callers making nested
 * allocations must be prepared for them to fail.
 */
static inline gfp_t gfp_nested_mask(gfp_t flags)
{
	return ((flags & (GFP_KERNEL | GFP_ATOMIC | __GFP_NOLOCKDEP)) |
		(__GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN));
}
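
/*
 * Example (illustrative sketch): a tracking allocation made on behalf of a
 * GFP_NOFS caller keeps the caller's reclaim/IO context bits, loses zone and
 * placement modifiers, and gains the fail-fast/fail-silent bits:
 *
 *	gfp_t nested = gfp_nested_mask(GFP_NOFS | __GFP_HIGHMEM);
 *	// nested == GFP_NOFS | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *	// __GFP_HIGHMEM has been stripped and __GFP_FS remains clear.
 */
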
/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
 * There are two zonelists per node, one for all zones with memory and
 * one containing just zones from the node the zonelist belongs to.
 *
 * For the case of non-NUMA systems the NODE_DATA() gets optimized to
 * &contig_page_data at compile-time.
 */
static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
{
	return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
}

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif
#ifndef HAVE_ARCH_ALLOC_PAGE
static inline void arch_alloc_page(struct page *page, int order) { }
#endif

struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__))

struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask);
#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
		nodemask_t *nodemask, int nr_pages,
		struct page **page_array);
#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__))

unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp,
		unsigned long nr_pages,
		struct page **page_array);
#define alloc_pages_bulk_mempolicy(...) \
	alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__))

/* Bulk allocate order-0 pages */
#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \
	__alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array)

static inline unsigned long
alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages,
			     struct page **page_array)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array);
}

#define alloc_pages_bulk_node(...) \
	alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__))

static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
{
	gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);

	if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
		return;

	if (node_online(this_node))
		return;

	pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
	dump_stack();
}

/*
 * Allocate pages, preferring the node given as nid. The node must be valid and
 * online. For a more general interface, see alloc_pages_node().
 */
static inline struct page *
__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp_mask);

	return __alloc_pages_noprof(gfp_mask, order, nid, NULL);
}

#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__))

static inline
struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid)
{
	VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
	warn_if_node_offline(nid, gfp);

	return __folio_alloc_noprof(gfp, order, nid, NULL);
}

#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__))

/*
 * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE,
 * prefer the current CPU's closest node. Otherwise node must be valid and
 * online.
 */
static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask,
						   unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	return __alloc_pages_node_noprof(nid, gfp_mask, order);
}

#define alloc_pages_node(...) alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__))

#ifdef CONFIG_NUMA
struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order);
struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid);
struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma,
		unsigned long addr);
#else
static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order);
}
static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order)
{
	return __folio_alloc_node_noprof(gfp, order, numa_node_id());
}
static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order,
		struct mempolicy *mpol, pgoff_t ilx, int nid)
{
	return folio_alloc_noprof(gfp, order);
}
static inline struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order,
		struct vm_area_struct *vma, unsigned long addr)
{
	return folio_alloc_noprof(gfp, order);
}
#endif

#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__))
#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__))
#define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__))
#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__))

#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)

static inline struct page *alloc_page_vma_noprof(gfp_t gfp,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr);

	return &folio->page;
}
#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__))

struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order);
#define alloc_pages_nolock(...) alloc_hooks(alloc_pages_nolock_noprof(__VA_ARGS__))

extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order);
#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__))

extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask);
#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__))

void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1);
#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__))

void free_pages_exact(void *virt, size_t size);

__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2);
#define alloc_pages_exact_nid(...) \
	alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__))

#define __get_free_page(gfp_mask) \
	__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
	__get_free_pages((gfp_mask) | GFP_DMA, (order))

extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages_nolock(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
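
/*
 * Example (illustrative sketch of typical call patterns; error handling kept
 * minimal): the struct-page-returning and address-returning variants above
 * pair with the matching free routines.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);	// order-2: 4 contiguous pages
 *	if (page)
 *		__free_pages(page, 2);
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (addr)
 *		free_page(addr);
 */
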
void page_alloc_init_cpuhp(void);
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);

void page_alloc_init_late(void);
void setup_pcp_cacheinfo(unsigned int cpu);

/*
 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
 * GFP flags are used before interrupts are enabled. Once interrupts are
 * enabled, it is set to __GFP_BITS_MASK while the system is running. During
 * hibernation, it is used by PM to avoid I/O during memory allocation while
 * devices are suspended.
 */
extern gfp_t gfp_allowed_mask;

/* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask);

/* A helper for checking if gfp includes all the specified flags */
static inline bool gfp_has_flags(gfp_t gfp, gfp_t flags)
{
	return (gfp & flags) == flags;
}

static inline bool gfp_has_io_fs(gfp_t gfp)
{
	return gfp_has_flags(gfp, __GFP_IO | __GFP_FS);
}

/*
 * Check if the gfp flags allow compaction - GFP_NOIO is a really
 * tricky context because the migration might require IO.
 */
static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
{
	return IS_ENABLED(CONFIG_COMPACTION) && (gfp_mask & __GFP_IO);
}

extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);

#ifdef CONFIG_CONTIG_ALLOC

typedef unsigned int __bitwise acr_flags_t;
#define ACR_FLAGS_NONE ((__force acr_flags_t)0)      // ordinary allocation request
#define ACR_FLAGS_CMA  ((__force acr_flags_t)BIT(0)) // allocate for CMA

/* The below functions must be run on a range from a single zone. */
int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
				     acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_frozen_range(...) \
	alloc_hooks(alloc_contig_frozen_range_noprof(__VA_ARGS__))

int alloc_contig_range_noprof(unsigned long start, unsigned long end,
			      acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) \
	alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))

struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages,
		gfp_t gfp_mask, int nid, nodemask_t *nodemask);
#define alloc_contig_frozen_pages(...) \
	alloc_hooks(alloc_contig_frozen_pages_noprof(__VA_ARGS__))

struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
				       int nid, nodemask_t *nodemask);
#define alloc_contig_pages(...) \
	alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__))

void free_contig_frozen_range(unsigned long pfn, unsigned long nr_pages);
void free_contig_range(unsigned long pfn, unsigned long nr_pages);
#endif

DEFINE_FREE(free_page, void *, free_page((unsigned long)_T))
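
/*
 * Example (illustrative sketch): the free_page cleanup class defined above
 * pairs with __free() from <linux/cleanup.h>, so a page obtained as a
 * virtual address is released automatically when the variable goes out of
 * scope:
 *
 *	void *buf __free(free_page) = (void *)get_zeroed_page(GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// use buf; free_page() runs on every return path
 */
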
#endif /* __LINUX_GFP_H */