// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/pgalloc.h>

#include <asm/dma.h>
#include <asm/tlbflush.h>

#include "hugetlb_vmemmap.h"

/*
 * Flags for vmemmap_populate_range and friends.
 */
/* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */
#define VMEMMAP_POPULATE_PAGEREF	0x0001

#include "internal.h"

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memmap_alloc(size, align, goal, node, false);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned __meminitdata;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Callers need to use the same size for every allocation during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

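/*
 * An altmap hands out pfns linearly from its device range: base_pfn is
 * the start of the range, the first @reserve pfns are never handed out,
 * and @alloc/@align count the pfns allocated so far plus any alignment
 * padding. The next free pfn therefore follows directly from those
 * fields.
 */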
static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
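	/*
	 * Align the allocation to its own size: find_first_bit() returns
	 * the lowest set bit of nr_pfns, so 1UL << it is the largest power
	 * of two dividing nr_pfns, and nr_align then becomes the number of
	 * padding pfns needed to bring pfn up to that alignment.
	 */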
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(ptep_get(pte));
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       unsigned long ptpfn, unsigned long flags)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(ptep_get(pte))) {
		pte_t entry;
		void *p;

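		/*
		 * A ptpfn of -1 means the caller did not supply a backing
		 * pfn: allocate a fresh page. Any other value maps the pte
		 * to the caller-provided pfn, which is how tail struct
		 * pages get shared.
		 */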
		if (ptpfn == (unsigned long)-1) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
			ptpfn = PHYS_PFN(__pa(p));
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm,
			 * there is a free_pages() call on the page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			if (flags & VMEMMAP_POPULATE_PAGEREF)
				get_page(pfn_to_page(ptpfn));
		}
		entry = pfn_pte(ptpfn, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		kernel_pte_init(p);
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_init(p);
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_init(p);
		p4d_populate_kernel(addr, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate_kernel(addr, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  unsigned long ptpfn,
						  unsigned long flags)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, ptpfn, flags);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    unsigned long ptpfn,
					    unsigned long flags)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap,
					       ptpfn, flags);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, -1, 0);
}
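
/*
 * Sketch (an assumption about typical callers, not code from this file):
 * an architecture that does not use huge vmemmap mappings can implement
 * its vmemmap_populate() hook as a thin wrapper, e.g.:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */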

/*
 * Write protect the mirrored tail page structs for HVO. This will be
 * called from the hugetlb code when gathering and initializing the
 * memblock allocated gigantic pages. The write protect can't be
 * done earlier, since it can't be guaranteed that the reserved
 * page structures will not be written to during initialization,
 * even if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled.
 *
 * The PTEs are known to exist, and nothing else should be touching
 * these pages. The caller is responsible for any TLB flushing.
 */
void vmemmap_wrprotect_hvo(unsigned long addr, unsigned long end,
			   int node, unsigned long headsize)
{
	unsigned long maddr;
	pte_t *pte;

	for (maddr = addr + headsize; maddr < end; maddr += PAGE_SIZE) {
		pte = virt_to_kpte(maddr);
		ptep_set_wrprotect(&init_mm, maddr, pte);
	}
}

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
static __meminit struct page *vmemmap_get_tail(unsigned int order, struct zone *zone)
{
	struct page *p, *tail;
	unsigned int idx;
	int node = zone_to_nid(zone);

	if (WARN_ON_ONCE(order < VMEMMAP_TAIL_MIN_ORDER))
		return NULL;
	if (WARN_ON_ONCE(order > MAX_FOLIO_ORDER))
		return NULL;

	idx = order - VMEMMAP_TAIL_MIN_ORDER;
	tail = zone->vmemmap_tails[idx];
	if (tail)
		return tail;

	/*
	 * Only allocate the page, but do not initialize it.
	 *
	 * Any initialization done here will be overwritten by memmap_init().
	 *
	 * hugetlb_vmemmap_init() will take care of initialization after
	 * memmap_init().
	 */

	p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
	if (!p)
		return NULL;

	tail = virt_to_page(p);
	zone->vmemmap_tails[idx] = tail;

	return tail;
}

int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end,
				   unsigned int order, struct zone *zone,
				   unsigned long headsize)
{
	unsigned long maddr;
	struct page *tail;
	pte_t *pte;
	int node = zone_to_nid(zone);

	tail = vmemmap_get_tail(order, zone);
	if (!tail)
		return -ENOMEM;

	for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) {
		pte = vmemmap_populate_address(maddr, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;
	}

	/*
	 * Map the rest of the range to the shared tail page obtained above.
	 */
	return vmemmap_populate_range(maddr, end, node, NULL,
				      page_to_pfn(tail), 0);
}
#endif

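/*
 * Weak no-op defaults for the architecture hooks used by
 * vmemmap_populate_hugepages(): vmemmap_set_pmd() is expected to install
 * a huge page mapping for @p in @pmd, and vmemmap_check_pmd() to report
 * whether @pmd is already populated with a huge page. Architectures that
 * call vmemmap_populate_hugepages() must override both.
 */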
void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
				      unsigned long addr, unsigned long next)
{
}

int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				       unsigned long addr, unsigned long next)
{
	return 0;
}

int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;

		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(pmdp_get(pmd))) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
			if (p) {
				vmemmap_set_pmd(pmd, p, node, addr, next);
				continue;
			} else if (altmap) {
				/*
				 * No fallback: In any case we care about, the
				 * altmap should be reasonably sized and aligned
				 * such that vmemmap_alloc_block_buf() will always
				 * succeed. For consistency with the PTE case,
				 * return an error here as failure could indicate
				 * a configuration issue with the size of the altmap.
				 */
				return -ENOMEM;
			}
		} else if (vmemmap_check_pmd(pmd, node, addr, next))
			continue;
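		/* No huge page mapping was possible: fall back to base pages. */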
		if (vmemmap_populate_basepages(addr, next, node, altmap))
			return -ENOMEM;
	}
	return 0;
}

#ifndef vmemmap_populate_compound_pages
/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

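	/*
	 * A section qualifies for reuse only if it starts in the middle of
	 * a compound page (@offset not aligned to the compound page size),
	 * i.e. it holds nothing but tail pages, and only if the compound
	 * page is large enough to span more than one subsection.
	 */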
	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_pfn(ptep_get(pte)),
					      VMEMMAP_POPULATE_PAGEREF);
	}

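	/*
	 * Step through the range one compound page at a time: @size is the
	 * number of memmap bytes describing a single compound page, capped
	 * at the size of the range being populated.
	 */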
	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, -1, 0);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_pfn(ptep_get(pte)),
					    VMEMMAP_POPULATE_PAGEREF);
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

#endif

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (vmemmap_can_optimize(altmap, pgmap))
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
/*
 * This is called just before initializing sections for a NUMA node.
 * Any special initialization that needs to be done before the
 * generic initialization can be done from here. Sections that
 * are initialized in hooks called from here will be skipped by
 * the generic initialization.
 */
void __init sparse_vmemmap_init_nid_early(int nid)
{
	hugetlb_vmemmap_init_early(nid);
}

/*
 * This is called just before the initialization of page structures
 * through memmap_init. Zones are now initialized, so any work that
 * needs to be done that needs zone information can be done from
 * here.
 */
void __init sparse_vmemmap_init_nid_late(int nid)
{
	hugetlb_vmemmap_init_late(nid);
}
#endif

static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

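/*
 * Worked example (values assume x86_64 defaults of 128M sections and 2M
 * subsections, i.e. SUBSECTIONS_PER_SECTION == 64): hot-adding 4M of
 * memory starting 2M into a section sets bits 1 and 2 of that section's
 * subsection_map.
 */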
void __init sparse_init_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);

	for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms = __nr_to_section(section_nr);

		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms = __nr_to_section(section_nr);

		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
			"section already deactivated (%#lx + %ld)\n",
			pfn, nr_pages))
		return -EINVAL;

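	/*
	 * map is a subset of subsection_map at this point, so the xor
	 * simply clears the bits covering [pfn, pfn + nr_pages).
	 */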
	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}

/*
 * To deactivate a memory region, there are 3 cases to handle:
 *
 * 1. deactivation of a partial hot-added section:
 *    a) section was present at memory init.
 *    b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For case 1, when the subsection_map is not empty we will not be
 * freeing the usage map, but we still need to free the vmemmap range.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
			       struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree_rcu(ms->usage, rcu);
			WRITE_ONCE(ms->usage, NULL);
		}
		memmap = pfn_to_page(SECTION_ALIGN_DOWN(pfn));
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early) {
		memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
		depopulate_section_memmap(pfn, nr_pages, altmap);
	} else if (memmap) {
		memmap_boot_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page),
							  PAGE_SIZE)));
		free_map_bootmem(memmap);
	}

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}
	memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug,
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section was already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */