/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PGTABLE_H
#define _LINUX_PGTABLE_H

#include <linux/pfn.h>
#include <asm/pgtable.h>

#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#define PUD_ORDER	(PUD_SHIFT - PAGE_SHIFT)

#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU

#include <linux/mm_types.h>
#include <linux/bug.h>
#include <linux/errno.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/page_table_check.h>

#if 5 - defined(__PAGETABLE_P4D_FOLDED) - defined(__PAGETABLE_PUD_FOLDED) - \
	defined(__PAGETABLE_PMD_FOLDED) != CONFIG_PGTABLE_LEVELS
#error CONFIG_PGTABLE_LEVELS is not consistent with __PAGETABLE_{P4D,PUD,PMD}_FOLDED
#endif

/*
 * On almost all architectures and configurations, 0 can be used as the
 * upper ceiling to free_pgtables(): on many architectures it has the same
 * effect as using TASK_SIZE. However, there is one configuration which
 * must impose a more careful limit, to avoid freeing kernel pgtables.
 */
#ifndef USER_PGTABLES_CEILING
#define USER_PGTABLES_CEILING	0UL
#endif

/*
 * This defines the first usable user address. Platforms
 * can override its value with a custom FIRST_USER_ADDRESS
 * defined in their respective <asm/pgtable.h>.
 */
#ifndef FIRST_USER_ADDRESS
#define FIRST_USER_ADDRESS	0UL
#endif

/*
 * This defines the generic helper for accessing the PMD page
 * table page. Platforms can still override it via their
 * respective <asm/pgtable.h>.
 */
#ifndef pmd_pgtable
#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

#define pmd_folio(pmd) page_folio(pmd_page(pmd))

/*
 * A page table page can be thought of as an array like this: pXd_t[PTRS_PER_PxD]
 *
 * The pXx_index() functions return the index of the entry in the page
 * table page which would control the given virtual address.
 *
 * As these functions may be used by the same code for different levels of
 * the page table folding, they are always available, regardless of the
 * CONFIG_PGTABLE_LEVELS value. For the folded levels they simply return 0
 * because in such cases PTRS_PER_PxD equals 1.
 */

static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
#define pmd_index pmd_index
#endif

#ifndef pud_index
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
#define pud_index pud_index
#endif

#ifndef pgd_index
/* Must be a compile-time constant, so implement it as a macro */
#define pgd_index(a)  (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#endif
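
/*
 * Illustrative sketch (not part of the API): the index helpers above compose
 * with the pXd_offset() helpers defined further down into a full walk from a
 * pgd to a pte for one virtual address:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);	(pgd_index(addr))
 *	p4d_t *p4d = p4d_offset(pgd, addr);	(index is 0 if folded)
 *	pud_t *pud = pud_offset(p4d, addr);	(pud_index(addr))
 *	pmd_t *pmd = pmd_offset(pud, addr);	(pmd_index(addr))
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * pmd_off() below wraps exactly this sequence down to the PMD level.
 */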

#ifndef kernel_pte_init
static inline void kernel_pte_init(void *addr)
{
}
#define kernel_pte_init kernel_pte_init
#endif

#ifndef pmd_init
static inline void pmd_init(void *addr)
{
}
#define pmd_init pmd_init
#endif

#ifndef pud_init
static inline void pud_init(void *addr)
{
}
#define pud_init pud_init
#endif

#ifndef pte_offset_kernel
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
#define pte_offset_kernel pte_offset_kernel
#endif

#ifdef CONFIG_HIGHPTE
#define __pte_map(pmd, address) \
	((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
#define pte_unmap(pte)	do {	\
	kunmap_local((pte));	\
	rcu_read_unlock();	\
} while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline void pte_unmap(pte_t *pte)
{
	rcu_read_unlock();
}
#endif

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);

/* Find an entry in the second-level page table. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return pud_pgtable(*pud) + pmd_index(address);
}
#define pmd_offset pmd_offset
#endif

#ifndef pud_offset
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return p4d_pgtable(*p4d) + pud_index(address);
}
#define pud_offset pud_offset
#endif

static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address)
{
	return (pgd + pgd_index(address));
}

/*
 * a shortcut to get a pgd_t in a given mm
 */
#ifndef pgd_offset
#define pgd_offset(mm, address)		pgd_offset_pgd((mm)->pgd, (address))
#endif

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)		pgd_offset(&init_mm, (address))

/*
 * In many cases it is known that a virtual address is mapped at PMD or PTE
 * level, so instead of traversing all the page table levels, we can get a
 * pointer to the PMD entry in user or kernel page table or translate a virtual
 * address to the pointer in the PTE in the kernel page tables with simple
 * helpers.
 */
static inline pmd_t *pmd_off(struct mm_struct *mm, unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, va), va), va), va);
}

static inline pmd_t *pmd_off_k(unsigned long va)
{
	return pmd_offset(pud_offset(p4d_offset(pgd_offset_k(va), va), va), va);
}

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}

#ifndef pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return 0;
}
#endif

#ifndef pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
	return 0;
}
#endif

/*
 * A facility to provide lazy MMU batching. This allows PTE updates and
 * page invalidations to be delayed until a call to leave lazy MMU mode
 * is issued. Some architectures may benefit from doing this, and it is
 * beneficial for both shadow and direct mode hypervisors, which may batch
 * the PTE updates which happen during this window. Note that using this
 * interface requires that read hazards be removed from the code. A read
 * hazard could result in the direct mode hypervisor case, since the actual
 * write to the page tables may not yet have taken place, so reads through
 * a raw PTE pointer after it has been modified are not guaranteed to be
 * up to date.
 *
 * In the general case, no lock is guaranteed to be held between entry and exit
 * of the lazy mode. (In practice, for user PTE updates, the appropriate page
 * table lock(s) are held, but for kernel PTE updates, no lock is held).
 * The implementation must therefore assume preemption may be enabled upon
 * entry to the mode and CPU migration is possible; it must take steps to be
 * robust against this. An implementation may handle this by disabling
 * preemption; as a consequence, generic code may not sleep while the lazy MMU
 * mode is active.
 *
 * The mode is disabled in interrupt context and calls to the lazy_mmu API have
 * no effect.
 *
 * The lazy MMU mode is enabled for a given block of code using:
 *
 *	lazy_mmu_mode_enable();
 *	<code>
 *	lazy_mmu_mode_disable();
 *
 * Nesting is permitted: <code> may itself use an enable()/disable() pair.
 * A nested call to enable() has no functional effect; however disable() causes
 * any batched architectural state to be flushed regardless of nesting. After a
 * call to disable(), the caller can therefore rely on all previous page table
 * modifications having taken effect, but the lazy MMU mode may still be
 * enabled.
 *
 * In certain cases, it may be desirable to temporarily pause the lazy MMU mode.
 * This can be done using:
 *
 *	lazy_mmu_mode_pause();
 *	<code>
 *	lazy_mmu_mode_resume();
 *
 * pause() ensures that the mode is exited regardless of the nesting level;
 * resume() re-enters the mode at the same nesting level. Any call to the
 * lazy_mmu_mode_* API between those two calls has no effect. In particular,
 * this means that pause()/resume() pairs may nest.
 *
 * is_lazy_mmu_mode_active() can be used to check whether the lazy MMU mode is
 * currently enabled.
 */
#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
/**
 * lazy_mmu_mode_enable() - Enable the lazy MMU mode.
 *
 * Enters a new lazy MMU mode section; if the mode was not already enabled,
 * enables it and calls arch_enter_lazy_mmu_mode().
 *
 * Must be paired with a call to lazy_mmu_mode_disable().
 *
 * Has no effect if called:
 * - While paused - see lazy_mmu_mode_pause()
 * - In interrupt context
 */
static inline void lazy_mmu_mode_enable(void)
{
	struct lazy_mmu_state *state = &current->lazy_mmu_state;

	if (in_interrupt() || state->pause_count > 0)
		return;

	VM_WARN_ON_ONCE(state->enable_count == U8_MAX);

	if (state->enable_count++ == 0)
		arch_enter_lazy_mmu_mode();
}

/**
 * lazy_mmu_mode_disable() - Disable the lazy MMU mode.
 *
 * Exits the current lazy MMU mode section. If it is the outermost section,
 * disables the mode and calls arch_leave_lazy_mmu_mode(). Otherwise (nested
 * section), calls arch_flush_lazy_mmu_mode().
 *
 * Must match a call to lazy_mmu_mode_enable().
 *
 * Has no effect if called:
 * - While paused - see lazy_mmu_mode_pause()
 * - In interrupt context
 */
static inline void lazy_mmu_mode_disable(void)
{
	struct lazy_mmu_state *state = &current->lazy_mmu_state;

	if (in_interrupt() || state->pause_count > 0)
		return;

	VM_WARN_ON_ONCE(state->enable_count == 0);

	if (--state->enable_count == 0)
		arch_leave_lazy_mmu_mode();
	else /* Exiting a nested section */
		arch_flush_lazy_mmu_mode();
}

/**
 * lazy_mmu_mode_pause() - Pause the lazy MMU mode.
 *
 * Pauses the lazy MMU mode; if it is currently active, disables it and calls
 * arch_leave_lazy_mmu_mode().
 *
 * Must be paired with a call to lazy_mmu_mode_resume(). Calls to the
 * lazy_mmu_mode_* API have no effect until the matching resume() call.
 *
 * Has no effect if called:
 * - While paused (inside another pause()/resume() pair)
 * - In interrupt context
 */
static inline void lazy_mmu_mode_pause(void)
{
	struct lazy_mmu_state *state = &current->lazy_mmu_state;

	if (in_interrupt())
		return;

	VM_WARN_ON_ONCE(state->pause_count == U8_MAX);

	if (state->pause_count++ == 0 && state->enable_count > 0)
		arch_leave_lazy_mmu_mode();
}

/**
 * lazy_mmu_mode_resume() - Resume the lazy MMU mode.
 *
 * Resumes the lazy MMU mode; if it was active at the point where the matching
 * call to lazy_mmu_mode_pause() was made, re-enables it and calls
 * arch_enter_lazy_mmu_mode().
 *
 * Must match a call to lazy_mmu_mode_pause().
 *
 * Has no effect if called:
 * - While paused (inside another pause()/resume() pair)
 * - In interrupt context
 */
static inline void lazy_mmu_mode_resume(void)
{
	struct lazy_mmu_state *state = &current->lazy_mmu_state;

	if (in_interrupt())
		return;

	VM_WARN_ON_ONCE(state->pause_count == 0);

	if (--state->pause_count == 0 && state->enable_count > 0)
		arch_enter_lazy_mmu_mode();
}
#else
static inline void lazy_mmu_mode_enable(void) {}
static inline void lazy_mmu_mode_disable(void) {}
static inline void lazy_mmu_mode_pause(void) {}
static inline void lazy_mmu_mode_resume(void) {}
#endif
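
/*
 * Illustrative sketch (not taken from any caller): a typical batched update
 * under the lazy MMU mode. The enable()/disable() pair brackets the loop, and
 * the callee may itself nest another pair without ill effect:
 *
 *	lazy_mmu_mode_enable();
 *	for (i = 0; i < nr; i++, ptep++, addr += PAGE_SIZE)
 *		set_pte_at(mm, addr, ptep, pte_advance_pfn(pte, i));
 *	lazy_mmu_mode_disable();
 *
 * If code in the middle must observe up-to-date page tables (e.g. it reads
 * through a raw PTE pointer), bracket that code with lazy_mmu_mode_pause()
 * and lazy_mmu_mode_resume() instead.
 */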

#ifndef pte_batch_hint
/**
 * pte_batch_hint - Number of pages that can be added to batch without scanning.
 * @ptep: Page table pointer for the entry.
 * @pte: Page table entry.
 *
 * Some architectures know that a set of contiguous ptes all map the same
 * contiguous memory with the same permissions. In this case, the architecture
 * can provide a hint to aid pte batching without the core code needing to scan
 * every pte.
 *
 * An architecture implementation may ignore the PTE accessed state. Further,
 * the dirty state must apply atomically to all the PTEs described by the hint.
 *
 * May be overridden by the architecture, else pte_batch_hint is always 1.
 */
static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
{
	return 1;
}
#endif
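
/*
 * Illustrative sketch (an assumed consumer, not the actual core-code loop): a
 * batching scan can use the hint to skip ahead instead of checking every pte;
 * a real batcher would also recheck ptep[nr] against the expected value:
 *
 *	unsigned int nr = 0;
 *
 *	while (nr < max_nr) {
 *		unsigned int step = pte_batch_hint(ptep + nr, pte);
 *
 *		nr += step;
 *		pte = pte_advance_pfn(pte, step);
 *	}
 *
 * With the generic stub above returning 1, this degenerates to a plain scan.
 */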

#ifndef pte_advance_pfn
static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
{
	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
}
#endif

#define pte_next_pfn(pte) pte_advance_pfn(pte, 1)

#ifndef set_ptes
/**
 * set_ptes - Map consecutive pages to a contiguous range of addresses.
 * @mm: Address space to map the pages into.
 * @addr: Address to map the first page at.
 * @ptep: Page table pointer for the first entry.
 * @pte: Page table entry for the first page.
 * @nr: Number of pages to map.
 *
 * When nr==1, the initial state of the pte may be present or not present, and
 * the new state may be present or not present. When nr>1, the initial state of
 * all ptes must be not present, and the new state must be present.
 *
 * May be overridden by the architecture, or the architecture can define
 * set_pte() and PFN_PTE_SHIFT.
 *
 * Context: The caller holds the page table lock. The pages all belong
 * to the same folio. The PTEs are all in the same PMD.
 */
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	page_table_check_ptes_set(mm, addr, ptep, pte, nr);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = pte_next_pfn(pte);
	}
}
#endif
#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
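
/*
 * Illustrative sketch: mapping every page of a folio with a single call,
 * assuming 'pte' encodes the folio's first pfn and the desired protections:
 *
 *	set_ptes(mm, addr, ptep, pte, folio_nr_pages(folio));
 *
 * This is equivalent to, but typically cheaper than, folio_nr_pages()
 * individual set_pte_at() calls with the pfn advanced by one each time.
 */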

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);
#else
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
static inline int pudp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pud_t *pudp,
					pud_t entry, int dirty)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef ptep_get
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}
#endif

#ifndef pmdp_get
static inline pmd_t pmdp_get(pmd_t *pmdp)
{
	return READ_ONCE(*pmdp);
}
#endif

#ifndef pudp_get
static inline pud_t pudp_get(pud_t *pudp)
{
	return READ_ONCE(*pudp);
}
#endif

#ifndef p4dp_get
static inline p4d_t p4dp_get(p4d_t *p4dp)
{
	return READ_ONCE(*p4dp);
}
#endif

#ifndef pgdp_get
static inline pgd_t pgdp_get(pgd_t *pgdp)
{
	return READ_ONCE(*pgdp);
}
#endif

#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);
	int r = 1;

	if (!pte_young(pte))
		r = 0;
	else
		set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte));
	return r;
}
#endif

#ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;
	int r = 1;

	if (!pmd_young(pmd))
		r = 0;
	else
		set_pmd_at(vma->vm_mm, address, pmdp, pmd_mkold(pmd));
	return r;
}
#else
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG */
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);
#else
/*
 * Although relevant only to THP, this API is called from generic rmap code
 * under PageTransHuge(), and hence needs a dummy implementation for !THP.
 */
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef arch_has_hw_nonleaf_pmd_young
/*
 * Return whether the accessed bit in non-leaf PMD entries is supported on the
 * local CPU.
 */
static inline bool arch_has_hw_nonleaf_pmd_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
}
#endif

#ifndef arch_has_hw_pte_young
/*
 * Return whether the accessed bit is supported on the local CPU.
 *
 * This stub assumes accessing through an old PTE triggers a page fault.
 * Architectures that automatically set the access bit should override it.
 */
static inline bool arch_has_hw_pte_young(void)
{
	return IS_ENABLED(CONFIG_ARCH_HAS_HW_PTE_YOUNG);
}
#endif

#ifndef exec_folio_order
/*
 * Returns the preferred minimum folio order for executable file-backed memory.
 * Must be in the range [0, PMD_ORDER). Defaults to order-0.
 */
static inline unsigned int exec_folio_order(void)
{
	return 0;
}
#endif

#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
					 pte_t pte)
{
}
#endif

#ifndef arch_check_zapped_pmd
static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
					 pmd_t pmd)
{
}
#endif

#ifndef arch_check_zapped_pud
static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud)
{
}
#endif

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address,
				       pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_clear(mm, address, ptep);
	page_table_check_pte_clear(mm, address, pte);
	return pte;
}
#endif

#ifndef clear_young_dirty_ptes
/**
 * clear_young_dirty_ptes - Mark PTEs that map consecutive pages of the
 *			    same folio as old/clean.
 * @vma: The virtual memory area the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to mark old/clean.
 * @flags: Flags to modify the PTE batch semantics.
 *
 * May be overridden by the architecture; otherwise, implemented by
 * get_and_clear/modify/set for each pte in the range.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void clear_young_dirty_ptes(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep,
					  unsigned int nr, cydp_t flags)
{
	pte_t pte;

	for (;;) {
		if (flags == CYDP_CLEAR_YOUNG)
			ptep_test_and_clear_young(vma, addr, ptep);
		else {
			pte = ptep_get_and_clear(vma->vm_mm, addr, ptep);
			if (flags & CYDP_CLEAR_YOUNG)
				pte = pte_mkold(pte);
			if (flags & CYDP_CLEAR_DIRTY)
				pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, addr, ptep, pte);
		}
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t pte = ptep_get(ptep);

	pte_clear(mm, addr, ptep);
	/*
	 * No need for ptep_get_and_clear(): page table check doesn't care about
	 * any bits that could have been set by HW concurrently.
	 */
	page_table_check_pte_clear(mm, addr, pte);
}

#ifdef CONFIG_GUP_GET_PXX_LOW_HIGH
/*
 * For walking the pagetables without holding any locks. Some architectures
 * (eg x86-32 PAE) cannot load the entries atomically without using expensive
 * instructions. We are guaranteed that a PTE will only either go from not
 * present to present, or present to not present -- it will not switch to a
 * completely different present page without a TLB flush in between, which we
 * block by holding interrupts off.
 *
 * Setting ptes from not present to present goes:
 *
 *	ptep->pte_high = h;
 *	smp_wmb();
 *	ptep->pte_low = l;
 *
 * And present to not present goes:
 *
 *	ptep->pte_low = 0;
 *	smp_wmb();
 *	ptep->pte_high = 0;
 *
 * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
 * We load pte_high *after* loading pte_low, which ensures we don't see an older
 * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
 * picked up a changed pte high. We might have gotten rubbish values from
 * pte_low and pte_high, but we are guaranteed that pte_low will not have the
 * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
 * operates on present ptes we're safe.
 */
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	pte_t pte;

	do {
		pte.pte_low = ptep->pte_low;
		smp_rmb();
		pte.pte_high = ptep->pte_high;
		smp_rmb();
	} while (unlikely(pte.pte_low != ptep->pte_low));

	return pte;
}
#define ptep_get_lockless ptep_get_lockless

#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	pmd_t pmd;

	do {
		pmd.pmd_low = pmdp->pmd_low;
		smp_rmb();
		pmd.pmd_high = pmdp->pmd_high;
		smp_rmb();
	} while (unlikely(pmd.pmd_low != pmdp->pmd_low));

	return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */

/*
 * We require that the PTE can be read atomically.
 */
#ifndef ptep_get_lockless
static inline pte_t ptep_get_lockless(pte_t *ptep)
{
	return ptep_get(ptep);
}
#endif

#ifndef pmdp_get_lockless
static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
	return pmdp_get(pmdp);
}
static inline void pmdp_get_lockless_sync(void)
{
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd_clear(pmdp);
	page_table_check_pmd_clear(mm, address, pmd);

	return pmd;
}
#endif /* __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR */
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address,
					    pud_t *pudp)
{
	pud_t pud = *pudp;

	pud_clear(pudp);
	page_table_check_pud_clear(mm, address, pud);

	return pud;
}
#endif /* __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifndef __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pmd_t *pmdp,
						 int full)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#endif

#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long address, pud_t *pudp,
						 int full)
{
	return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address, pte_t *ptep,
					    int full)
{
	return ptep_get_and_clear(mm, address, ptep);
}
#endif

#ifndef get_and_clear_full_ptes
/**
 * get_and_clear_full_ptes - Clear present PTEs that map consecutive pages of
 *			     the same folio, collecting dirty/accessed bits.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full(), merging dirty/accessed bits into the
 * returned PTE.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	pte_t pte, tmp_pte;

	pte = ptep_get_and_clear_full(mm, addr, ptep, full);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_get_and_clear_full(mm, addr, ptep, full);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
#endif

/**
 * get_and_clear_ptes - Clear present PTEs that map consecutive pages of
 *			the same folio, collecting dirty/accessed bits.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 *
 * Use this instead of get_and_clear_full_ptes() if it is known that we don't
 * need to clear the full mm, which is mostly the case.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, unsigned int nr)
{
	return get_and_clear_full_ptes(mm, addr, ptep, nr, 0);
}
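
/*
 * Illustrative sketch (hedged; not the exact exit path): a batched unmap of
 * one folio would propagate the collected dirty/accessed bits back to the
 * folio after clearing the ptes:
 *
 *	pte = get_and_clear_ptes(mm, addr, ptep, nr);
 *	if (pte_dirty(pte))
 *		folio_mark_dirty(folio);
 *	if (pte_young(pte))
 *		folio_mark_accessed(folio);
 */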

#ifndef clear_full_ptes
/**
 * clear_full_ptes - Clear present PTEs that map consecutive pages of the same
 *		     folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_get_and_clear_full().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		ptep_get_and_clear_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

/**
 * clear_ptes - Clear present PTEs that map consecutive pages of the same folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 *
 * Use this instead of clear_full_ptes() if it is known that we don't need to
 * clear the full mm, which is mostly the case.
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void clear_ptes(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned int nr)
{
	clear_full_ptes(mm, addr, ptep, nr, 0);
}

/*
 * If two threads concurrently fault at the same page, the thread that
 * won the race updates the PTE and its local TLB/cache. The other thread
 * gives up, simply does nothing, and continues; on architectures where
 * software can update the TLB, the local TLB can be updated here to avoid
 * the next page fault. This function updates the TLB only; it does nothing
 * with the cache or anything else, which is what distinguishes it from
 * update_mmu_cache().
 */
#ifndef update_mmu_tlb_range
static inline void update_mmu_tlb_range(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep, unsigned int nr)
{
}
#endif

static inline void update_mmu_tlb(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	update_mmu_tlb_range(vma, address, ptep, 1);
}

/*
 * Some architectures may be able to avoid expensive synchronization
 * primitives when modifications are made to PTEs which are already
 * not present, or in the process of an address space destruction.
 */
#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
static inline void pte_clear_not_present_full(struct mm_struct *mm,
					      unsigned long address,
					      pte_t *ptep,
					      int full)
{
	pte_clear(mm, address, ptep);
}
#endif

#ifndef clear_not_present_full_ptes
/**
 * clear_not_present_full_ptes - Clear multiple not present PTEs which are
 *				 consecutive in the pgtable.
 * @mm: Address space the ptes represent.
 * @addr: Address of the first pte.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to clear.
 * @full: Whether we are clearing a full mm.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over pte_clear_not_present_full().
 *
 * Context: The caller holds the page table lock. The PTEs are all not present.
 * The PTEs are all in the same PMD.
 */
static inline void clear_not_present_full_ptes(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, unsigned int nr, int full)
{
	for (;;) {
		pte_clear_not_present_full(mm, addr, ptep, full);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
extern pte_t ptep_clear_flush(struct vm_area_struct *vma,
			      unsigned long address,
			      pte_t *ptep);
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
extern pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pmd_t *pmdp);
extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
				   unsigned long address,
				   pud_t *pudp);
#endif

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	return pte_mkwrite_novma(pte);
}
#endif

#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	return pmd_mkwrite_novma(pmd);
}
#endif

#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte = ptep_get(ptep);

	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
}
#endif

#ifndef wrprotect_ptes
/**
 * wrprotect_ptes - Write-protect PTEs that map consecutive pages of the same
 *		    folio.
 * @mm: Address space the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries to write-protect.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_set_wrprotect().
 *
 * Note that PTE bits in the PTE range besides the PFN can differ. For example,
 * some PTEs might be write-protected.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. The PTEs are all in the same PMD.
 */
static inline void wrprotect_ptes(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned int nr)
{
	for (;;) {
		ptep_set_wrprotect(mm, addr, ptep);
		if (--nr == 0)
			break;
		ptep++;
		addr += PAGE_SIZE;
	}
}
#endif

/*
 * On some architectures the hardware does not set the page access bit when
 * accessing a memory page; it is the responsibility of software to set it.
 * Tracking the access bit this way incurs an extra page-fault penalty, so as
 * an optimization the bit can be set during the whole page-fault flow on
 * these arches. To differentiate it from the pte_mkyoung macro, this macro
 * is used on platforms where software maintains the page access bit.
 */
#ifndef pte_sw_mkyoung
static inline pte_t pte_sw_mkyoung(pte_t pte)
{
	return pte;
}
#define pte_sw_mkyoung	pte_sw_mkyoung
#endif

#ifndef __HAVE_ARCH_PMDP_SET_WRPROTECT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t old_pmd = *pmdp;

	set_pmd_at(mm, address, pmdp, pmd_wrprotect(old_pmd));
}
#else
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	pud_t old_pud = *pudp;

	set_pud_at(mm, address, pudp, pud_wrprotect(old_pud));
}
#else
static inline void pudp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pud_t *pudp)
{
	BUILD_BUG();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif

#ifndef pmdp_collapse_flush
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp);
#else
static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	BUILD_BUG();
	return *pmdp;
}
#define pmdp_collapse_flush pmdp_collapse_flush
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#endif

#ifndef arch_needs_pgtable_deposit
#define arch_needs_pgtable_deposit() (false)
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * This is an implementation of pmdp_establish() that is only suitable for an
 * architecture that doesn't have hardware dirty/accessed bits. In this case we
 * cannot race with the CPU setting those bits, and a non-atomic approach is
 * fine.
 */
static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
	pmd_t old_pmd = *pmdp;

	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	return old_pmd;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
			     pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
/*
 * pmdp_invalidate_ad() invalidates the PMD while changing a transparent
 * hugepage mapping in the page tables. This function is similar to
 * pmdp_invalidate(), but should only be used if the access and dirty bits
 * would not be cleared by the software in the new PMD value. The function
 * ensures that hardware updates of the access and dirty bits are not lost.
 *
 * Doing so allows certain architectures to avoid a TLB flush in most cases.
 * Another TLB flush might still be necessary later if the PMD update itself
 * requires one (e.g., if protection was made stricter); even then, the caller
 * may be able to batch these TLB flushing operations, so that fewer flushes
 * are needed overall.
 */
extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
				unsigned long address, pmd_t *pmdp);
#endif

#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}
#endif

#ifndef __HAVE_ARCH_PTE_UNUSED
/*
 * Some architectures provide facilities to virtualization guests
 * so that they can flag allocated pages as unused. This allows the
 * host to transparently reclaim unused pages. This function returns
 * whether the pte's page is unused.
 */
static inline int pte_unused(pte_t pte)
{
	return 0;
}
#endif

#ifndef pte_access_permitted
#define pte_access_permitted(pte, write) \
	(pte_present(pte) && (!(write) || pte_write(pte)))
#endif

#ifndef pmd_access_permitted
#define pmd_access_permitted(pmd, write) \
	(pmd_present(pmd) && (!(write) || pmd_write(pmd)))
#endif

#ifndef pud_access_permitted
#define pud_access_permitted(pud, write) \
	(pud_present(pud) && (!(write) || pud_write(pud)))
#endif

#ifndef p4d_access_permitted
#define p4d_access_permitted(p4d, write) \
	(p4d_present(p4d) && (!(write) || p4d_write(p4d)))
#endif

#ifndef pgd_access_permitted
#define pgd_access_permitted(pgd, write) \
	(pgd_present(pgd) && (!(write) || pgd_write(pgd)))
#endif

#ifndef __HAVE_ARCH_PMD_SAME
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return pmd_val(pmd_a) == pmd_val(pmd_b);
}
#endif

#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
	return pud_val(pud_a) == pud_val(pud_b);
}
#define pud_same pud_same
#endif

#ifndef __HAVE_ARCH_P4D_SAME
static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
{
	return p4d_val(p4d_a) == p4d_val(p4d_b);
}
#endif

#ifndef __HAVE_ARCH_PGD_SAME
static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
{
	return pgd_val(pgd_a) == pgd_val(pgd_b);
}
#endif

#ifndef __HAVE_ARCH_DO_SWAP_PAGE
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr,
					pte_t pte, pte_t oldpte,
					int nr)
{
}
#else
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_do_swap_page() can restore this
 * metadata when a page is swapped back in.
 */
static inline void arch_do_swap_page_nr(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr,
					pte_t pte, pte_t oldpte,
					int nr)
{
	for (int i = 0; i < nr; i++) {
		arch_do_swap_page(vma->vm_mm, vma, addr + i * PAGE_SIZE,
				  pte_advance_pfn(pte, i),
				  pte_advance_pfn(oldpte, i));
	}
}
#endif

#ifndef __HAVE_ARCH_UNMAP_ONE
/*
 * Some architectures support metadata associated with a page. When a
 * page is being swapped out, this metadata must be saved so it can be
 * restored when the page is swapped back in. SPARC M7 and newer
 * processors support an ADI (Application Data Integrity) tag for the
 * page as metadata for the page. arch_unmap_one() can save this
 * metadata on a swap-out of a page.
 */
static inline int arch_unmap_one(struct mm_struct *mm,
				 struct vm_area_struct *vma,
				 unsigned long addr,
				 pte_t orig_pte)
{
	return 0;
}
#endif

/*
 * Allow architectures to preserve additional metadata associated with
 * swapped-out pages. The corresponding __HAVE_ARCH_SWAP_* macros and function
 * prototypes must be defined in the arch-specific asm/pgtable.h file.
 */
#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
static inline int arch_prepare_to_swap(struct folio *folio)
{
	return 0;
}
#endif

#ifndef __HAVE_ARCH_SWAP_INVALIDATE
static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
{
}

static inline void arch_swap_invalidate_area(int type)
{
}
#endif

#ifndef __HAVE_ARCH_SWAP_RESTORE
static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
{
}
#endif

#ifndef __HAVE_ARCH_MOVE_PTE
#define move_pte(pte, old_addr, new_addr)	(pte)
#endif

#ifndef pte_accessible
# define pte_accessible(mm, pte)	((void)(pte), 1)
#endif

#ifndef flush_tlb_fix_spurious_fault
#define flush_tlb_fix_spurious_fault(vma, address, ptep) flush_tlb_page(vma, address)
#endif

#ifndef flush_tlb_fix_spurious_fault_pmd
#define flush_tlb_fix_spurious_fault_pmd(vma, address, pmdp) do { } while (0)
#endif

/*
 * When walking page tables, get the address of the next boundary,
 * or the end address of the range if that comes earlier. Although no
 * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
 */

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

#ifndef p4d_addr_end
#define p4d_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + P4D_SIZE) & P4D_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif

#ifndef pud_addr_end
#define pud_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif

#ifndef pmd_addr_end
#define pmd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})
#endif
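
/*
 * Illustrative sketch: the canonical range-walk pattern these macros support.
 * 'next' never overshoots 'end', even when the rounded-up boundary would wrap
 * to 0:
 *
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	do {
 *		next = pmd_addr_end(addr, end);
 *		if (pmd_none_or_clear_bad(pmd))
 *			continue;
 *		... operate on [addr, next) ...
 *	} while (pmd++, addr = next, addr != end);
 */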

/*
 * When walking page tables, we usually want to skip any p?d_none entries;
 * and any p?d_bad entries - reporting the error before resetting to none.
 * Do the tests inline, but report and clear the bad entry in mm/memory.c.
 */
void pgd_clear_bad(pgd_t *);

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
#else
#define p4d_clear_bad(p4d)	do { } while (0)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
#else
#define pud_clear_bad(pud)	do { } while (0)
#endif

void pmd_clear_bad(pmd_t *);

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
	if (pgd_none(*pgd))
		return 1;
	if (unlikely(pgd_bad(*pgd))) {
		pgd_clear_bad(pgd);
		return 1;
	}
	return 0;
}

static inline int p4d_none_or_clear_bad(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);
		return 1;
	}
	return 0;
}

static inline int pud_none_or_clear_bad(pud_t *pud)
{
	if (pud_none(*pud))
		return 1;
	if (unlikely(pud_bad(*pud))) {
		pud_clear_bad(pud);
		return 1;
	}
	return 0;
}

static inline int pmd_none_or_clear_bad(pmd_t *pmd)
{
	if (pmd_none(*pmd))
		return 1;
	if (unlikely(pmd_bad(*pmd))) {
		pmd_clear_bad(pmd);
		return 1;
	}
	return 0;
}

static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep)
{
	/*
	 * Get the current pte state, but zero it out to make it
	 * non-present, preventing the hardware from asynchronously
	 * updating it.
	 */
	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
}

static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
					     unsigned long addr,
					     pte_t *ptep, pte_t pte)
{
	/*
	 * The pte is non-present, so there's no hardware state to
	 * preserve.
	 */
	set_pte_at(vma->vm_mm, addr, ptep, pte);
}

#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
/*
 * Start a pte protection read-modify-write transaction, which
 * protects against asynchronous hardware modifications to the pte.
 * The intention is not to prevent the hardware from making pte
 * updates, but to prevent any updates it may make from being lost.
 *
 * This does not protect against other software modifications of the
 * pte; the appropriate pte lock must be held over the transaction.
 *
 * Note that this interface is intended to be batchable, meaning that
 * ptep_modify_prot_commit may not actually update the pte, but merely
 * queue the update to be done at some later time. The update must be
 * actually committed before the pte lock is released, however.
 */
static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep)
{
	return __ptep_modify_prot_start(vma, addr, ptep);
}

/*
 * Commit an update to a pte, leaving any hardware-controlled bits in
 * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
 * additionally have young and/or dirty bits set where previously they were not,
 * so the updated pte may have these additional changes.
 */
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr,
					   pte_t *ptep, pte_t old_pte, pte_t pte)
{
	__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
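
/*
 * Illustrative sketch (in the style of mprotect's change_pte_range(), names
 * hedged): a protection change built on the transaction pair:
 *
 *	oldpte = ptep_modify_prot_start(vma, addr, ptep);
 *	ptent = pte_modify(oldpte, newprot);
 *	ptep_modify_prot_commit(vma, addr, ptep, oldpte, ptent);
 *
 * Hardware access/dirty updates that race with the transaction are carried
 * in oldpte and therefore survive into the committed value.
 */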

/**
 * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
 * over a batch of ptes, which protects against asynchronous hardware
 * modifications to the ptes. The intention is not to prevent the hardware from
 * making pte updates, but to prevent any updates it may make from being lost.
 * Please see the comment above ptep_modify_prot_start() for full description.
 *
 * @vma: The virtual memory area the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @nr: Number of entries.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
 * in the batch.
 *
 * Note that PTE bits in the PTE batch besides the PFN can differ.
 *
 * Context: The caller holds the page table lock. The PTEs map consecutive
 * pages that belong to the same folio. All other PTE bits must be identical for
 * all PTEs in the batch except for young and dirty bits. The PTEs are all in
 * the same PMD.
 */
#ifndef modify_prot_start_ptes
static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep, unsigned int nr)
{
	pte_t pte, tmp_pte;

	pte = ptep_modify_prot_start(vma, addr, ptep);
	while (--nr) {
		ptep++;
		addr += PAGE_SIZE;
		tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
		if (pte_dirty(tmp_pte))
			pte = pte_mkdirty(pte);
		if (pte_young(tmp_pte))
			pte = pte_mkyoung(pte);
	}
	return pte;
}
#endif

/**
 * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
 * hardware-controlled bits in the PTE unmodified.
 *
 * @vma: The virtual memory area the pages are mapped into.
 * @addr: Address the first page is mapped at.
 * @ptep: Page table pointer for the first entry.
 * @old_pte: Old page table entry (for the first entry) which is now cleared.
 * @pte: New page table entry to be set.
 * @nr: Number of entries.
 *
 * May be overridden by the architecture; otherwise, implemented as a simple
 * loop over ptep_modify_prot_commit().
 *
 * Context: The caller holds the page table lock. The PTEs are all in the same
 * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
 * ptep_modify_prot_start() may additionally have young and/or dirty bits set
 * where previously they were not, so the updated ptes may have these
 * additional changes.
 */
#ifndef modify_prot_commit_ptes
static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
		pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
{
	int i;

	for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
		ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);

		/* Advance PFN only, set same prot */
		old_pte = pte_next_pfn(old_pte);
		pte = pte_next_pfn(pte);
	}
}
#endif

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc, ioremap and page table update code know when
 * arch_sync_kernel_mappings() needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(); we rely
 * on the compiler to optimize the calls out when ARCH_PAGE_TABLE_SYNC_MASK
 * is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

#endif /* CONFIG_MMU */

/*
 * No-op macros that just return the current protection value. Defined here
 * because these macros can be used even if CONFIG_MMU is not defined.
 */

#ifndef pgprot_nx
#define pgprot_nx(prot)	(prot)
#endif

#ifndef pgprot_noncached
#define pgprot_noncached(prot)	(prot)
#endif

#ifndef pgprot_writecombine
#define pgprot_writecombine pgprot_noncached
#endif

#ifndef pgprot_writethrough
#define pgprot_writethrough pgprot_noncached
#endif

#ifndef pgprot_device
#define pgprot_device pgprot_noncached
#endif

#ifndef pgprot_mhp
#define pgprot_mhp(prot)	(prot)
#endif

#ifdef CONFIG_MMU
#ifndef pgprot_modify
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	if (pgprot_val(oldprot) == pgprot_val(pgprot_noncached(oldprot)))
		newprot = pgprot_noncached(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_writecombine(oldprot)))
		newprot = pgprot_writecombine(newprot);
	if (pgprot_val(oldprot) == pgprot_val(pgprot_device(oldprot)))
		newprot = pgprot_device(newprot);
	return newprot;
}
#endif
#endif /* CONFIG_MMU */

#ifndef pgprot_encrypted
#define pgprot_encrypted(prot)	(prot)
#endif

#ifndef pgprot_decrypted
#define pgprot_decrypted(prot)	(prot)
#endif

/*
 * A facility to provide batching of the reload of page tables and
 * other process state with the actual context switch code for
 * paravirtualized guests. By convention, only one of the batched
 * update (lazy) modes (CPU, MMU) should be active at any given time,
 * entry should never be nested, and entry and exits should always be
 * paired. This is for sanity of maintaining and reasoning about the
 * kernel code. In this case, the exit (end of the context switch) is
 * in architecture-specific code, and so doesn't need a generic
 * definition.
 */
#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
#define arch_start_context_switch(prev)	do {} while (0)
#endif

/*
 * Some platforms can customize the PTE soft-dirty bit, making it unavailable
 * even if the architecture provides the resource. This API allows
 * architectures to add their own checks for the devices on which the kernel
 * is running.
 * Note: when overriding it, please make sure that CONFIG_MEM_SOFT_DIRTY
 * is part of the macro.
 */
#ifndef pgtable_supports_soft_dirty
#define pgtable_supports_soft_dirty()	IS_ENABLED(CONFIG_MEM_SOFT_DIRTY)
#endif

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
#ifndef CONFIG_ARCH_ENABLE_THP_MIGRATION
static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif
#else /* !CONFIG_HAVE_ARCH_SOFT_DIRTY */
static inline int pte_soft_dirty(pte_t pte)
{
	return 0;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte;
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return 0;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte;
}

static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
{
	return pmd;
}

static inline int pmd_swp_soft_dirty(pmd_t pmd)
{
	return 0;
}

static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
{
	return pmd;
}
#endif

#ifndef __HAVE_PFNMAP_TRACKING
/*
 * Interfaces that can be used by architecture code to keep track of the
 * memory type of pfn mappings specified by remap_pfn_range() and
 * vmf_insert_pfn().
 */

static inline int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
					 pgprot_t *prot)
{
	return 0;
}

static inline int pfnmap_track(unsigned long pfn, unsigned long size,
			       pgprot_t *prot)
{
	return 0;
}

static inline void pfnmap_untrack(unsigned long pfn, unsigned long size)
{
}
#else
/**
 * pfnmap_setup_cachemode - setup the cachemode in the pgprot for a pfn range
 * @pfn: the start of the pfn range
 * @size: the size of the pfn range in bytes
 * @prot: the pgprot to modify
 *
 * Lookup the cachemode for the pfn range starting at @pfn with the size
 * @size and store it in @prot, leaving other data in @prot unchanged.
 *
 * This allows for a hardware implementation to have fine-grained control of
 * memory cache behavior at page level granularity. Without a hardware
 * implementation, this function does nothing.
 *
 * Currently there is only one implementation for this - x86 Page Attribute
 * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
 *
 * This function can fail if the pfn range spans pfns that require differing
 * cachemodes. If the pfn range was previously verified to have a single
 * cachemode, it is sufficient to query only a single pfn. The assumption is
 * that this is the case for drivers using the vmf_insert_pfn*() interface.
 *
 * Returns 0 on success and -EINVAL on error.
 */
int pfnmap_setup_cachemode(unsigned long pfn, unsigned long size,
			   pgprot_t *prot);

/**
 * pfnmap_track - track a pfn range
 * @pfn: the start of the pfn range
 * @size: the size of the pfn range in bytes
 * @prot: the pgprot to track
 *
1846 * Request the pfn range to be 'tracked' by a hardware implementation and
1847 * set up the cachemode in @prot, similar to pfnmap_setup_cachemode().
1848 *
1849 * This allows for fine-grained control of memory cache behaviour at page
1850 * level granularity. Tracking memory this way persists across VMA splits
1851 * (VMA merging does not apply for VM_PFNMAP).
1852 *
1853 * Currently, there is only one implementation for this - x86 Page Attribute
1854 * Table (PAT). See Documentation/arch/x86/pat.rst for more details.
1855 *
1856 * Returns 0 on success and -EINVAL on error.
1857 */
1858int pfnmap_track(unsigned long pfn, unsigned long size, pgprot_t *prot);
1859
1860/**
1861 * pfnmap_untrack - untrack a pfn range
1862 * @pfn: the start of the pfn range
1863 * @size: the size of the pfn range in bytes
1864 *
1865 * Untrack a pfn range previously tracked through pfnmap_track().
1866 */
1867void pfnmap_untrack(unsigned long pfn, unsigned long size);
1868#endif
1869
1870/**
1871 * pfnmap_setup_cachemode_pfn - setup the cachemode in the pgprot for a pfn
1872 * @pfn: the pfn
1873 * @prot: the pgprot to modify
1874 *
1875 * Look up the cachemode for @pfn and store it in @prot, leaving other
1876 * data in @prot unchanged.
1877 *
1878 * See pfnmap_setup_cachemode() for details.
1879 */
1880static inline void pfnmap_setup_cachemode_pfn(unsigned long pfn, pgprot_t *prot)
1881{
1882 pfnmap_setup_cachemode(pfn, PAGE_SIZE, prot);
1883}
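
/*
 * Usage sketch (hypothetical driver code; mydrv_fault() and the pfn
 * variable are illustrative): a driver inserting a single pfn would
 * refresh the cachemode before the insert:
 *
 *	static vm_fault_t mydrv_fault(struct vm_fault *vmf)
 *	{
 *		pgprot_t prot = vmf->vma->vm_page_prot;
 *
 *		pfnmap_setup_cachemode_pfn(pfn, &prot);
 *		return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn, prot);
 *	}
 */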
1884
1885#ifdef CONFIG_MMU
1886#ifdef __HAVE_COLOR_ZERO_PAGE
1887static inline int is_zero_pfn(unsigned long pfn)
1888{
1889 extern unsigned long zero_pfn;
1890 unsigned long offset_from_zero_pfn = pfn - zero_pfn;
1891 return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
1892}
1893
1894#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
1895
1896#else
1897static inline int is_zero_pfn(unsigned long pfn)
1898{
1899 extern unsigned long zero_pfn;
1900 return pfn == zero_pfn;
1901}
1902
1903static inline unsigned long my_zero_pfn(unsigned long addr)
1904{
1905 extern unsigned long zero_pfn;
1906 return zero_pfn;
1907}
1908#endif
1909#else
1910static inline int is_zero_pfn(unsigned long pfn)
1911{
1912 return 0;
1913}
1914
1915static inline unsigned long my_zero_pfn(unsigned long addr)
1916{
1917 return 0;
1918}
1919#endif /* CONFIG_MMU */
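
/*
 * Typical check (a simplified sketch of the vm_normal_page() pattern):
 * the shared zero page has no "normal" page behind it, so pfn walkers
 * special-case it:
 *
 *	if (is_zero_pfn(pte_pfn(pte)))
 *		return NULL;
 *
 * i.e. the zero page is never handed back as a normal page.
 */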
1920
1921#ifdef CONFIG_MMU
1922
1923#ifndef CONFIG_TRANSPARENT_HUGEPAGE
1924static inline int pmd_trans_huge(pmd_t pmd)
1925{
1926 return 0;
1927}
1928#ifndef pmd_write
1929static inline int pmd_write(pmd_t pmd)
1930{
1931 BUG();
1932 return 0;
1933}
1934#endif /* pmd_write */
1935#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1936
1937#ifndef pud_write
1938static inline int pud_write(pud_t pud)
1939{
1940 BUG();
1941 return 0;
1942}
1943#endif /* pud_write */
1944
1945#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
1946 !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1947static inline int pud_trans_huge(pud_t pud)
1948{
1949 return 0;
1950}
1951#endif
1952
1953static inline int pud_trans_unstable(pud_t *pud)
1954{
1955#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
1956 defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
1957 pud_t pudval = READ_ONCE(*pud);
1958
1959 if (pud_none(pudval) || pud_trans_huge(pudval))
1960 return 1;
1961 if (unlikely(pud_bad(pudval))) {
1962 pud_clear_bad(pud);
1963 return 1;
1964 }
1965#endif
1966 return 0;
1967}
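
/*
 * Caller sketch (illustrative, not lifted from real code): walkers
 * about to descend into a PUD-level page table typically skip the
 * range when the entry cannot be descended through safely:
 *
 *	if (pud_trans_unstable(pudp))
 *		continue;
 *
 * i.e. skip rather than descend through an entry that may be none,
 * huge, or getting cleared underneath us.
 */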
1968
1969#ifndef CONFIG_NUMA_BALANCING
1970/*
1971 * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
1972 * perfectly valid to indicate "no" in that case, which is why our default
1973 * implementation defaults to "always no".
1974 *
1975 * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
1976 * page protection due to NUMA hinting. NUMA hinting faults only apply in
1977 * accessible VMAs.
1978 *
1979 * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
1980 * looking at the VMA accessibility is sufficient.
1981 */
1982static inline int pte_protnone(pte_t pte)
1983{
1984 return 0;
1985}
1986
1987static inline int pmd_protnone(pmd_t pmd)
1988{
1989 return 0;
1990}
1991#endif /* CONFIG_NUMA_BALANCING */
1992
1993#endif /* CONFIG_MMU */
1994
1995#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
1996
1997#ifndef __PAGETABLE_P4D_FOLDED
1998int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot);
1999void p4d_clear_huge(p4d_t *p4d);
2000#else
2001static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
2002{
2003 return 0;
2004}
2005static inline void p4d_clear_huge(p4d_t *p4d) { }
2006#endif /* !__PAGETABLE_P4D_FOLDED */
2007
2008int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
2009int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
2010int pud_clear_huge(pud_t *pud);
2011int pmd_clear_huge(pmd_t *pmd);
2012int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
2013int pud_free_pmd_page(pud_t *pud, unsigned long addr);
2014int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
2015#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
2016static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
2017{
2018 return 0;
2019}
2020static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
2021{
2022 return 0;
2023}
2024static inline int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
2025{
2026 return 0;
2027}
2028static inline void p4d_clear_huge(p4d_t *p4d) { }
2029static inline int pud_clear_huge(pud_t *pud)
2030{
2031 return 0;
2032}
2033static inline int pmd_clear_huge(pmd_t *pmd)
2034{
2035 return 0;
2036}
2037static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
2038{
2039 return 0;
2040}
2041static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
2042{
2043 return 0;
2044}
2045static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
2046{
2047 return 0;
2048}
2049#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
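
/*
 * Sketch of how a generic mapping loop would use these hooks (a
 * simplification in the spirit of the vmalloc/ioremap code, not its
 * actual source): try a huge entry when size and alignment allow, and
 * fall back to the next level otherwise:
 *
 *	if (size >= PMD_SIZE && IS_ALIGNED(addr | phys, PMD_SIZE) &&
 *	    pmd_set_huge(pmdp, phys, prot))
 *		continue;
 *
 * The !CONFIG_HAVE_ARCH_HUGE_VMAP stubs above return 0, so the same
 * caller transparently degrades to PTE-granularity mappings.
 */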
2050
2051#ifndef __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
2052#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2053/*
2054 * Architectures with special requirements for evicting THP-backing TLB
2055 * entries can implement this; even otherwise it can optimize the normal
2056 * TLB flush in the THP regime. The stock flush_tlb_range() typically
2057 * nukes the entire TLB once the flush span exceeds a threshold, which is
2058 * likely for a single huge page; a single THP flush would thus
2059 * invalidate the entire TLB, which is not desirable.
2060 * e.g. see arch/arc: flush_pmd_tlb_range
2061 */
2062#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
2063#define flush_pud_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
2064#else
2065#define flush_pmd_tlb_range(vma, addr, end) BUILD_BUG()
2066#define flush_pud_tlb_range(vma, addr, end) BUILD_BUG()
2067#endif
2068#endif
2069
2070struct file;
2071int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
2072 unsigned long size, pgprot_t *vma_prot);
2073
2074#ifndef CONFIG_X86_ESPFIX64
2075static inline void init_espfix_bsp(void) { }
2076#endif
2077
2078extern void __init pgtable_cache_init(void);
2079
2080#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
2081static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
2082{
2083 return true;
2084}
2085
2086static inline bool arch_has_pfn_modify_check(void)
2087{
2088 return false;
2089}
2090#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
2091
2092/*
2093 * Architecture PAGE_KERNEL_* fallbacks
2094 *
2095 * Some architectures don't define certain PAGE_KERNEL_* flags, either
2096 * because they really don't support them or because the port hasn't been
2097 * updated to reflect the required functionality. Below is a set of
2098 * relatively safe, best-effort fallbacks to count on until the
2099 * architectures define the flags on their own.
2100 */
2101
2102#ifndef PAGE_KERNEL_RO
2103# define PAGE_KERNEL_RO PAGE_KERNEL
2104#endif
2105
2106#ifndef PAGE_KERNEL_EXEC
2107# define PAGE_KERNEL_EXEC PAGE_KERNEL
2108#endif
2109
2110/*
2111 * Page Table Modification bits for pgtbl_mod_mask.
2112 *
2113 * These are used by the p?d_alloc_track*() and p*d_populate_kernel()
2114 * functions in the generic vmalloc, ioremap and page table update code
2115 * to track at which page-table levels entries have been modified.
2116 * Based on that the code can better decide when page table changes need
2117 * to be synchronized to other page-tables in the system.
2118 */
2119#define __PGTBL_PGD_MODIFIED 0
2120#define __PGTBL_P4D_MODIFIED 1
2121#define __PGTBL_PUD_MODIFIED 2
2122#define __PGTBL_PMD_MODIFIED 3
2123#define __PGTBL_PTE_MODIFIED 4
2124
2125#define PGTBL_PGD_MODIFIED BIT(__PGTBL_PGD_MODIFIED)
2126#define PGTBL_P4D_MODIFIED BIT(__PGTBL_P4D_MODIFIED)
2127#define PGTBL_PUD_MODIFIED BIT(__PGTBL_PUD_MODIFIED)
2128#define PGTBL_PMD_MODIFIED BIT(__PGTBL_PMD_MODIFIED)
2129#define PGTBL_PTE_MODIFIED BIT(__PGTBL_PTE_MODIFIED)
2130
2131/* Page-Table Modification Mask */
2132typedef unsigned int pgtbl_mod_mask;
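
/*
 * Usage sketch (hypothetical caller, mirroring how the mask is threaded
 * through the vmalloc code): each level that modifies an entry ORs in
 * its bit, and the caller synchronizes once at the end if any level the
 * architecture cares about was touched:
 *
 *	pgtbl_mod_mask mask = 0;
 *
 *	mask |= PGTBL_PMD_MODIFIED;
 *	...
 *	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 *		arch_sync_kernel_mappings(start, end);
 *
 * ARCH_PAGE_TABLE_SYNC_MASK and arch_sync_kernel_mappings() are the
 * existing vmalloc-side consumers of this mask.
 */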
2133
2134enum pgtable_level {
2135 PGTABLE_LEVEL_PTE = 0,
2136 PGTABLE_LEVEL_PMD,
2137 PGTABLE_LEVEL_PUD,
2138 PGTABLE_LEVEL_P4D,
2139 PGTABLE_LEVEL_PGD,
2140};
2141
2142static inline const char *pgtable_level_to_str(enum pgtable_level level)
2143{
2144 switch (level) {
2145 case PGTABLE_LEVEL_PTE:
2146 return "pte";
2147 case PGTABLE_LEVEL_PMD:
2148 return "pmd";
2149 case PGTABLE_LEVEL_PUD:
2150 return "pud";
2151 case PGTABLE_LEVEL_P4D:
2152 return "p4d";
2153 case PGTABLE_LEVEL_PGD:
2154 return "pgd";
2155 default:
2156 return "unknown";
2157 }
2158}
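
/*
 * Example (hypothetical call site): handy for level-agnostic
 * diagnostics, e.g.:
 *
 *	pr_warn("%s: unexpected %s entry\n", __func__,
 *		pgtable_level_to_str(PGTABLE_LEVEL_PMD));
 */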
2159
2160#endif /* !__ASSEMBLY__ */
2161
2162#if !defined(MAX_POSSIBLE_PHYSMEM_BITS) && !defined(CONFIG_64BIT)
2163#ifdef CONFIG_PHYS_ADDR_T_64BIT
2164/*
2165 * ZSMALLOC needs to know the highest PFN on 32-bit architectures
2166 * with physical address space extension; otherwise it falls back
2167 * to BITS_PER_LONG.
2168 */
2169#error Missing MAX_POSSIBLE_PHYSMEM_BITS definition
2170#else
2171#define MAX_POSSIBLE_PHYSMEM_BITS 32
2172#endif
2173#endif
2174
2175#ifndef has_transparent_hugepage
2176#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
2177#endif
2178
2179#ifndef has_transparent_pud_hugepage
2180#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
2181#endif
2182/*
2183 * On some architectures, whether the p4d, pud or pmd layer of the
2184 * page table hierarchy is folded depends on the mm.
2185 */
2186#ifndef mm_p4d_folded
2187#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
2188#endif
2189
2190#ifndef mm_pud_folded
2191#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
2192#endif
2193
2194#ifndef mm_pmd_folded
2195#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
2196#endif
2197
2198#ifndef p4d_offset_lockless
2199#define p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address)
2200#endif
2201#ifndef pud_offset_lockless
2202#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address)
2203#endif
2204#ifndef pmd_offset_lockless
2205#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address)
2206#endif
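
/*
 * Lockless-walk sketch (simplified from the GUP-fast style of walker
 * these helpers exist for): each upper-level entry is read once with
 * READ_ONCE() and the lower table is derived from that snapshot, so the
 * walker never dereferences an entry it has not captured:
 *
 *	pgd_t pgd = READ_ONCE(*pgdp);
 *	p4d_t *p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	p4d_t p4d = READ_ONCE(*p4dp);
 *	pud_t *pudp = pud_offset_lockless(p4dp, p4d, addr);
 */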
2207
2208/*
2209 * pXd_leaf() is the API to check whether a pgtable entry is a huge page
2210 * mapping. It should work globally across all archs, without any
2211 * dependency on CONFIG_* options. For architectures that do not support
2212 * huge mappings on specific levels, the fallbacks below are used.
2213 *
2214 * A leaf pgtable entry should always imply the following:
2215 *
2216 * - It is a "present" entry. IOW, before using this API, please check it
2217 * with pXd_present() first. NOTE: it may not always mean the "present
2218 * bit" is set. For example, PROT_NONE entries are always "present".
2219 *
2220 * - It should _never_ be a swap entry of any type. Above "present" check
2221 * should have guarded this, but let's be crystal clear on this.
2222 *
2223 * - It should contain a huge PFN, which points to a huge page larger than
2224 * PAGE_SIZE of the platform. The PFN format isn't important here.
2225 *
2226 * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
2227 * or hugetlb mappings).
2228 */
2229#ifndef pgd_leaf
2230#define pgd_leaf(x) false
2231#endif
2232#ifndef p4d_leaf
2233#define p4d_leaf(x) false
2234#endif
2235#ifndef pud_leaf
2236#define pud_leaf(x) false
2237#endif
2238#ifndef pmd_leaf
2239#define pmd_leaf(x) false
2240#endif
2241
2242#ifndef pgd_leaf_size
2243#define pgd_leaf_size(x) (1ULL << PGDIR_SHIFT)
2244#endif
2245#ifndef p4d_leaf_size
2246#define p4d_leaf_size(x) P4D_SIZE
2247#endif
2248#ifndef pud_leaf_size
2249#define pud_leaf_size(x) PUD_SIZE
2250#endif
2251#ifndef pmd_leaf_size
2252#define pmd_leaf_size(x) PMD_SIZE
2253#endif
2254#ifndef __pte_leaf_size
2255#ifndef pte_leaf_size
2256#define pte_leaf_size(x) PAGE_SIZE
2257#endif
2258#define __pte_leaf_size(x,y) pte_leaf_size(y)
2259#endif
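
/*
 * Walker sketch combining the two APIs (illustrative only): per the
 * rules above, check pXd_present() before pXd_leaf(), and only then
 * trust the leaf size:
 *
 *	pmd_t pmd = pmdp_get(pmdp);
 *
 *	if (pmd_present(pmd) && pmd_leaf(pmd)) {
 *		u64 size = pmd_leaf_size(pmd);
 *		...
 *	}
 */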
2260
2261/*
2262 * We always define pmd_pfn for all archs as it's used in lots of generic
2263 * code. The same is now true of pud_pfn (and may become true of even
2264 * larger mappings in the future; we're not there yet). Instead of
2265 * defining it for all archs (like pmd_pfn), provide a fallback.
2266 *
2267 * Note that returning 0 here means any arch that didn't define this can
2268 * go severely wrong when it hits a real pud leaf. It is the arch's
2269 * responsibility to define it properly when a huge pud is possible.
2270 */
2271#ifndef pud_pfn
2272#define pud_pfn(x) 0
2273#endif
2274
2275/*
2276 * Some architectures have MMUs that are configurable or selectable at boot
2277 * time. These lead to variable PTRS_PER_x. For statically allocated arrays it
2278 * helps to have a static maximum value.
2279 */
2280
2281#ifndef MAX_PTRS_PER_PTE
2282#define MAX_PTRS_PER_PTE PTRS_PER_PTE
2283#endif
2284
2285#ifndef MAX_PTRS_PER_PMD
2286#define MAX_PTRS_PER_PMD PTRS_PER_PMD
2287#endif
2288
2289#ifndef MAX_PTRS_PER_PUD
2290#define MAX_PTRS_PER_PUD PTRS_PER_PUD
2291#endif
2292
2293#ifndef MAX_PTRS_PER_P4D
2294#define MAX_PTRS_PER_P4D PTRS_PER_P4D
2295#endif
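
/*
 * Example of the intended use (the array name is hypothetical;
 * mm/kasan's early shadow tables are a real-world equivalent): size
 * static arrays for the largest geometry the MMU can be configured
 * with:
 *
 *	static pte_t early_pte_table[MAX_PTRS_PER_PTE] __page_aligned_bss;
 */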
2296
2297#ifndef pte_pgprot
2298#define pte_pgprot(x) ((pgprot_t) {0})
2299#endif
2300
2301#ifndef pmd_pgprot
2302#define pmd_pgprot(x) ((pgprot_t) {0})
2303#endif
2304
2305#ifndef pud_pgprot
2306#define pud_pgprot(x) ((pgprot_t) {0})
2307#endif
2308
2309/* Description of the effects of mapping type and prot in the current
2310 * implementation. This is due to the limited x86 page protection
2311 * hardware. The expected behavior is in parens:
2312 *
2313 * map_type prot
2314 * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
2315 * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
2316 * w: (no) no w: (no) no w: (yes) yes w: (no) no
2317 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
2318 *
2319 * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
2320 * w: (no) no w: (no) no w: (copy) copy w: (no) no
2321 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
2322 *
2323 * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
2324 * MAP_PRIVATE (with Enhanced PAN supported):
2325 * r: (no) no
2326 * w: (no) no
2327 * x: (yes) yes
2328 */
2329#define DECLARE_VM_GET_PAGE_PROT \
2330pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
2331{ \
2332 return protection_map[vm_flags & \
2333 (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
2334} \
2335EXPORT_SYMBOL(vm_get_page_prot);
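
/*
 * Typical arch-side use (a sketch; each architecture supplies its own
 * 16-entry protection_map, and the two initializers shown are
 * placeholders):
 *
 *	static pgprot_t protection_map[16] __ro_after_init = {
 *		[VM_NONE]	= PAGE_NONE,
 *		[VM_READ]	= PAGE_READONLY,
 *		...
 *	};
 *	DECLARE_VM_GET_PAGE_PROT
 */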
2336
2337#endif /* _LINUX_PGTABLE_H */