Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

mm: constify assert/test functions in mm.h

For improved const-correctness.

We select certain assert and test functions which invoke only each
other, functions that are already const-ified, or no further functions
at all.

It is therefore relatively trivial to const-ify them, which provides a
basis for further const-ification higher up the call stack.
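
To see why this enables const-ification up the call stack, consider a
toy sketch (invented types and helpers, not code from this patch):
once a leaf predicate takes a const pointer, every caller that only
reads the object can be const-qualified as well, and the compiler
enforces that nothing is modified.

#include <stdbool.h>

struct widget {
        unsigned long flags;
};

/* Leaf predicate: only reads *w, so the parameter can be const. */
static inline bool widget_is_ready(const struct widget *w)
{
        return w->flags & 1UL;
}

/*
 * Because the leaf is const-correct, this caller can take a const
 * pointer too.  If widget_is_ready() still took a plain
 * struct widget *, passing w here would discard the qualifier and
 * GCC would warn (-Wdiscarded-qualifiers).
 */
static bool widget_check(const struct widget *w)
{
        return w && widget_is_ready(w);
}

int main(void)
{
        const struct widget w = { .flags = 1 };
        return widget_check(&w) ? 0 : 1;
}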

Link: https://lkml.kernel.org/r/20250901205021.3573313-12-max.kellermann@ionos.com
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Zankel <chris@zankel.net>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <james.bottomley@HansenPartnership.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jocelyn Falempe <jfalempe@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Nysal Jan K.A" <nysal@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Max Kellermann and committed by Andrew Morton
da004558 f346a947

+20 -20
include/linux/mm.h
@@ -719,7 +719,7 @@
 	mmap_read_unlock(vmf->vma->vm_mm);
 }
 
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
 {
 	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
 		vma_assert_locked(vmf->vma);
@@ -732,7 +732,7 @@
 	mmap_read_unlock(vmf->vma->vm_mm);
 }
 
-static inline void assert_fault_locked(struct vm_fault *vmf)
+static inline void assert_fault_locked(const struct vm_fault *vmf)
 {
 	mmap_assert_locked(vmf->vma->vm_mm);
 }
@@ -875,7 +875,7 @@
 		vma->vm_end >= vma->vm_mm->start_stack;
 }
 
-static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
+static inline bool vma_is_temporary_stack(const struct vm_area_struct *vma)
 {
 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 
@@ -889,7 +889,7 @@
 	return false;
 }
 
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
+static inline bool vma_is_foreign(const struct vm_area_struct *vma)
 {
 	if (!current->mm)
 		return true;
@@ -900,7 +900,7 @@
 	return false;
 }
 
-static inline bool vma_is_accessible(struct vm_area_struct *vma)
+static inline bool vma_is_accessible(const struct vm_area_struct *vma)
 {
 	return vma->vm_flags & VM_ACCESS_FLAGS;
 }
@@ -911,7 +911,7 @@
 		(VM_SHARED | VM_MAYWRITE);
 }
 
-static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+static inline bool vma_is_shared_maywrite(const struct vm_area_struct *vma)
 {
 	return is_shared_maywrite(vma->vm_flags);
 }
@@ -1855,7 +1855,7 @@
 }
 
 #ifdef CONFIG_MMU
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+static inline pte_t mk_pte(const struct page *page, pgprot_t pgprot)
 {
 	return pfn_pte(page_to_pfn(page), pgprot);
 }
@@ -1870,7 +1870,7 @@
  *
  * Return: A page table entry suitable for mapping this folio.
  */
-static inline pte_t folio_mk_pte(struct folio *folio, pgprot_t pgprot)
+static inline pte_t folio_mk_pte(const struct folio *folio, pgprot_t pgprot)
 {
 	return pfn_pte(folio_pfn(folio), pgprot);
 }
@@ -1886,7 +1886,7 @@
  *
 * Return: A page table entry suitable for mapping this folio.
 */
-static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
+static inline pmd_t folio_mk_pmd(const struct folio *folio, pgprot_t pgprot)
 {
 	return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
 }
@@ -1902,7 +1902,7 @@
  *
 * Return: A page table entry suitable for mapping this folio.
 */
-static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
+static inline pud_t folio_mk_pud(const struct folio *folio, pgprot_t pgprot)
 {
 	return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
 }
@@ -3520,7 +3520,7 @@
 	return mtree_load(&mm->mm_mt, addr);
 }
 
-static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+static inline unsigned long stack_guard_start_gap(const struct vm_area_struct *vma)
 {
 	if (vma->vm_flags & VM_GROWSDOWN)
 		return stack_guard_gap;
@@ -3532,7 +3532,7 @@
 	return 0;
 }
 
-static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_start_gap(const struct vm_area_struct *vma)
 {
 	unsigned long gap = stack_guard_start_gap(vma);
 	unsigned long vm_start = vma->vm_start;
@@ -3543,7 +3543,7 @@
 	return vm_start;
 }
 
-static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
+static inline unsigned long vm_end_gap(const struct vm_area_struct *vma)
 {
 	unsigned long vm_end = vma->vm_end;
 
@@ -3555,7 +3555,7 @@
 	return vm_end;
 }
 
-static inline unsigned long vma_pages(struct vm_area_struct *vma)
+static inline unsigned long vma_pages(const struct vm_area_struct *vma)
 {
 	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
@@ -3572,7 +3572,7 @@
 	return vma;
 }
 
-static inline bool range_in_vma(struct vm_area_struct *vma,
+static inline bool range_in_vma(const struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
 	return (vma && vma->vm_start <= start && end <= vma->vm_end);
@@ -3688,7 +3688,7 @@
 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
 * a (NUMA hinting) fault is required.
 */
-static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
+static inline bool gup_can_follow_protnone(const struct vm_area_struct *vma,
 					   unsigned int flags)
 {
 	/*
@@ -3818,7 +3818,7 @@
 	return static_branch_unlikely(&_debug_guardpage_enabled);
 }
 
-static inline bool page_is_guard(struct page *page)
+static inline bool page_is_guard(const struct page *page)
 {
 	if (!debug_guardpage_enabled())
 		return false;
@@ -3849,7 +3849,7 @@
 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
 static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool debug_guardpage_enabled(void) { return false; }
-static inline bool page_is_guard(struct page *page) { return false; }
+static inline bool page_is_guard(const struct page *page) { return false; }
 static inline bool set_page_guard(struct zone *zone, struct page *page,
 				  unsigned int order) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -3931,7 +3931,7 @@
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
 {
 	/* number of pfns from base where pfn_to_page() is valid */
 	if (altmap)
@@ -3945,7 +3945,7 @@
 	altmap->alloc -= nr_pfns;
 }
 #else
-static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+static inline unsigned long vmem_altmap_offset(const struct vmem_altmap *altmap)
 {
 	return 0;
 }
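
With vma_pages() and vma_is_accessible() now taking
const struct vm_area_struct *, read-only callers can hold const VMA
pointers without casts. A minimal usage sketch (the helper below is
hypothetical, not part of the patch):

/* Hypothetical read-only helper built on the const-ified accessors. */
static unsigned long count_accessible_pages(const struct vm_area_struct *vma)
{
        if (!vma || !vma_is_accessible(vma))
                return 0;
        return vma_pages(vma);
}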