
mm: constify highmem related functions for improved const-correctness

Many functions in mm/highmem.c neither write through the pointers they
are given nor call functions that take non-const pointers; they can
therefore be constified.
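
To illustrate the principle (a minimal user-space sketch with a
hypothetical struct page_like, not kernel code): a function that only
reads through its pointer can take const, which documents the contract
and lets const-correct callers use it:

#include <stdio.h>

/* Hypothetical stand-in for struct page; illustration only. */
struct page_like {
	unsigned long flags;
};

/* Reads only, so the parameter can be const. */
static int flag_is_set(const struct page_like *p, unsigned long bit)
{
	return (p->flags >> bit) & 1;
}

int main(void)
{
	const struct page_like p = { .flags = 0x2 };

	printf("%d\n", flag_is_set(&p, 1));	/* prints 1 */
	return 0;
}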

This includes functions like kunmap(), which could be implemented in a
way that writes to the pointer (e.g. to update reference counters or
mapping fields) but currently is not.

kmap(), on the other hand, cannot be constified because it calls
set_page_address(), which is non-const on some
architectures/configurations.
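
A hedged sketch of that blocking pattern (illustrative names, not the
kernel's): once a callee takes a non-const pointer, every caller that
forwards its parameter must stay non-const too, because passing a
const pointer to the writing callee would discard the qualifier and
the compiler would reject it:

struct page_like {
	void *virt;
};

/* Analogous to set_page_address(): it writes, so it is non-const. */
static void set_address(struct page_like *p, void *addr)
{
	p->virt = addr;
}

/* Cannot take `const struct page_like *` without a cast, because it
 * forwards its parameter to the writing callee above. */
static void *map_page(struct page_like *p)
{
	static char backing[64];	/* placeholder for a real mapping */

	set_address(p, backing);
	return p->virt;
}

int main(void)
{
	struct page_like p = { 0 };
	void *addr = map_page(&p);

	return addr == p.virt ? 0 : 1;
}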

[akpm@linux-foundation.org: "fix" folio_page() build failure]
Link: https://lkml.kernel.org/r/20250901205021.3573313-13-max.kellermann@ionos.com
Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Christian Zankel <chris@zankel.net>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Helge Deller <deller@gmx.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Bottomley <james.bottomley@HansenPartnership.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jocelyn Falempe <jfalempe@redhat.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Nysal Jan K.A" <nysal@linux.ibm.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Huth <thuth@redhat.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Wei Xu <weixugc@google.com>
Cc: Yuanchu Xie <yuanchu@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Max Kellermann, committed by Andrew Morton
a847b170 da004558

6 files changed, +33 -33 lines
arch/arm/include/asm/highmem.h (+3 -3)
···
 #endif
 
 #ifdef ARCH_NEEDS_KMAP_HIGH_GET
-extern void *kmap_high_get(struct page *page);
+extern void *kmap_high_get(const struct page *page);
 
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
 {
 	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
 		return NULL;
···
 #define arch_kmap_local_high_get arch_kmap_local_high_get
 
 #else /* ARCH_NEEDS_KMAP_HIGH_GET */
-static inline void *kmap_high_get(struct page *page)
+static inline void *kmap_high_get(const struct page *page)
 {
 	return NULL;
 }
arch/xtensa/include/asm/highmem.h (+1 -1)
···
 
 #if DCACHE_WAY_SIZE > PAGE_SIZE
 #define get_pkmap_color get_pkmap_color
-static inline int get_pkmap_color(struct page *page)
+static inline int get_pkmap_color(const struct page *page)
 {
 	return DCACHE_ALIAS(page_to_phys(page));
 }
include/linux/highmem-internal.h (+18 -18)
···
  */
 #ifdef CONFIG_KMAP_LOCAL
 void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
 void kunmap_local_indexed(const void *vaddr);
 void kmap_local_fork(struct task_struct *tsk);
 void __kmap_local_sched_out(void);
···
 #endif
 
 void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
+void kunmap_high(const struct page *page);
 void __kmap_flush_unused(void);
 struct page *__kmap_to_page(void *addr);
 
···
 	return addr;
 }
 
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
 {
 	might_sleep();
 	if (!PageHighMem(page))
···
 	__kmap_flush_unused();
 }
 
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
 {
 	return __kmap_local_page_prot(page, kmap_prot);
 }
 
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
 {
 	if (!PageHighMem(page))
 		return page_address(page);
···
 	return NULL;
 }
 
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
 {
-	struct page *page = folio_page(folio, offset / PAGE_SIZE);
+	const struct page *page = folio_page(folio, offset / PAGE_SIZE);
 	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
 }
 
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	return __kmap_local_page_prot(page, prot);
 }
···
 	kunmap_local_indexed(vaddr);
 }
 
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		migrate_disable();
···
 	return __kmap_local_page_prot(page, prot);
 }
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
···
 	return page_address(page);
 }
 
-static inline void kunmap_high(struct page *page) { }
+static inline void kunmap_high(const struct page *page) { }
 static inline void kmap_flush_unused(void) { }
 
-static inline void kunmap(struct page *page)
+static inline void kunmap(const struct page *page)
 {
 #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
 	kunmap_flush_on_unmap(page_address(page));
 #endif
 }
 
-static inline void *kmap_local_page(struct page *page)
+static inline void *kmap_local_page(const struct page *page)
 {
 	return page_address(page);
 }
 
-static inline void *kmap_local_page_try_from_panic(struct page *page)
+static inline void *kmap_local_page_try_from_panic(const struct page *page)
 {
 	return page_address(page);
 }
 
-static inline void *kmap_local_folio(struct folio *folio, size_t offset)
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
 {
 	return folio_address(folio) + offset;
 }
 
-static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	return kmap_local_page(page);
 }
···
 #endif
 }
 
-static inline void *kmap_atomic(struct page *page)
+static inline void *kmap_atomic(const struct page *page)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		migrate_disable();
···
 	return page_address(page);
 }
 
-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
 {
 	return kmap_atomic(page);
 }
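
As a usage note (a sketch in kernel context; peek_first_byte() is a
hypothetical helper, not part of the patch): with the constified
prototypes above, a read-only helper can now take `const struct page *`
end to end without casts:

#include <linux/highmem.h>

/* Hypothetical read-only helper: maps the page, reads one byte,
 * unmaps. Compiles against the constified prototypes without any
 * const cast. */
static u8 peek_first_byte(const struct page *page)
{
	const u8 *vaddr = kmap_local_page(page);
	u8 first = vaddr[0];

	kunmap_local(vaddr);
	return first;
}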
include/linux/highmem.h (+4 -4)
···
  * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
  * pages in the low memory area.
  */
-static inline void kunmap(struct page *page);
+static inline void kunmap(const struct page *page);
 
 /**
  * kmap_to_page - Get the page for a kmap'ed address
···
  * disabling migration in order to keep the virtual address stable across
  * preemption. No caller of kmap_local_page() can rely on this side effect.
  */
-static inline void *kmap_local_page(struct page *page);
+static inline void *kmap_local_page(const struct page *page);
 
 /**
  * kmap_local_folio - Map a page in this folio for temporary usage
···
  * Context: Can be invoked from any context.
  * Return: The virtual address of @offset.
  */
-static inline void *kmap_local_folio(struct folio *folio, size_t offset);
+static inline void *kmap_local_folio(const struct folio *folio, size_t offset);
 
 /**
  * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
···
  *   kunmap_atomic(vaddr2);
  *   kunmap_atomic(vaddr1);
  */
-static inline void *kmap_atomic(struct page *page);
+static inline void *kmap_atomic(const struct page *page);
 
 /* Highmem related interfaces for management code */
 static inline unsigned long nr_free_highpages(void);
include/linux/page-flags.h (+2 -2)
···
  * check that the page number lies within @folio; the caller is presumed
  * to have a reference to the page.
  */
-static inline struct page *folio_page(struct folio *folio, unsigned long n)
+static inline struct page *folio_page(const struct folio *folio, unsigned long n)
 {
-	return &folio->page + n;
+	return (struct page *)(&folio->page + n);
 }
 
 static __always_inline int PageTail(const struct page *page)
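
The cast added to folio_page() is the build-failure "fix" referenced in
the bracketed note in the changelog: the accessor now accepts a const
folio but must keep returning a non-const struct page * for callers
that write, so it drops the qualifier explicitly, the same
const-in/non-const-out idiom as C's strchr(). A miniature of the idiom
(hypothetical names, not kernel code):

/* Accessor takes a const container so const-correct callers can use
 * it, but returns a mutable element pointer; the cast makes the
 * qualifier drop explicit, as in strchr(). */
struct container {
	int items[4];
};

static int *container_item(const struct container *c, unsigned long n)
{
	return (int *)(c->items + n);
}

int main(void)
{
	struct container c = { .items = { 1, 2, 3, 4 } };

	*container_item(&c, 2) = 30;	/* write through the accessor */
	return c.items[2] == 30 ? 0 : 1;
}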
mm/highmem.c (+5 -5)
···
 /*
  * Determine color of virtual address where the page should be mapped.
  */
-static inline unsigned int get_pkmap_color(struct page *page)
+static inline unsigned int get_pkmap_color(const struct page *page)
 {
 	return 0;
 }
···
  *
  * This can be called from any context.
  */
-void *kmap_high_get(struct page *page)
+void *kmap_high_get(const struct page *page)
 {
 	unsigned long vaddr, flags;
 
···
  * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
  * only from user context.
  */
-void kunmap_high(struct page *page)
+void kunmap_high(const struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;
···
 #endif
 
 #ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(struct page *page)
+static inline void *arch_kmap_local_high_get(const struct page *page)
 {
 	return NULL;
 }
···
 }
 EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
 
-void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
+void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
 	void *kmap;
 