Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm/ptdump: split note_page() into level specific callbacks

Patch series "mm/ptdump: Drop assumption that pxd_val() is u64", v2.

Last argument passed down in note_page() is u64 assuming pxd_val()
returned value (all page table levels) is 64 bit - which might not be the
case going ahead when D128 page tables is enabled on arm64 platform.
Besides pxd_val() is very platform specific and its type should not be
assumed in generic MM. A similar problem exists for effective_prot(),
although it is restricted to x86 platform.

This series splits note_page() and effective_prot() into individual page
table level specific callbacks, each of which accepts the corresponding
pxd_t page table entry as an argument instead. All subscribing platforms
can then derive pxd_val() from the table entries as required and proceed
as before.

Define ptdesc_t type which describes the basic page table descriptor
layout on arm64 platform. Subsequently all level specific pxxval_t
descriptors are derived from ptdesc_t thus establishing a common original
format, which can also be appropriate for page table entries, masks and
protection values etc which are used at all page table levels.


This patch (of 3):

Last argument passed down in note_page() is u64 assuming pxd_val()
returned value (all page table levels) is 64 bit - which might not be the
case going ahead when D128 page tables is enabled on arm64 platform.
Besides pxd_val() is very platform specific and its type should not be
assumed in generic MM.

Split note_page() into individual page table level specific callbacks,
each of which accepts the corresponding pxd_t entry as an argument
instead. Subscribing platforms can then derive pxd_val() from the
entries as required and proceed as earlier.

Also add a note_page_flush() callback for flushing the last page table
page that was being handled earlier via level = -1.

Link: https://lkml.kernel.org/r/20250407053113.746295-1-anshuman.khandual@arm.com
Link: https://lkml.kernel.org/r/20250407053113.746295-2-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Anshuman Khandual and committed by
Andrew Morton
e064e738 e487a5d5

+266 -24
+14 -2
arch/arm64/include/asm/ptdump.h
··· 59 59 60 60 void ptdump_walk(struct seq_file *s, struct ptdump_info *info); 61 61 void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, 62 - u64 val); 62 + pteval_t val); 63 + void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte); 64 + void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd); 65 + void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud); 66 + void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d); 67 + void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd); 68 + void note_page_flush(struct ptdump_state *st); 63 69 #ifdef CONFIG_PTDUMP_DEBUGFS 64 70 #define EFI_RUNTIME_MAP_END DEFAULT_MAP_WINDOW_64 65 71 void __init ptdump_debugfs_register(struct ptdump_info *info, const char *name); ··· 75 69 #endif /* CONFIG_PTDUMP_DEBUGFS */ 76 70 #else 77 71 static inline void note_page(struct ptdump_state *pt_st, unsigned long addr, 78 - int level, u64 val) { } 72 + int level, pteval_t val) { } 73 + static inline void note_page_pte(struct ptdump_state *st, unsigned long addr, pte_t pte) { } 74 + static inline void note_page_pmd(struct ptdump_state *st, unsigned long addr, pmd_t pmd) { } 75 + static inline void note_page_pud(struct ptdump_state *st, unsigned long addr, pud_t pud) { } 76 + static inline void note_page_p4d(struct ptdump_state *st, unsigned long addr, p4d_t p4d) { } 77 + static inline void note_page_pgd(struct ptdump_state *st, unsigned long addr, pgd_t pgd) { } 78 + static inline void note_page_flush(struct ptdump_state *st) { } 79 79 #endif /* CONFIG_PTDUMP */ 80 80 81 81 #endif /* __ASM_PTDUMP_H */
+45 -3
arch/arm64/mm/ptdump.c
··· 189 189 } 190 190 191 191 void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, 192 - u64 val) 192 + pteval_t val) 193 193 { 194 194 struct ptdump_pg_state *st = container_of(pt_st, struct ptdump_pg_state, ptdump); 195 195 struct ptdump_pg_level *pg_level = st->pg_level; ··· 251 251 252 252 } 253 253 254 + void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) 255 + { 256 + note_page(pt_st, addr, 4, pte_val(pte)); 257 + } 258 + 259 + void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) 260 + { 261 + note_page(pt_st, addr, 3, pmd_val(pmd)); 262 + } 263 + 264 + void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) 265 + { 266 + note_page(pt_st, addr, 2, pud_val(pud)); 267 + } 268 + 269 + void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) 270 + { 271 + note_page(pt_st, addr, 1, p4d_val(p4d)); 272 + } 273 + 274 + void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) 275 + { 276 + note_page(pt_st, addr, 0, pgd_val(pgd)); 277 + } 278 + 279 + void note_page_flush(struct ptdump_state *pt_st) 280 + { 281 + pte_t pte_zero = {0}; 282 + 283 + note_page(pt_st, 0, -1, pte_val(pte_zero)); 284 + } 285 + 254 286 void ptdump_walk(struct seq_file *s, struct ptdump_info *info) 255 287 { 256 288 unsigned long end = ~0UL; ··· 298 266 .pg_level = &kernel_pg_levels[0], 299 267 .level = -1, 300 268 .ptdump = { 301 - .note_page = note_page, 269 + .note_page_pte = note_page_pte, 270 + .note_page_pmd = note_page_pmd, 271 + .note_page_pud = note_page_pud, 272 + .note_page_p4d = note_page_p4d, 273 + .note_page_pgd = note_page_pgd, 274 + .note_page_flush = note_page_flush, 302 275 .range = (struct ptdump_range[]){ 303 276 {info->base_addr, end}, 304 277 {0, 0} ··· 340 303 .level = -1, 341 304 .check_wx = true, 342 305 .ptdump = { 343 - .note_page = note_page, 306 + .note_page_pte = note_page_pte, 307 + .note_page_pmd = note_page_pmd, 308 + 
.note_page_pud = note_page_pud, 309 + .note_page_p4d = note_page_p4d, 310 + .note_page_pgd = note_page_pgd, 311 + .note_page_flush = note_page_flush, 344 312 .range = (struct ptdump_range[]) { 345 313 {_PAGE_OFFSET(vabits_actual), ~0UL}, 346 314 {0, 0}
+44 -2
arch/powerpc/mm/ptdump/ptdump.c
··· 298 298 #endif 299 299 } 300 300 301 + static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) 302 + { 303 + note_page(pt_st, addr, 4, pte_val(pte)); 304 + } 305 + 306 + static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) 307 + { 308 + note_page(pt_st, addr, 3, pmd_val(pmd)); 309 + } 310 + 311 + static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) 312 + { 313 + note_page(pt_st, addr, 2, pud_val(pud)); 314 + } 315 + 316 + static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) 317 + { 318 + note_page(pt_st, addr, 1, p4d_val(p4d)); 319 + } 320 + 321 + static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) 322 + { 323 + note_page(pt_st, addr, 0, pgd_val(pgd)); 324 + } 325 + 326 + static void note_page_flush(struct ptdump_state *pt_st) 327 + { 328 + pte_t pte_zero = {0}; 329 + 330 + note_page(pt_st, 0, -1, pte_val(pte_zero)); 331 + } 332 + 301 333 static int ptdump_show(struct seq_file *m, void *v) 302 334 { 303 335 struct pg_state st = { ··· 337 305 .marker = address_markers, 338 306 .level = -1, 339 307 .ptdump = { 340 - .note_page = note_page, 308 + .note_page_pte = note_page_pte, 309 + .note_page_pmd = note_page_pmd, 310 + .note_page_pud = note_page_pud, 311 + .note_page_p4d = note_page_p4d, 312 + .note_page_pgd = note_page_pgd, 313 + .note_page_flush = note_page_flush, 341 314 .range = ptdump_range, 342 315 } 343 316 }; ··· 375 338 .level = -1, 376 339 .check_wx = true, 377 340 .ptdump = { 378 - .note_page = note_page, 341 + .note_page_pte = note_page_pte, 342 + .note_page_pmd = note_page_pmd, 343 + .note_page_pud = note_page_pud, 344 + .note_page_p4d = note_page_p4d, 345 + .note_page_pgd = note_page_pgd, 346 + .note_page_flush = note_page_flush, 379 347 .range = ptdump_range, 380 348 } 381 349 };
+44 -2
arch/riscv/mm/ptdump.c
··· 318 318 } 319 319 } 320 320 321 + static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) 322 + { 323 + note_page(pt_st, addr, 4, pte_val(pte)); 324 + } 325 + 326 + static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) 327 + { 328 + note_page(pt_st, addr, 3, pmd_val(pmd)); 329 + } 330 + 331 + static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) 332 + { 333 + note_page(pt_st, addr, 2, pud_val(pud)); 334 + } 335 + 336 + static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) 337 + { 338 + note_page(pt_st, addr, 1, p4d_val(p4d)); 339 + } 340 + 341 + static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) 342 + { 343 + note_page(pt_st, addr, 0, pgd_val(pgd)); 344 + } 345 + 346 + static void note_page_flush(struct ptdump_state *pt_st) 347 + { 348 + pte_t pte_zero = {0}; 349 + 350 + note_page(pt_st, 0, -1, pte_val(pte_zero)); 351 + } 352 + 321 353 static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo) 322 354 { 323 355 struct pg_state st = { ··· 357 325 .marker = pinfo->markers, 358 326 .level = -1, 359 327 .ptdump = { 360 - .note_page = note_page, 328 + .note_page_pte = note_page_pte, 329 + .note_page_pmd = note_page_pmd, 330 + .note_page_pud = note_page_pud, 331 + .note_page_p4d = note_page_p4d, 332 + .note_page_pgd = note_page_pgd, 333 + .note_page_flush = note_page_flush, 361 334 .range = (struct ptdump_range[]) { 362 335 {pinfo->base_addr, pinfo->end}, 363 336 {0, 0} ··· 384 347 .level = -1, 385 348 .check_wx = true, 386 349 .ptdump = { 387 - .note_page = note_page, 350 + .note_page_pte = note_page_pte, 351 + .note_page_pmd = note_page_pmd, 352 + .note_page_pud = note_page_pud, 353 + .note_page_p4d = note_page_p4d, 354 + .note_page_pgd = note_page_pgd, 355 + .note_page_flush = note_page_flush, 388 356 .range = (struct ptdump_range[]) { 389 357 {KERN_VIRT_START, ULONG_MAX}, 390 358 {0, 0}
+44 -2
arch/s390/mm/dump_pagetables.c
··· 147 147 } 148 148 } 149 149 150 + static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) 151 + { 152 + note_page(pt_st, addr, 4, pte_val(pte)); 153 + } 154 + 155 + static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) 156 + { 157 + note_page(pt_st, addr, 3, pmd_val(pmd)); 158 + } 159 + 160 + static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) 161 + { 162 + note_page(pt_st, addr, 2, pud_val(pud)); 163 + } 164 + 165 + static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) 166 + { 167 + note_page(pt_st, addr, 1, p4d_val(p4d)); 168 + } 169 + 170 + static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) 171 + { 172 + note_page(pt_st, addr, 0, pgd_val(pgd)); 173 + } 174 + 175 + static void note_page_flush(struct ptdump_state *pt_st) 176 + { 177 + pte_t pte_zero = {0}; 178 + 179 + note_page(pt_st, 0, -1, pte_val(pte_zero)); 180 + } 181 + 150 182 bool ptdump_check_wx(void) 151 183 { 152 184 struct pg_state st = { 153 185 .ptdump = { 154 - .note_page = note_page, 186 + .note_page_pte = note_page_pte, 187 + .note_page_pmd = note_page_pmd, 188 + .note_page_pud = note_page_pud, 189 + .note_page_p4d = note_page_p4d, 190 + .note_page_pgd = note_page_pgd, 191 + .note_page_flush = note_page_flush, 155 192 .range = (struct ptdump_range[]) { 156 193 {.start = 0, .end = max_addr}, 157 194 {.start = 0, .end = 0}, ··· 227 190 { 228 191 struct pg_state st = { 229 192 .ptdump = { 230 - .note_page = note_page, 193 + .note_page_pte = note_page_pte, 194 + .note_page_pmd = note_page_pmd, 195 + .note_page_pud = note_page_pud, 196 + .note_page_p4d = note_page_p4d, 197 + .note_page_pgd = note_page_pgd, 198 + .note_page_flush = note_page_flush, 231 199 .range = (struct ptdump_range[]) { 232 200 {.start = 0, .end = max_addr}, 233 201 {.start = 0, .end = 0},
+38 -1
arch/x86/mm/dump_pagetables.c
··· 362 362 } 363 363 } 364 364 365 + static void note_page_pte(struct ptdump_state *pt_st, unsigned long addr, pte_t pte) 366 + { 367 + note_page(pt_st, addr, 4, pte_val(pte)); 368 + } 369 + 370 + static void note_page_pmd(struct ptdump_state *pt_st, unsigned long addr, pmd_t pmd) 371 + { 372 + note_page(pt_st, addr, 3, pmd_val(pmd)); 373 + } 374 + 375 + static void note_page_pud(struct ptdump_state *pt_st, unsigned long addr, pud_t pud) 376 + { 377 + note_page(pt_st, addr, 2, pud_val(pud)); 378 + } 379 + 380 + static void note_page_p4d(struct ptdump_state *pt_st, unsigned long addr, p4d_t p4d) 381 + { 382 + note_page(pt_st, addr, 1, p4d_val(p4d)); 383 + } 384 + 385 + static void note_page_pgd(struct ptdump_state *pt_st, unsigned long addr, pgd_t pgd) 386 + { 387 + note_page(pt_st, addr, 0, pgd_val(pgd)); 388 + } 389 + 390 + static void note_page_flush(struct ptdump_state *pt_st) 391 + { 392 + pte_t pte_zero = {0}; 393 + 394 + note_page(pt_st, 0, -1, pte_val(pte_zero)); 395 + } 396 + 365 397 bool ptdump_walk_pgd_level_core(struct seq_file *m, 366 398 struct mm_struct *mm, pgd_t *pgd, 367 399 bool checkwx, bool dmesg) ··· 410 378 411 379 struct pg_state st = { 412 380 .ptdump = { 413 - .note_page = note_page, 381 + .note_page_pte = note_page_pte, 382 + .note_page_pmd = note_page_pmd, 383 + .note_page_pud = note_page_pud, 384 + .note_page_p4d = note_page_p4d, 385 + .note_page_pgd = note_page_pgd, 386 + .note_page_flush = note_page_flush, 414 387 .effective_prot = effective_prot, 415 388 .range = ptdump_ranges 416 389 },
+6 -3
include/linux/ptdump.h
··· 11 11 }; 12 12 13 13 struct ptdump_state { 14 - /* level is 0:PGD to 4:PTE, or -1 if unknown */ 15 - void (*note_page)(struct ptdump_state *st, unsigned long addr, 16 - int level, u64 val); 14 + void (*note_page_pte)(struct ptdump_state *st, unsigned long addr, pte_t pte); 15 + void (*note_page_pmd)(struct ptdump_state *st, unsigned long addr, pmd_t pmd); 16 + void (*note_page_pud)(struct ptdump_state *st, unsigned long addr, pud_t pud); 17 + void (*note_page_p4d)(struct ptdump_state *st, unsigned long addr, p4d_t p4d); 18 + void (*note_page_pgd)(struct ptdump_state *st, unsigned long addr, pgd_t pgd); 19 + void (*note_page_flush)(struct ptdump_state *st); 17 20 void (*effective_prot)(struct ptdump_state *st, int level, u64 val); 18 21 const struct ptdump_range *range; 19 22 };
+31 -9
mm/ptdump.c
··· 18 18 { 19 19 struct ptdump_state *st = walk->private; 20 20 21 - st->note_page(st, addr, 4, pte_val(kasan_early_shadow_pte[0])); 21 + st->note_page_pte(st, addr, kasan_early_shadow_pte[0]); 22 22 23 23 walk->action = ACTION_CONTINUE; 24 24 ··· 42 42 st->effective_prot(st, 0, pgd_val(val)); 43 43 44 44 if (pgd_leaf(val)) { 45 - st->note_page(st, addr, 0, pgd_val(val)); 45 + st->note_page_pgd(st, addr, val); 46 46 walk->action = ACTION_CONTINUE; 47 47 } 48 48 ··· 65 65 st->effective_prot(st, 1, p4d_val(val)); 66 66 67 67 if (p4d_leaf(val)) { 68 - st->note_page(st, addr, 1, p4d_val(val)); 68 + st->note_page_p4d(st, addr, val); 69 69 walk->action = ACTION_CONTINUE; 70 70 } 71 71 ··· 88 88 st->effective_prot(st, 2, pud_val(val)); 89 89 90 90 if (pud_leaf(val)) { 91 - st->note_page(st, addr, 2, pud_val(val)); 91 + st->note_page_pud(st, addr, val); 92 92 walk->action = ACTION_CONTINUE; 93 93 } 94 94 ··· 109 109 if (st->effective_prot) 110 110 st->effective_prot(st, 3, pmd_val(val)); 111 111 if (pmd_leaf(val)) { 112 - st->note_page(st, addr, 3, pmd_val(val)); 112 + st->note_page_pmd(st, addr, val); 113 113 walk->action = ACTION_CONTINUE; 114 114 } 115 115 ··· 125 125 if (st->effective_prot) 126 126 st->effective_prot(st, 4, pte_val(val)); 127 127 128 - st->note_page(st, addr, 4, pte_val(val)); 128 + st->note_page_pte(st, addr, val); 129 129 130 130 return 0; 131 131 } ··· 134 134 int depth, struct mm_walk *walk) 135 135 { 136 136 struct ptdump_state *st = walk->private; 137 + pte_t pte_zero = {0}; 138 + pmd_t pmd_zero = {0}; 139 + pud_t pud_zero = {0}; 140 + p4d_t p4d_zero = {0}; 141 + pgd_t pgd_zero = {0}; 137 142 138 - st->note_page(st, addr, depth, 0); 139 - 143 + switch (depth) { 144 + case 4: 145 + st->note_page_pte(st, addr, pte_zero); 146 + break; 147 + case 3: 148 + st->note_page_pmd(st, addr, pmd_zero); 149 + break; 150 + case 2: 151 + st->note_page_pud(st, addr, pud_zero); 152 + break; 153 + case 1: 154 + st->note_page_p4d(st, addr, p4d_zero); 155 + break; 
156 + case 0: 157 + st->note_page_pgd(st, addr, pgd_zero); 158 + break; 159 + default: 160 + break; 161 + } 140 162 return 0; 141 163 } 142 164 ··· 184 162 mmap_write_unlock(mm); 185 163 186 164 /* Flush out the last page */ 187 - st->note_page(st, 0, -1, 0); 165 + st->note_page_flush(st); 188 166 } 189 167 190 168 static int check_wx_show(struct seq_file *m, void *v)