Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'powerpc-5.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:
"One fix for a regression introduced by our 32-bit KASAN support, which
broke booting on machines with "bootx" early debugging enabled.

A fix for a bug which broke kexec on 32-bit, introduced by changes to
the 32-bit STRICT_KERNEL_RWX support in v5.1.

Finally two fixes going to stable for our THP split/collapse handling,
discovered by Nick. The first fixes random crashes and/or corruption
in guests under sufficient load.

Thanks to: Nicholas Piggin, Christophe Leroy, Aaro Koskinen, Mathieu
Malaterre"

* tag 'powerpc-5.2-4' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/32s: fix booting with CONFIG_PPC_EARLY_DEBUG_BOOTX
powerpc/64s: __find_linux_pte() synchronization vs pmdp_invalidate()
powerpc/64s: Fix THP PMD collapse serialisation
powerpc: Fix kexec failure on book3s/32

+59 -4
+30
arch/powerpc/include/asm/book3s/64/pgtable.h
···
 876  876  	return false;
 877  877  }
 878  878  
      879 +static inline int pmd_is_serializing(pmd_t pmd)
      880 +{
      881 +	/*
      882 +	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
      883 +	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
      884 +	 *
      885 +	 * This condition may also occur when flushing a pmd while flushing
      886 +	 * it (see ptep_modify_prot_start), so callers must ensure this
      887 +	 * case is fine as well.
      888 +	 */
      889 +	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
      890 +	    cpu_to_be64(_PAGE_INVALID))
      891 +		return true;
      892 +
      893 +	return false;
      894 +}
      895 +
 879  896  static inline int pmd_bad(pmd_t pmd)
 880  897  {
 881  898  	if (radix_enabled())
···
1109 1092  #define pmd_access_permitted pmd_access_permitted
1110 1093  static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1111 1094  {
     1095 +	/*
     1096 +	 * pmdp_invalidate sets this combination (which is not caught by
     1097 +	 * !pte_present() check in pte_access_permitted), to prevent
     1098 +	 * lock-free lookups, as part of the serialize_against_pte_lookup()
     1099 +	 * synchronisation.
     1100 +	 *
     1101 +	 * This also catches the case where the PTE's hardware PRESENT bit is
     1102 +	 * cleared while TLB is flushed, which is suboptimal but should not
     1103 +	 * be frequent.
     1104 +	 */
     1105 +	if (pmd_is_serializing(pmd))
     1106 +		return false;
     1107 +
1112 1108  	return pte_access_permitted(pmd_pte(pmd), write);
1113 1109  }
1114 1110  
+4
arch/powerpc/include/asm/btext.h
···
 13  13  		int depth, int pitch);
 14  14  extern void btext_setup_display(int width, int height, int depth, int pitch,
 15  15  		unsigned long address);
     16 +#ifdef CONFIG_PPC32
 16  17  extern void btext_prepare_BAT(void);
     18 +#else
     19 +static inline void btext_prepare_BAT(void) { }
     20 +#endif
 17  21  extern void btext_map(void);
 18  22  extern void btext_unmap(void);
 19  23  
+3
arch/powerpc/include/asm/kexec.h
···
 94  94  	return crashing_cpu >= 0;
 95  95  }
 96  96  
     97 +void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
     98 +			 unsigned long start_address) __noreturn;
     99 +
 97 100  #ifdef CONFIG_KEXEC_FILE
 98 101  extern const struct kexec_file_ops kexec_elf64_ops;
 99 102  
+3 -1
arch/powerpc/kernel/machine_kexec_32.c
···
 30  30   */
 31  31  void default_machine_kexec(struct kimage *image)
 32  32  {
 33     -	extern const unsigned char relocate_new_kernel[];
 34  33  	extern const unsigned int relocate_new_kernel_size;
 35  34  	unsigned long page_list;
 36  35  	unsigned long reboot_code_buffer, reboot_code_buffer_phys;
···
 56  57  	flush_icache_range(reboot_code_buffer,
 57  58  			   reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
 58  59  	printk(KERN_INFO "Bye!\n");
     60 +
     61 +	if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
     62 +		relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
 59  63  
 60  64  	/* now call it */
 61  65  	rnk = (relocate_new_kernel_t) reboot_code_buffer;
+1
arch/powerpc/kernel/prom_init.c
···
2336 2336  		prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2337 2337  			    width, height, pitch, addr);
2338 2338  		btext_setup_display(width, height, 8, pitch, addr);
     2339 +		btext_prepare_BAT();
2339 2340  	}
2340 2341  #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2341 2342  }
+1 -1
arch/powerpc/kernel/prom_init_check.sh
···
 24  24  WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
 25  25  _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 26  26  __secondary_hold_acknowledge __secondary_hold_spinloop __start
 27     -logo_linux_clut224
     27 +logo_linux_clut224 btext_prepare_BAT
 28  28  reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 29  29  __prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 30  30  
+3
arch/powerpc/mm/book3s64/pgtable.c
···
112 112  	/*
113 113  	 * This ensures that generic code that rely on IRQ disabling
114 114  	 * to prevent a parallel THP split work as expected.
    115 +	 *
    116 +	 * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
    117 +	 * a special case check in pmd_access_permitted.
115 118  	 */
116 119  	serialize_against_pte_lookup(vma->vm_mm);
117 120  	return __pmd(old_pmd);
+14 -2
arch/powerpc/mm/pgtable.c
··· 368 368 pdshift = PMD_SHIFT; 369 369 pmdp = pmd_offset(&pud, ea); 370 370 pmd = READ_ONCE(*pmdp); 371 + 371 372 /* 372 - * A hugepage collapse is captured by pmd_none, because 373 - * it mark the pmd none and do a hpte invalidate. 373 + * A hugepage collapse is captured by this condition, see 374 + * pmdp_collapse_flush. 374 375 */ 375 376 if (pmd_none(pmd)) 376 377 return NULL; 378 + 379 + #ifdef CONFIG_PPC_BOOK3S_64 380 + /* 381 + * A hugepage split is captured by this condition, see 382 + * pmdp_invalidate. 383 + * 384 + * Huge page modification can be caught here too. 385 + */ 386 + if (pmd_is_serializing(pmd)) 387 + return NULL; 388 + #endif 377 389 378 390 if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) { 379 391 if (is_thp)