Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'powerpc-5.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux

Pull powerpc fixes from Michael Ellerman:

- Fix random crashes on some 32-bit CPUs by adding isync() after
locking/unlocking KUEP

- Fix intermittent crashes when loading modules with strict module RWX

- Fix a section mismatch introduced by a previous fix.

Thanks to Christophe Leroy, Fabiano Rosas, Laurent Vivier, Murilo
Opsfelder Araújo, Nathan Chancellor, and Stan Johnson.

# -----BEGIN PGP SIGNATURE-----

* tag 'powerpc-5.14-6' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux:
powerpc/mm: Fix set_memory_*() against concurrent accesses
powerpc/32s: Fix random crashes by adding isync() after locking/unlocking KUEP
powerpc/xive: Do not mark xive_request_ipi() as __init

+31 -14
+20
arch/powerpc/include/asm/book3s/32/kup.h
··· 4 4 5 5 #include <asm/bug.h> 6 6 #include <asm/book3s/32/mmu-hash.h> 7 + #include <asm/mmu.h> 8 + #include <asm/synch.h> 7 9 8 10 #ifndef __ASSEMBLY__ 9 11 ··· 30 28 return; 31 29 32 30 update_user_segments(mfsr(0) | SR_NX); 31 + /* 32 + * This isync() shouldn't be necessary as the kernel is not expected to 33 + * run any instruction in userspace soon after the update of segments, 34 + * but hash based cores (at least G3) seem to exhibit a random 35 + * behaviour when the 'isync' is not there. 603 cores don't have this 36 + * behaviour so don't do the 'isync' as it saves several CPU cycles. 37 + */ 38 + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) 39 + isync(); /* Context sync required after mtsr() */ 33 40 } 34 41 35 42 static inline void kuep_unlock(void) ··· 47 36 return; 48 37 49 38 update_user_segments(mfsr(0) & ~SR_NX); 39 + /* 40 + * This isync() shouldn't be necessary as a 'rfi' will soon be executed 41 + * to return to userspace, but hash based cores (at least G3) seem to 42 + * exhibit a random behaviour when the 'isync' is not there. 603 cores 43 + * don't have this behaviour so don't do the 'isync' as it saves several 44 + * CPU cycles. 45 + */ 46 + if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) 47 + isync(); /* Context sync required after mtsr() */ 50 48 } 51 49 52 50 #ifdef CONFIG_PPC_KUAP
+10 -13
arch/powerpc/mm/pageattr.c
··· 18 18 /* 19 19 * Updates the attributes of a page in three steps: 20 20 * 21 - * 1. invalidate the page table entry 22 - * 2. flush the TLB 23 - * 3. install the new entry with the updated attributes 21 + * 1. take the page_table_lock 22 + * 2. install the new entry with the updated attributes 23 + * 3. flush the TLB 24 24 * 25 - * Invalidating the pte means there are situations where this will not work 26 - * when in theory it should. 27 - * For example: 28 - * - removing write from page whilst it is being executed 29 - * - setting a page read-only whilst it is being read by another CPU 30 - * 25 + * This sequence is safe against concurrent updates, and also allows updating the 26 + * attributes of a page currently being executed or accessed. 31 27 */ 32 28 static int change_page_attr(pte_t *ptep, unsigned long addr, void *data) 33 29 { ··· 32 36 33 37 spin_lock(&init_mm.page_table_lock); 34 38 35 - /* invalidate the PTE so it's safe to modify */ 36 - pte = ptep_get_and_clear(&init_mm, addr, ptep); 37 - flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 39 + pte = ptep_get(ptep); 38 40 39 41 /* modify the PTE bits as desired, then apply */ 40 42 switch (action) { ··· 53 59 break; 54 60 } 55 61 56 - set_pte_at(&init_mm, addr, ptep, pte); 62 + pte_update(&init_mm, addr, ptep, ~0UL, pte_val(pte), 0); 57 63 58 64 /* See ptesync comment in radix__set_pte_at() */ 59 65 if (radix_enabled()) 60 66 asm volatile("ptesync": : :"memory"); 67 + 68 + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 69 + 61 70 spin_unlock(&init_mm.page_table_lock); 62 71 63 72 return 0;
+1 -1
arch/powerpc/sysdev/xive/common.c
··· 1170 1170 return ret; 1171 1171 } 1172 1172 1173 - static int __init xive_request_ipi(unsigned int cpu) 1173 + static int xive_request_ipi(unsigned int cpu) 1174 1174 { 1175 1175 struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)]; 1176 1176 int ret;