Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

LoongArch: Rework KASAN initialization for PTW-enabled systems

kasan_init_generic() indicates that kasan is fully initialized, so it
should be put at the end of kasan_init().

Otherwise, bringing up the primary CPU fails when CONFIG_KASAN is set
on PTW-enabled systems; here is the call chain:

kernel_entry()
start_kernel()
setup_arch()
kasan_init()
kasan_init_generic()

The reason is that PTW-enabled systems perform speculative accesses,
which means memory accesses to the shadow memory placed after
kasan_init() may be executed by hardware beforehand. However, accessing
shadow memory is safe only after kasan is fully initialized, because
kasan_init() uses a temporary PGD table until we have populated all
levels of shadow page tables and written the PGD register. Moving
kasan_init_generic() later defers the point at which kasan_enabled()
becomes true, so as to avoid speculative accesses to shadow pages.

After moving kasan_init_generic() to the end, kasan_init() can no longer
call kasan_mem_to_shadow() for shadow address conversion, because it
would always return kasan_early_shadow_page. On the other hand, we
should keep the current logic of kasan_mem_to_shadow() for both the
early and final stages, because there may be instrumentation before
kasan_init().

To solve this, we factor out a new mem_to_shadow() function from current
kasan_mem_to_shadow() for the shadow address conversion in kasan_init().

Cc: stable@vger.kernel.org
Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>

authored by

Tiezhu Yang and committed by
Huacai Chen
5ec5ac4c 7cb37af6

+41 -39
+41 -39
arch/loongarch/mm/kasan_init.c
··· 40 40 #define __pte_none(early, pte) (early ? pte_none(pte) : \ 41 41 ((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page))) 42 42 43 + static void *mem_to_shadow(const void *addr) 44 + { 45 + unsigned long offset = 0; 46 + unsigned long maddr = (unsigned long)addr; 47 + unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff; 48 + 49 + if (maddr >= FIXADDR_START) 50 + return (void *)(kasan_early_shadow_page); 51 + 52 + maddr &= XRANGE_SHADOW_MASK; 53 + switch (xrange) { 54 + case XKPRANGE_CC_SEG: 55 + offset = XKPRANGE_CC_SHADOW_OFFSET; 56 + break; 57 + case XKPRANGE_UC_SEG: 58 + offset = XKPRANGE_UC_SHADOW_OFFSET; 59 + break; 60 + case XKPRANGE_WC_SEG: 61 + offset = XKPRANGE_WC_SHADOW_OFFSET; 62 + break; 63 + case XKVRANGE_VC_SEG: 64 + offset = XKVRANGE_VC_SHADOW_OFFSET; 65 + break; 66 + default: 67 + WARN_ON(1); 68 + return NULL; 69 + } 70 + 71 + return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset); 72 + } 73 + 43 74 void *kasan_mem_to_shadow(const void *addr) 44 75 { 45 - if (!kasan_enabled()) { 76 + if (kasan_enabled()) 77 + return mem_to_shadow(addr); 78 + else 46 79 return (void *)(kasan_early_shadow_page); 47 - } else { 48 - unsigned long maddr = (unsigned long)addr; 49 - unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff; 50 - unsigned long offset = 0; 51 - 52 - if (maddr >= FIXADDR_START) 53 - return (void *)(kasan_early_shadow_page); 54 - 55 - maddr &= XRANGE_SHADOW_MASK; 56 - switch (xrange) { 57 - case XKPRANGE_CC_SEG: 58 - offset = XKPRANGE_CC_SHADOW_OFFSET; 59 - break; 60 - case XKPRANGE_UC_SEG: 61 - offset = XKPRANGE_UC_SHADOW_OFFSET; 62 - break; 63 - case XKPRANGE_WC_SEG: 64 - offset = XKPRANGE_WC_SHADOW_OFFSET; 65 - break; 66 - case XKVRANGE_VC_SEG: 67 - offset = XKVRANGE_VC_SHADOW_OFFSET; 68 - break; 69 - default: 70 - WARN_ON(1); 71 - return NULL; 72 - } 73 - 74 - return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset); 75 - } 76 80 } 77 81 78 82 const void *kasan_shadow_to_mem(const void 
*shadow_addr) ··· 297 293 /* Maps everything to a single page of zeroes */ 298 294 kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); 299 295 300 - kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START), 301 - kasan_mem_to_shadow((void *)KFENCE_AREA_END)); 302 - 303 - /* Enable KASAN here before kasan_mem_to_shadow(). */ 304 - kasan_init_generic(); 296 + kasan_populate_early_shadow(mem_to_shadow((void *)VMALLOC_START), 297 + mem_to_shadow((void *)KFENCE_AREA_END)); 305 298 306 299 /* Populate the linear mapping */ 307 300 for_each_mem_range(i, &pa_start, &pa_end) { ··· 308 307 if (start >= end) 309 308 break; 310 309 311 - kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), 312 - (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE); 310 + kasan_map_populate((unsigned long)mem_to_shadow(start), 311 + (unsigned long)mem_to_shadow(end), NUMA_NO_NODE); 313 312 } 314 313 315 314 /* Populate modules mapping */ 316 - kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR), 317 - (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE); 315 + kasan_map_populate((unsigned long)mem_to_shadow((void *)MODULES_VADDR), 316 + (unsigned long)mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE); 318 317 /* 319 318 * KAsan may reuse the contents of kasan_early_shadow_pte directly, so we 320 319 * should make sure that it maps the zero page read-only. ··· 329 328 330 329 /* At this point kasan is fully initialized. Enable error messages */ 331 330 init_task.kasan_depth = 0; 331 + kasan_init_generic(); 332 332 }