Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'spectre' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM spectre updates from Russell King:
"These are the currently known final bits that resolve the Spectre
issues. big.LITTLE systems used to be sufficiently identical in that
there were no differences between individual CPUs in the system that
mattered to the kernel. With the advent of the Spectre problem, the
CPUs now have differences in how the workaround is applied.

As a result of previous Spectre patches, these systems ended up
reporting quite a lot of:

"CPUx: Spectre v2: incorrect context switching function, system vulnerable"

messages due to the action of the big.LITTLE switcher causing the CPUs
to be re-initialised regularly. This series resolves that issue by
making the CPU vtable unique to each CPU.

However, since this is used very early, before per-cpu is set up,
per-cpu can't be used. We also have a problem that two of the methods
are not called from preempt-safe paths, but thankfully these remain
identical between all CPUs in the system. To make sure, we validate
that these are identical during boot"

* 'spectre' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: spectre-v2: per-CPU vtables to work around big.Little systems
ARM: add PROC_VTABLE and PROC_TABLE macros
ARM: clean up per-processor check_bugs method call
ARM: split out processor lookup
ARM: make lookup_processor_type() non-__init

+115 -49
+1
arch/arm/include/asm/cputype.h
··· 111 111 #include <linux/kernel.h> 112 112 113 113 extern unsigned int processor_id; 114 + struct proc_info_list *lookup_processor(u32 midr); 114 115 115 116 #ifdef CONFIG_CPU_CP15 116 117 #define read_cpuid(reg) \
+49 -12
arch/arm/include/asm/proc-fns.h
··· 23 23 /* 24 24 * Don't change this structure - ASM code relies on it. 25 25 */ 26 - extern struct processor { 26 + struct processor { 27 27 /* MISC 28 28 * get data abort address/flags 29 29 */ ··· 79 79 unsigned int suspend_size; 80 80 void (*do_suspend)(void *); 81 81 void (*do_resume)(void *); 82 - } processor; 82 + }; 83 83 84 84 #ifndef MULTI_CPU 85 + static inline void init_proc_vtable(const struct processor *p) 86 + { 87 + } 88 + 85 89 extern void cpu_proc_init(void); 86 90 extern void cpu_proc_fin(void); 87 91 extern int cpu_do_idle(void); ··· 102 98 extern void cpu_do_suspend(void *); 103 99 extern void cpu_do_resume(void *); 104 100 #else 105 - #define cpu_proc_init processor._proc_init 106 - #define cpu_proc_fin processor._proc_fin 107 - #define cpu_reset processor.reset 108 - #define cpu_do_idle processor._do_idle 109 - #define cpu_dcache_clean_area processor.dcache_clean_area 110 - #define cpu_set_pte_ext processor.set_pte_ext 111 - #define cpu_do_switch_mm processor.switch_mm 112 101 113 - /* These three are private to arch/arm/kernel/suspend.c */ 114 - #define cpu_do_suspend processor.do_suspend 115 - #define cpu_do_resume processor.do_resume 102 + extern struct processor processor; 103 + #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) 104 + #include <linux/smp.h> 105 + /* 106 + * This can't be a per-cpu variable because we need to access it before 107 + * per-cpu has been initialised. We have a couple of functions that are 108 + * called in a pre-emptible context, and so can't use smp_processor_id() 109 + * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the 110 + * function pointers for these are identical across all CPUs. 
111 + */ 112 + extern struct processor *cpu_vtable[]; 113 + #define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f 114 + #define PROC_TABLE(f) cpu_vtable[0]->f 115 + static inline void init_proc_vtable(const struct processor *p) 116 + { 117 + unsigned int cpu = smp_processor_id(); 118 + *cpu_vtable[cpu] = *p; 119 + WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area != 120 + cpu_vtable[0]->dcache_clean_area); 121 + WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext != 122 + cpu_vtable[0]->set_pte_ext); 123 + } 124 + #else 125 + #define PROC_VTABLE(f) processor.f 126 + #define PROC_TABLE(f) processor.f 127 + static inline void init_proc_vtable(const struct processor *p) 128 + { 129 + processor = *p; 130 + } 131 + #endif 132 + 133 + #define cpu_proc_init PROC_VTABLE(_proc_init) 134 + #define cpu_check_bugs PROC_VTABLE(check_bugs) 135 + #define cpu_proc_fin PROC_VTABLE(_proc_fin) 136 + #define cpu_reset PROC_VTABLE(reset) 137 + #define cpu_do_idle PROC_VTABLE(_do_idle) 138 + #define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area) 139 + #define cpu_set_pte_ext PROC_TABLE(set_pte_ext) 140 + #define cpu_do_switch_mm PROC_VTABLE(switch_mm) 141 + 142 + /* These two are private to arch/arm/kernel/suspend.c */ 143 + #define cpu_do_suspend PROC_VTABLE(do_suspend) 144 + #define cpu_do_resume PROC_VTABLE(do_resume) 116 145 #endif 117 146 118 147 extern void cpu_resume(void);
+2 -2
arch/arm/kernel/bugs.c
··· 6 6 void check_other_bugs(void) 7 7 { 8 8 #ifdef MULTI_CPU 9 - if (processor.check_bugs) 10 - processor.check_bugs(); 9 + if (cpu_check_bugs) 10 + cpu_check_bugs(); 11 11 #endif 12 12 } 13 13
+3 -3
arch/arm/kernel/head-common.S
··· 145 145 #endif 146 146 .size __mmap_switched_data, . - __mmap_switched_data 147 147 148 + __FINIT 149 + .text 150 + 148 151 /* 149 152 * This provides a C-API version of __lookup_processor_type 150 153 */ ··· 158 155 mov r0, r5 159 156 ldmfd sp!, {r4 - r6, r9, pc} 160 157 ENDPROC(lookup_processor_type) 161 - 162 - __FINIT 163 - .text 164 158 165 159 /* 166 160 * Read processor ID register (CP#15, CR0), and look up in the linker-built
+27 -17
arch/arm/kernel/setup.c
··· 114 114 115 115 #ifdef MULTI_CPU 116 116 struct processor processor __ro_after_init; 117 + #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) 118 + struct processor *cpu_vtable[NR_CPUS] = { 119 + [0] = &processor, 120 + }; 121 + #endif 117 122 #endif 118 123 #ifdef MULTI_TLB 119 124 struct cpu_tlb_fns cpu_tlb __ro_after_init; ··· 671 666 } 672 667 #endif 673 668 669 + /* 670 + * locate processor in the list of supported processor types. The linker 671 + * builds this table for us from the entries in arch/arm/mm/proc-*.S 672 + */ 673 + struct proc_info_list *lookup_processor(u32 midr) 674 + { 675 + struct proc_info_list *list = lookup_processor_type(midr); 676 + 677 + if (!list) { 678 + pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", 679 + smp_processor_id(), midr); 680 + while (1) 681 + /* can't use cpu_relax() here as it may require MMU setup */; 682 + } 683 + 684 + return list; 685 + } 686 + 674 687 static void __init setup_processor(void) 675 688 { 676 - struct proc_info_list *list; 677 - 678 - /* 679 - * locate processor in the list of supported processor 680 - * types. 
The linker builds this table for us from the 681 - * entries in arch/arm/mm/proc-*.S 682 - */ 683 - list = lookup_processor_type(read_cpuid_id()); 684 - if (!list) { 685 - pr_err("CPU configuration botched (ID %08x), unable to continue.\n", 686 - read_cpuid_id()); 687 - while (1); 688 - } 689 + unsigned int midr = read_cpuid_id(); 690 + struct proc_info_list *list = lookup_processor(midr); 689 691 690 692 cpu_name = list->cpu_name; 691 693 __cpu_architecture = __get_cpu_architecture(); 692 694 693 - #ifdef MULTI_CPU 694 - processor = *list->proc; 695 - #endif 695 + init_proc_vtable(list->proc); 696 696 #ifdef MULTI_TLB 697 697 cpu_tlb = *list->tlb; 698 698 #endif ··· 709 699 #endif 710 700 711 701 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", 712 - cpu_name, read_cpuid_id(), read_cpuid_id() & 15, 702 + list->cpu_name, midr, midr & 15, 713 703 proc_arch[cpu_architecture()], get_cr()); 714 704 715 705 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
+31
arch/arm/kernel/smp.c
··· 42 42 #include <asm/mmu_context.h> 43 43 #include <asm/pgtable.h> 44 44 #include <asm/pgalloc.h> 45 + #include <asm/procinfo.h> 45 46 #include <asm/processor.h> 46 47 #include <asm/sections.h> 47 48 #include <asm/tlbflush.h> ··· 103 102 #endif 104 103 } 105 104 105 + #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) 106 + static int secondary_biglittle_prepare(unsigned int cpu) 107 + { 108 + if (!cpu_vtable[cpu]) 109 + cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); 110 + 111 + return cpu_vtable[cpu] ? 0 : -ENOMEM; 112 + } 113 + 114 + static void secondary_biglittle_init(void) 115 + { 116 + init_proc_vtable(lookup_processor(read_cpuid_id())->proc); 117 + } 118 + #else 119 + static int secondary_biglittle_prepare(unsigned int cpu) 120 + { 121 + return 0; 122 + } 123 + 124 + static void secondary_biglittle_init(void) 125 + { 126 + } 127 + #endif 128 + 106 129 int __cpu_up(unsigned int cpu, struct task_struct *idle) 107 130 { 108 131 int ret; 109 132 110 133 if (!smp_ops.smp_boot_secondary) 111 134 return -ENOSYS; 135 + 136 + ret = secondary_biglittle_prepare(cpu); 137 + if (ret) 138 + return ret; 112 139 113 140 /* 114 141 * We need to tell the secondary core where to find ··· 387 358 { 388 359 struct mm_struct *mm = &init_mm; 389 360 unsigned int cpu; 361 + 362 + secondary_biglittle_init(); 390 363 391 364 /* 392 365 * The identity mapping is uncached (strongly ordered), so
+2 -15
arch/arm/mm/proc-v7-bugs.c
··· 52 52 case ARM_CPU_PART_CORTEX_A17: 53 53 case ARM_CPU_PART_CORTEX_A73: 54 54 case ARM_CPU_PART_CORTEX_A75: 55 - if (processor.switch_mm != cpu_v7_bpiall_switch_mm) 56 - goto bl_error; 57 55 per_cpu(harden_branch_predictor_fn, cpu) = 58 56 harden_branch_predictor_bpiall; 59 57 spectre_v2_method = "BPIALL"; ··· 59 61 60 62 case ARM_CPU_PART_CORTEX_A15: 61 63 case ARM_CPU_PART_BRAHMA_B15: 62 - if (processor.switch_mm != cpu_v7_iciallu_switch_mm) 63 - goto bl_error; 64 64 per_cpu(harden_branch_predictor_fn, cpu) = 65 65 harden_branch_predictor_iciallu; 66 66 spectre_v2_method = "ICIALLU"; ··· 84 88 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 85 89 if ((int)res.a0 != 0) 86 90 break; 87 - if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) 88 - goto bl_error; 89 91 per_cpu(harden_branch_predictor_fn, cpu) = 90 92 call_hvc_arch_workaround_1; 91 - processor.switch_mm = cpu_v7_hvc_switch_mm; 93 + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; 92 94 spectre_v2_method = "hypervisor"; 93 95 break; 94 96 ··· 95 101 ARM_SMCCC_ARCH_WORKAROUND_1, &res); 96 102 if ((int)res.a0 != 0) 97 103 break; 98 - if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) 99 - goto bl_error; 100 104 per_cpu(harden_branch_predictor_fn, cpu) = 101 105 call_smc_arch_workaround_1; 102 - processor.switch_mm = cpu_v7_smc_switch_mm; 106 + cpu_do_switch_mm = cpu_v7_smc_switch_mm; 103 107 spectre_v2_method = "firmware"; 104 108 break; 105 109 ··· 111 119 if (spectre_v2_method) 112 120 pr_info("CPU%u: Spectre v2: using %s workaround\n", 113 121 smp_processor_id(), spectre_v2_method); 114 - return; 115 - 116 - bl_error: 117 - pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", 118 - cpu); 119 122 } 120 123 #else 121 124 static void cpu_v7_spectre_init(void)