Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Convert the SSB mitigation to the attack vector controls which got
forgotten at the time

- Prevent the CPUID topology hierarchy detection on AMD from
overwriting the correct initial APIC ID

- Fix the case of a machine shipping without microcode in the BIOS, in
the AMD microcode loader

- Correct the Pentium 4 model range which has a constant TSC

* tag 'x86_urgent_for_v6.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/bugs: Add attack vector controls for SSB
x86/cpu/topology: Use initial APIC ID from XTOPOLOGY leaf on AMD/HYGON
x86/microcode/AMD: Handle the case of no BIOS microcode
x86/cpu/intel: Fix the constant_tsc model check for Pentium 4

+47 -18
+1 -4
Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
@@ -215,7 +215,7 @@
 Spectre_v2_user       X        X                                   * (Note 1)
 SRBDS           X     X        X        X
 SRSO            X     X        X        X
-SSB                                                                  (Note 4)
+SSB                   X
 TAA             X     X        X        X                          * (Note 2)
 TSA             X     X        X        X
 =============== ============== ============ ============= ============== ============ ========
@@ -228,9 +228,6 @@
 
 3 -- Disables SMT if cross-thread mitigations are fully enabled, the CPU is
      vulnerable, and STIBP is not supported
-
-4 -- Speculative store bypass is always enabled by default (no kernel
-     mitigation applied) unless overridden with spec_store_bypass_disable option
 
 When an attack-vector is disabled, all mitigations for the vulnerabilities
 listed in the above table are disabled, unless mitigation is required for a
+9
arch/x86/kernel/cpu/bugs.c
@@ -416,6 +416,10 @@
 	       cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
 	       cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
 	       (smt_mitigations != SMT_MITIGATIONS_OFF);
+
+	case X86_BUG_SPEC_STORE_BYPASS:
+		return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
+
 	default:
 		WARN(1, "Unknown bug %x\n", bug);
 		return false;
@@ -2714,6 +2710,11 @@
 		ssb_mode = SPEC_STORE_BYPASS_DISABLE;
 		break;
 	case SPEC_STORE_BYPASS_CMD_AUTO:
+		if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
+			ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+		else
+			ssb_mode = SPEC_STORE_BYPASS_NONE;
+		break;
 	case SPEC_STORE_BYPASS_CMD_PRCTL:
 		ssb_mode = SPEC_STORE_BYPASS_PRCTL;
 		break;
+1 -1
arch/x86/kernel/cpu/intel.c
@@ -262,7 +262,7 @@
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
+	} else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
 		   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	}
+20 -2
arch/x86/kernel/cpu/microcode/amd.c
@@ -171,8 +171,28 @@
 		return 1;
 	}
 
+static u32 cpuid_to_ucode_rev(unsigned int val)
+{
+	union zen_patch_rev p = {};
+	union cpuid_1_eax c;
+
+	c.full = val;
+
+	p.stepping  = c.stepping;
+	p.model     = c.model;
+	p.ext_model = c.ext_model;
+	p.ext_fam   = c.ext_fam;
+
+	return p.ucode_rev;
+}
+
 static bool need_sha_check(u32 cur_rev)
 {
+	if (!cur_rev) {
+		cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
+		pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
+	}
+
 	switch (cur_rev >> 8) {
 	case 0x80012: return cur_rev <= 0x800126f; break;
 	case 0x80082: return cur_rev <= 0x800820f; break;
@@ -768,8 +748,6 @@
 
 	n.equiv_cpu = equiv_cpu;
 	n.patch_id  = uci->cpu_sig.rev;
-
-	WARN_ON_ONCE(!n.patch_id);
 
 	list_for_each_entry(p, &microcode_cache, plist)
 		if (patch_cpus_equivalent(p, &n, false))
+16 -11
arch/x86/kernel/cpu/topology_amd.c
@@ -81,20 +81,25 @@
 
 	cpuid_leaf(0x8000001e, &leaf);
 
-	tscan->c->topo.initial_apicid = leaf.ext_apic_id;
-
 	/*
-	 * If leaf 0xb is available, then the domain shifts are set
-	 * already and nothing to do here. Only valid for family >= 0x17.
+	 * If leaf 0xb/0x26 is available, then the APIC ID and the domain
+	 * shifts are set already.
 	 */
-	if (!has_topoext && tscan->c->x86 >= 0x17) {
-		/*
-		 * Leaf 0x80000008 set the CORE domain shift already.
-		 * Update the SMT domain, but do not propagate it.
-		 */
-		unsigned int nthreads = leaf.core_nthreads + 1;
+	if (!has_topoext) {
+		tscan->c->topo.initial_apicid = leaf.ext_apic_id;
 
-		topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
+		/*
+		 * Leaf 0x8000008 sets the CORE domain shift but not the
+		 * SMT domain shift. On CPUs with family >= 0x17, there
+		 * might be hyperthreads.
+		 */
+		if (tscan->c->x86 >= 0x17) {
+			/* Update the SMT domain, but do not propagate it. */
+			unsigned int nthreads = leaf.core_nthreads + 1;
+
+			topology_update_dom(tscan, TOPO_SMT_DOMAIN,
+					    get_count_order(nthreads), nthreads);
+		}
 	}
 
 	store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);