Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull cpu hotplug fixes from Thomas Gleixner:
"Two fixes for the cpu hotplug machinery:

- Replace the overly clever 'SMT disabled by BIOS' detection logic as
it breaks KVM scenarios and prevents speculation control updates
when the Hyperthreads are brought online late after boot.

- Remove a redundant invocation of the speculation control update
function"

* 'smp-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
cpu/hotplug: Fix "SMT disabled by BIOS" detection for KVM
x86/speculation: Remove redundant arch_smt_update() invocation

+9 -39
+1 -1
arch/x86/kernel/cpu/bugs.c
···
   71   71 	 * identify_boot_cpu() initialized SMT support information, let the
   72   72 	 * core code know.
   73   73 	 */
   74        -	cpu_smt_check_topology_early();
        74   +	cpu_smt_check_topology();
   75   75 
   76   76 	if (!IS_ENABLED(CONFIG_SMP)) {
   77   77 		pr_info("CPU: ");
+2 -1
arch/x86/kvm/vmx/vmx.c
···
   26   26  #include <linux/mod_devicetable.h>
   27   27  #include <linux/mm.h>
   28   28  #include <linux/sched.h>
        29  +#include <linux/sched/smt.h>
   29   30  #include <linux/slab.h>
   30   31  #include <linux/tboot.h>
   31   32  #include <linux/trace_events.h>
···
 6824 6823 	 * Warn upon starting the first VM in a potentially
 6825 6824 	 * insecure environment.
 6826 6825 	 */
 6827       -	if (cpu_smt_control == CPU_SMT_ENABLED)
      6826  +	if (sched_smt_active())
 6828 6827 		pr_warn_once(L1TF_MSG_SMT);
 6829 6828 	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
 6830 6829 		pr_warn_once(L1TF_MSG_L1D);
-2
include/linux/cpu.h
···
  180  180  #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
  181  181  extern enum cpuhp_smt_control cpu_smt_control;
  182  182  extern void cpu_smt_disable(bool force);
  183       -extern void cpu_smt_check_topology_early(void);
  184  183  extern void cpu_smt_check_topology(void);
  185  184  #else
  186  185  # define cpu_smt_control	(CPU_SMT_ENABLED)
  187  186  static inline void cpu_smt_disable(bool force) { }
  188       -static inline void cpu_smt_check_topology_early(void) { }
  189  187  static inline void cpu_smt_check_topology(void) { }
  190  188  #endif
  191  189 
+5 -33
kernel/cpu.c
···
  376  376 
  377  377  #ifdef CONFIG_HOTPLUG_SMT
  378  378  enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
  379       -EXPORT_SYMBOL_GPL(cpu_smt_control);
  380       -
  381       -static bool cpu_smt_available __read_mostly;
  382  379 
  383  380  void __init cpu_smt_disable(bool force)
  384  381  {
···
  394  397 
  395  398  /*
  396  399   * The decision whether SMT is supported can only be done after the full
  397       - * CPU identification. Called from architecture code before non boot CPUs
  398       - * are brought up.
  399       - */
  400       -void __init cpu_smt_check_topology_early(void)
  401       -{
  402       -	if (!topology_smt_supported())
  403       -		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
  404       -}
  405       -
  406       -/*
  407       - * If SMT was disabled by BIOS, detect it here, after the CPUs have been
  408       - * brought online. This ensures the smt/l1tf sysfs entries are consistent
  409       - * with reality. cpu_smt_available is set to true during the bringup of non
  410       - * boot CPUs when a SMT sibling is detected. Note, this may overwrite
  411       - * cpu_smt_control's previous setting.
       400  + * CPU identification. Called from architecture code.
  412  401   */
  413  402  void __init cpu_smt_check_topology(void)
  414  403  {
  415       -	if (!cpu_smt_available)
       404  +	if (!topology_smt_supported())
  416  405  		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
  417  406  }
  418  407 
···
  411  428 
  412  429  static inline bool cpu_smt_allowed(unsigned int cpu)
  413  430  {
  414       -	if (topology_is_primary_thread(cpu))
       431  +	if (cpu_smt_control == CPU_SMT_ENABLED)
  415  432  		return true;
  416  433 
  417       -	/*
  418       -	 * If the CPU is not a 'primary' thread and the booted_once bit is
  419       -	 * set then the processor has SMT support. Store this information
  420       -	 * for the late check of SMT support in cpu_smt_check_topology().
  421       -	 */
  422       -	if (per_cpu(cpuhp_state, cpu).booted_once)
  423       -		cpu_smt_available = true;
  424       -
  425       -	if (cpu_smt_control == CPU_SMT_ENABLED)
       434  +	if (topology_is_primary_thread(cpu))
  426  435  		return true;
  427  436 
  428  437  	/*
···
 2065 2090 		 */
 2066 2091 		cpuhp_offline_cpu_device(cpu);
 2067 2092 	}
 2068       -	if (!ret) {
      2093  +	if (!ret)
 2069 2094 		cpu_smt_control = ctrlval;
 2070       -		arch_smt_update();
 2071       -	}
 2072 2095 	cpu_maps_update_done();
 2073 2096 	return ret;
 2074 2097 }
···
 2077 2104 
 2078 2105 	cpu_maps_update_begin();
 2079 2106 	cpu_smt_control = CPU_SMT_ENABLED;
 2080       -	arch_smt_update();
 2081 2107 	for_each_present_cpu(cpu) {
 2082 2108 		/* Skip online CPUs and CPUs on offline nodes */
 2083 2109 		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+1
kernel/sched/fair.c
···
 5980 5980 
 5981 5981  #ifdef CONFIG_SCHED_SMT
 5982 5982  DEFINE_STATIC_KEY_FALSE(sched_smt_present);
      5983  +EXPORT_SYMBOL_GPL(sched_smt_present);
 5983 5984 
 5984 5985  static inline void set_idle_cores(int cpu, int val)
 5985 5986  {
-2
kernel/smp.c
···
  584  584 		num_nodes, (num_nodes > 1 ? "s" : ""),
  585  585 		num_cpus, (num_cpus > 1 ? "s" : ""));
  586  586 
  587       -	/* Final decision about SMT support */
  588       -	cpu_smt_check_topology();
  589  587  	/* Any cleanup work */
  590  588  	smp_cpus_done(setup_max_cpus);
  591  589  }