Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"Last-minute x86 fixes:

- Fix a softlockup detector warning and long delays if using ptdump
with KASAN enabled.

- Two more TSC-adjust fixes for interesting firmware interactions.

- Two commits to fix an AMD CPU topology enumeration bug that caused
a measurable gaming performance regression"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm/ptdump: Fix soft lockup in page table walker
x86/tsc: Make the TSC ADJUST sanitizing work for tsc_reliable
x86/tsc: Avoid the large time jump when sanitizing TSC ADJUST
x86/CPU/AMD: Fix Zen SMT topology
x86/CPU/AMD: Bring back Compute Unit ID

+38 -15
+1
arch/x86/include/asm/processor.h
··· 104 104 __u8 x86_phys_bits; 105 105 /* CPUID returned core id bits: */ 106 106 __u8 x86_coreid_bits; 107 + __u8 cu_id; 107 108 /* Max extended CPUID function supported: */ 108 109 __u32 extended_cpuid_level; 109 110 /* Maximum supported CPUID level, -1=no CPUID: */
+15 -1
arch/x86/kernel/cpu/amd.c
··· 309 309 310 310 /* get information required for multi-node processors */ 311 311 if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { 312 + u32 eax, ebx, ecx, edx; 312 313 313 - node_id = cpuid_ecx(0x8000001e) & 7; 314 + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx); 315 + 316 + node_id = ecx & 0xff; 317 + smp_num_siblings = ((ebx >> 8) & 0xff) + 1; 318 + 319 + if (c->x86 == 0x15) 320 + c->cu_id = ebx & 0xff; 321 + 322 + if (c->x86 >= 0x17) { 323 + c->cpu_core_id = ebx & 0xff; 324 + 325 + if (smp_num_siblings > 1) 326 + c->x86_max_cores /= smp_num_siblings; 327 + } 314 328 315 329 /* 316 330 * We may have multiple LLCs if L3 caches exist, so check if we
+1
arch/x86/kernel/cpu/common.c
··· 1015 1015 c->x86_model_id[0] = '\0'; /* Unset */ 1016 1016 c->x86_max_cores = 1; 1017 1017 c->x86_coreid_bits = 0; 1018 + c->cu_id = 0xff; 1018 1019 #ifdef CONFIG_X86_64 1019 1020 c->x86_clflush_size = 64; 1020 1021 c->x86_phys_bits = 36;
+9 -3
arch/x86/kernel/smpboot.c
··· 433 433 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 434 434 435 435 if (c->phys_proc_id == o->phys_proc_id && 436 - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && 437 - c->cpu_core_id == o->cpu_core_id) 438 - return topology_sane(c, o, "smt"); 436 + per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) { 437 + if (c->cpu_core_id == o->cpu_core_id) 438 + return topology_sane(c, o, "smt"); 439 + 440 + if ((c->cu_id != 0xff) && 441 + (o->cu_id != 0xff) && 442 + (c->cu_id == o->cu_id)) 443 + return topology_sane(c, o, "smt"); 444 + } 439 445 440 446 } else if (c->phys_proc_id == o->phys_proc_id && 441 447 c->cpu_core_id == o->cpu_core_id) {
+3 -2
arch/x86/kernel/tsc.c
··· 1356 1356 (unsigned long)cpu_khz / 1000, 1357 1357 (unsigned long)cpu_khz % 1000); 1358 1358 1359 + /* Sanitize TSC ADJUST before cyc2ns gets initialized */ 1360 + tsc_store_and_check_tsc_adjust(true); 1361 + 1359 1362 /* 1360 1363 * Secondary CPUs do not run through tsc_init(), so set up 1361 1364 * all the scale factors for all CPUs, assuming the same ··· 1389 1386 1390 1387 if (unsynchronized_tsc()) 1391 1388 mark_tsc_unstable("TSCs unsynchronized"); 1392 - else 1393 - tsc_store_and_check_tsc_adjust(true); 1394 1389 1395 1390 check_system_tsc_reliable(); 1396 1391
+7 -9
arch/x86/kernel/tsc_sync.c
··· 286 286 if (unsynchronized_tsc()) 287 287 return; 288 288 289 - if (tsc_clocksource_reliable) { 290 - if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING) 291 - pr_info( 292 - "Skipped synchronization checks as TSC is reliable.\n"); 293 - return; 294 - } 295 - 296 289 /* 297 290 * Set the maximum number of test runs to 298 291 * 1 if the CPU does not provide the TSC_ADJUST MSR ··· 373 380 int cpus = 2; 374 381 375 382 /* Also aborts if there is no TSC. */ 376 - if (unsynchronized_tsc() || tsc_clocksource_reliable) 383 + if (unsynchronized_tsc()) 377 384 return; 378 385 379 386 /* 380 387 * Store, verify and sanitize the TSC adjust register. If 381 388 * successful skip the test. 389 + * 390 + * The test is also skipped when the TSC is marked reliable. This 391 + * is true for SoCs which have no fallback clocksource. On these 392 + * SoCs the TSC is frequency synchronized, but still the TSC ADJUST 393 + * register might have been wreckaged by the BIOS.. 382 394 */ 383 - if (tsc_store_and_check_tsc_adjust(false)) { 395 + if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) { 384 396 atomic_inc(&skip_test); 385 397 return; 386 398 }
+2
arch/x86/mm/dump_pagetables.c
··· 15 15 #include <linux/debugfs.h> 16 16 #include <linux/mm.h> 17 17 #include <linux/init.h> 18 + #include <linux/sched.h> 18 19 #include <linux/seq_file.h> 19 20 20 21 #include <asm/pgtable.h> ··· 407 406 } else 408 407 note_page(m, &st, __pgprot(0), 1); 409 408 409 + cond_resched(); 410 410 start++; 411 411 } 412 412