Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Reset the why-the-system-rebooted register on AMD to avoid stale bits
remaining from previous boots

- Add a missing barrier in the TLB flushing code to prevent erroneously
not flushing a TLB generation

- Make sure cpa_flush() does not overshoot when computing the end range
of a flush region

- Fix resctrl bandwidth counting on AMD systems when the number of
monitoring groups created exceeds the number the hardware can track

* tag 'x86_urgent_for_v6.18_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/CPU/AMD: Prevent reset reasons from being retained across reboot
x86/mm: Fix SMP ordering in switch_mm_irqs_off()
x86/mm: Fix overflow in __cpa_addr()
x86/resctrl: Fix miscount of bandwidth event when reactivating previously unavailable RMID

+47 -9
+14 -2
arch/x86/kernel/cpu/amd.c
··· 1355 1355 return 0; 1356 1356 1357 1357 value = ioread32(addr); 1358 - iounmap(addr); 1359 1358 1360 1359 /* Value with "all bits set" is an error response and should be ignored. */ 1361 - if (value == U32_MAX) 1360 + if (value == U32_MAX) { 1361 + iounmap(addr); 1362 1362 return 0; 1363 + } 1364 + 1365 + /* 1366 + * Clear all reason bits so they won't be retained if the next reset 1367 + * does not update the register. Besides, some bits are never cleared by 1368 + * hardware so it's software's responsibility to clear them. 1369 + * 1370 + * Writing the value back effectively clears all reason bits as they are 1371 + * write-1-to-clear. 1372 + */ 1373 + iowrite32(value, addr); 1374 + iounmap(addr); 1363 1375 1364 1376 for (i = 0; i < ARRAY_SIZE(s5_reset_reason_txt); i++) { 1365 1377 if (!(value & BIT(i)))
+10 -4
arch/x86/kernel/cpu/resctrl/monitor.c
··· 242 242 u32 unused, u32 rmid, enum resctrl_event_id eventid, 243 243 u64 *val, void *ignored) 244 244 { 245 + struct rdt_hw_mon_domain *hw_dom = resctrl_to_arch_mon_dom(d); 245 246 int cpu = cpumask_any(&d->hdr.cpu_mask); 247 + struct arch_mbm_state *am; 246 248 u64 msr_val; 247 249 u32 prmid; 248 250 int ret; ··· 253 251 254 252 prmid = logical_rmid_to_physical_rmid(cpu, rmid); 255 253 ret = __rmid_read_phys(prmid, eventid, &msr_val); 256 - if (ret) 257 - return ret; 258 254 259 - *val = get_corrected_val(r, d, rmid, eventid, msr_val); 255 + if (!ret) { 256 + *val = get_corrected_val(r, d, rmid, eventid, msr_val); 257 + } else if (ret == -EINVAL) { 258 + am = get_arch_mbm_state(hw_dom, rmid, eventid); 259 + if (am) 260 + am->prev_msr = 0; 261 + } 260 262 261 - return 0; 263 + return ret; 262 264 } 263 265 264 266 static int __cntr_id_read(u32 cntr_id, u64 *val)
+1 -1
arch/x86/mm/pat/set_memory.c
··· 446 446 } 447 447 448 448 start = fix_addr(__cpa_addr(cpa, 0)); 449 - end = fix_addr(__cpa_addr(cpa, cpa->numpages)); 449 + end = start + cpa->numpages * PAGE_SIZE; 450 450 if (cpa->force_flush_all) 451 451 end = TLB_FLUSH_ALL; 452 452
+22 -2
arch/x86/mm/tlb.c
··· 911 911 * CR3 and cpu_tlbstate.loaded_mm are not all in sync. 912 912 */ 913 913 this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); 914 - barrier(); 915 914 916 - /* Start receiving IPIs and then read tlb_gen (and LAM below) */ 915 + /* 916 + * Make sure this CPU is set in mm_cpumask() such that we'll 917 + * receive invalidation IPIs. 918 + * 919 + * Rely on the smp_mb() implied by cpumask_set_cpu()'s atomic 920 + * operation, or explicitly provide one. Such that: 921 + * 922 + * switch_mm_irqs_off() flush_tlb_mm_range() 923 + * smp_store_release(loaded_mm, SWITCHING); atomic64_inc_return(tlb_gen) 924 + * smp_mb(); // here // smp_mb() implied 925 + * atomic64_read(tlb_gen); this_cpu_read(loaded_mm); 926 + * 927 + * we properly order against flush_tlb_mm_range(), where the 928 + * loaded_mm load can happen in native_flush_tlb_multi() -> 929 + * should_flush_tlb(). 930 + * 931 + * This way switch_mm() must see the new tlb_gen or 932 + * flush_tlb_mm_range() must see the new loaded_mm, or both. 933 + */ 917 934 if (next != &init_mm && !cpumask_test_cpu(cpu, mm_cpumask(next))) 918 935 cpumask_set_cpu(cpu, mm_cpumask(next)); 936 + else 937 + smp_mb(); 938 + 919 939 next_tlb_gen = atomic64_read(&next->context.tlb_gen); 920 940 921 941 ns = choose_new_asid(next, next_tlb_gen);