Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- AMD Memory bandwidth counter width fix, by Babu Moger.

- Use the proper length type in the 32-bit truncate() syscall variant,
by Jiri Slaby.

- Reinit IA32_FEAT_CTL during wakeup to fix the case where after
resume, VMXON would #GP due to VMX not being properly enabled, by
Sean Christopherson.

- Fix a static checker warning in the resctrl code, by Dan Carpenter.

- Add a CR4 pinning mask for bits which cannot change after boot, by
Kees Cook.

- Align the start of the loop of __clear_user() to 16 bytes, to improve
performance on AMD zen1 and zen2 microarchitectures, by Matt Fleming.

* tag 'x86_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/asm/64: Align start of __clear_user() loop to 16-bytes
x86/cpu: Use pinning mask for CR4 bits needing to be 0
x86/resctrl: Fix a NULL vs IS_ERR() static checker warning in rdt_cdp_peer_get()
x86/cpu: Reinitialize IA32_FEAT_CTL MSR on BSP during wakeup
syscalls: Fix offset type of ksys_ftruncate()
x86/resctrl: Fix memory bandwidth counter width for AMD

+33 -21
+5
arch/x86/include/asm/cpu.h
··· 58 58 return false; 59 59 } 60 60 #endif 61 + #ifdef CONFIG_IA32_FEAT_CTL 62 + void init_ia32_feat_ctl(struct cpuinfo_x86 *c); 63 + #else 64 + static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} 65 + #endif 61 66 #endif /* _ASM_X86_CPU_H */
+1
arch/x86/kernel/cpu/centaur.c
··· 3 3 #include <linux/sched.h> 4 4 #include <linux/sched/clock.h> 5 5 6 + #include <asm/cpu.h> 6 7 #include <asm/cpufeature.h> 7 8 #include <asm/e820/api.h> 8 9 #include <asm/mtrr.h>
+12 -12
arch/x86/kernel/cpu/common.c
··· 347 347 cr4_clear_bits(X86_CR4_UMIP); 348 348 } 349 349 350 + /* These bits should not change their value after CPU init is finished. */ 351 + static const unsigned long cr4_pinned_mask = 352 + X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE; 350 353 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning); 351 354 static unsigned long cr4_pinned_bits __ro_after_init; 352 355 ··· 374 371 375 372 void native_write_cr4(unsigned long val) 376 373 { 377 - unsigned long bits_missing = 0; 374 + unsigned long bits_changed = 0; 378 375 379 376 set_register: 380 377 asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits)); 381 378 382 379 if (static_branch_likely(&cr_pinning)) { 383 - if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) { 384 - bits_missing = ~val & cr4_pinned_bits; 385 - val |= bits_missing; 380 - if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) { 381 + bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits; 382 + val = (val & ~cr4_pinned_mask) | cr4_pinned_bits; 386 383 goto set_register; 387 384 } 388 - /* Warn after we've set the missing bits. */ 389 - WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n", 390 - bits_missing); 385 + /* Warn after we've corrected the changed bits. */ 386 + WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n", 387 + bits_changed); 391 388 } 392 389 } 393 390 #if IS_MODULE(CONFIG_LKDTM) ··· 422 419 if (boot_cpu_has(X86_FEATURE_PCID)) 423 420 cr4 |= X86_CR4_PCIDE; 424 421 if (static_branch_likely(&cr_pinning)) 425 - cr4 |= cr4_pinned_bits; 422 + cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits; 426 423 427 424 __write_cr4(cr4); 428 425 ··· 437 434 */ 438 435 static void __init setup_cr_pinning(void) 439 436 { 440 - unsigned long mask; 441 - 442 - mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP); 443 - cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask; 437 + cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask; 444 438 static_key_enable(&cr_pinning.key); 445 439 446 440
-4
arch/x86/kernel/cpu/cpu.h
··· 81 81 82 82 extern u64 x86_read_arch_cap_msr(void); 83 83 84 - #ifdef CONFIG_IA32_FEAT_CTL 85 - void init_ia32_feat_ctl(struct cpuinfo_x86 *c); 86 - #endif 87 - 88 84 #endif /* ARCH_X86_CPU_H */
+4 -4
arch/x86/kernel/cpu/resctrl/core.c
··· 981 981 982 982 c->x86_cache_max_rmid = ecx; 983 983 c->x86_cache_occ_scale = ebx; 984 - if (c->x86_vendor == X86_VENDOR_INTEL) 985 - c->x86_cache_mbm_width_offset = eax & 0xff; 986 - else 987 - c->x86_cache_mbm_width_offset = -1; 984 + c->x86_cache_mbm_width_offset = eax & 0xff; 985 + 986 + if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) 987 + c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; 988 988 } 989 989 } 990 990
+1
arch/x86/kernel/cpu/resctrl/internal.h
··· 37 37 #define MBA_IS_LINEAR 0x4 38 38 #define MBA_MAX_MBPS U32_MAX 39 39 #define MAX_MBA_BW_AMD 0x800 40 + #define MBM_CNTR_WIDTH_OFFSET_AMD 20 40 41 41 42 #define RMID_VAL_ERROR BIT_ULL(63) 42 43 #define RMID_VAL_UNAVAIL BIT_ULL(62)
+1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 1117 1117 _d_cdp = rdt_find_domain(_r_cdp, d->id, NULL); 1118 1118 if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) { 1119 1119 _r_cdp = NULL; 1120 + _d_cdp = NULL; 1120 1121 ret = -EINVAL; 1121 1122 } 1122 1123
+1
arch/x86/kernel/cpu/zhaoxin.c
··· 2 2 #include <linux/sched.h> 3 3 #include <linux/sched/clock.h> 4 4 5 + #include <asm/cpu.h> 5 6 #include <asm/cpufeature.h> 6 7 7 8 #include "cpu.h"
+1
arch/x86/lib/usercopy_64.c
··· 24 24 asm volatile( 25 25 " testq %[size8],%[size8]\n" 26 26 " jz 4f\n" 27 + " .align 16\n" 27 28 "0: movq $0,(%[dst])\n" 28 29 " addq $8,%[dst]\n" 29 30 " decl %%ecx ; jnz 0b\n"
+6
arch/x86/power/cpu.c
··· 193 193 */ 194 194 static void notrace __restore_processor_state(struct saved_context *ctxt) 195 195 { 196 + struct cpuinfo_x86 *c; 197 + 196 198 if (ctxt->misc_enable_saved) 197 199 wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); 198 200 /* ··· 265 263 mtrr_bp_restore(); 266 264 perf_restore_debug_store(); 267 265 msr_restore_context(ctxt); 266 + 267 + c = &cpu_data(smp_processor_id()); 268 + if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) 269 + init_ia32_feat_ctl(c); 268 270 } 269 271 270 272 /* Needed by apm.c */
+1 -1
include/linux/syscalls.h
··· 1360 1360 1361 1361 extern long do_sys_ftruncate(unsigned int fd, loff_t length, int small); 1362 1362 1363 - static inline long ksys_ftruncate(unsigned int fd, unsigned long length) 1363 + static inline long ksys_ftruncate(unsigned int fd, loff_t length) 1364 1364 { 1365 1365 return do_sys_ftruncate(fd, length, 1); 1366 1366 }