Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Make sure a kdump kernel with CONFIG_IMA_KEXEC enabled and booted on
an AMD SME enabled hardware properly decrypts the ima_kexec buffer
information passed to it from the previous kernel

- Fix building the kernel with Clang where a non-TLS definition of the
stack protector guard cookie leads to bogus code generation

- Clear a wrongly advertised virtualized VMLOAD/VMSAVE feature flag on
some Zen4 client systems as those insns are not supported on client

* tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm: Fix a kdump kernel failure on SME system when CONFIG_IMA_KEXEC=y
x86/stackprotector: Work around strict Clang TLS symbol requirements
x86/CPU/AMD: Clear virtualized VMLOAD/VMSAVE on Zen4 client

+42 -4
+3 -2
arch/x86/Makefile
···
 ifeq ($(CONFIG_STACKPROTECTOR),y)
         ifeq ($(CONFIG_SMP),y)
-                KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+                KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
+                                 -mstack-protector-guard-symbol=__ref_stack_chk_guard
         else
-                KBUILD_CFLAGS += -mstack-protector-guard=global
+                KBUILD_CFLAGS += -mstack-protector-guard=global
         endif
 endif
 else
+16
arch/x86/entry/entry.S
···
 .popsection
 
 THUNK warn_thunk_thunk, __warn_thunk
+
+#ifndef CONFIG_X86_64
+/*
+ * Clang's implementation of TLS stack cookies requires the variable in
+ * question to be a TLS variable. If the variable happens to be defined as an
+ * ordinary variable with external linkage in the same compilation unit (which
+ * amounts to the whole of vmlinux with LTO enabled), Clang will drop the
+ * segment register prefix from the references, resulting in broken code. Work
+ * around this by avoiding the symbol used in -mstack-protector-guard-symbol=
+ * entirely in the C code, and use an alias emitted by the linker script
+ * instead.
+ */
+#ifdef CONFIG_STACKPROTECTOR
+EXPORT_SYMBOL(__ref_stack_chk_guard);
+#endif
+#endif
+3
arch/x86/include/asm/asm-prototypes.h
···
 extern void cmpxchg8b_emu(void);
 #endif
 
+#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
+extern unsigned long __ref_stack_chk_guard;
+#endif
+11
arch/x86/kernel/cpu/amd.c
···
 {
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+
+	/*
+	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
+	 * in some BIOS versions but they can lead to random host reboots.
+	 */
+	switch (c->x86_model) {
+	case 0x18 ... 0x1f:
+	case 0x60 ... 0x7f:
+		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
+		break;
+	}
 }
 
 static void init_amd_zen5(struct cpuinfo_x86 *c)
+2
arch/x86/kernel/cpu/common.c
···
 #ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+#ifndef CONFIG_SMP
 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
+#endif
 #endif
 
 #endif /* CONFIG_X86_64 */
+3
arch/x86/kernel/vmlinux.lds.S
···
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
+/* needed for Clang - see arch/x86/entry/entry.S */
+PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+
 #ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
+4 -2
arch/x86/mm/ioremap.c
···
 	paddr_next = data->next;
 	len = data->len;
 
-	if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+	if ((phys_addr > paddr) &&
+	    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 		memunmap(data);
 		return true;
 	}
···
 	paddr_next = data->next;
 	len = data->len;
 
-	if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+	if ((phys_addr > paddr) &&
+	    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 		early_memunmap(data, sizeof(*data));
 		return true;
 	}