Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86-urgent-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc x86 fixes from Ingo Molnar:

- Fix PAT on Xen, which caused i915 driver failures

- Fix compat INT 80 entry crash on Xen PV guests

- Fix 'MMIO Stale Data' mitigation status reporting on older Intel CPUs

- Fix RSB stuffing regressions

- Fix ORC unwinding on ftrace trampolines

- Add Intel Raptor Lake CPU model number

- Fix (work around) a SEV-SNP bootloader bug providing bogus values in
boot_params->cc_blob_address, by ignoring the value on !SEV-SNP
bootups.

- Fix SEV-SNP early boot failure

- Fix the objtool list of noreturn functions and annotate snp_abort();
  this bug confused objtool on gcc-12.

- Fix the documentation for retbleed

* tag 'x86-urgent-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
Documentation/ABI: Mention retbleed vulnerability info file for sysfs
x86/sev: Mark snp_abort() noreturn
x86/sev: Don't use cc_platform_has() for early SEV-SNP calls
x86/boot: Don't propagate uninitialized boot_params->cc_blob_address
x86/cpu: Add new Raptor Lake CPU model number
x86/unwind/orc: Unwind ftrace trampolines with correct ORC entry
x86/nospec: Fix i386 RSB stuffing
x86/nospec: Unwreck the RSB stuffing
x86/bugs: Add "unknown" reporting for MMIO Stale Data
x86/entry: Fix entry_INT80_compat for Xen PV guests
x86/PAT: Have pat_enabled() properly reflect state when running on Xen

+188 -93
+1
Documentation/ABI/testing/sysfs-devices-system-cpu
··· 523 523 /sys/devices/system/cpu/vulnerabilities/tsx_async_abort 524 524 /sys/devices/system/cpu/vulnerabilities/itlb_multihit 525 525 /sys/devices/system/cpu/vulnerabilities/mmio_stale_data 526 + /sys/devices/system/cpu/vulnerabilities/retbleed 526 527 Date: January 2018 527 528 Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> 528 529 Description: Information about CPU vulnerabilities
+14
Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
··· 230 230 * - 'Mitigation: Clear CPU buffers' 231 231 - The processor is vulnerable and the CPU buffer clearing mitigation is 232 232 enabled. 233 + * - 'Unknown: No mitigations' 234 + - The processor vulnerability status is unknown because it is 235 + out of Servicing period. Mitigation is not attempted. 236 + 237 + Definitions: 238 + ------------ 239 + 240 + Servicing period: The process of providing functional and security updates to 241 + Intel processors or platforms, utilizing the Intel Platform Update (IPU) 242 + process or other similar mechanisms. 243 + 244 + End of Servicing Updates (ESU): ESU is the date at which Intel will no 245 + longer provide Servicing, such as through IPU or other similar update 246 + processes. ESU dates will typically be aligned to end of quarter. 233 247 234 248 If the processor is vulnerable then the following information is appended to 235 249 the above information:
+11 -1
arch/x86/boot/compressed/misc.h
··· 132 132 void snp_set_page_shared(unsigned long paddr); 133 133 void sev_prep_identity_maps(unsigned long top_level_pgt); 134 134 #else 135 - static inline void sev_enable(struct boot_params *bp) { } 135 + static inline void sev_enable(struct boot_params *bp) 136 + { 137 + /* 138 + * bp->cc_blob_address should only be set by boot/compressed kernel. 139 + * Initialize it to 0 unconditionally (thus here in this stub too) to 140 + * ensure that uninitialized values from buggy bootloaders aren't 141 + * propagated. 142 + */ 143 + if (bp) 144 + bp->cc_blob_address = 0; 145 + } 136 146 static inline void sev_es_shutdown_ghcb(void) { } 137 147 static inline bool sev_es_check_ghcb_fault(unsigned long address) 138 148 {
+8
arch/x86/boot/compressed/sev.c
··· 277 277 bool snp; 278 278 279 279 /* 280 + * bp->cc_blob_address should only be set by boot/compressed kernel. 281 + * Initialize it to 0 to ensure that uninitialized values from 282 + * buggy bootloaders aren't propagated. 283 + */ 284 + if (bp) 285 + bp->cc_blob_address = 0; 286 + 287 + /* 280 288 * Setup/preliminary detection of SNP. This will be sanity-checked 281 289 * against CPUID/MSR values later. 282 290 */
+1 -1
arch/x86/entry/entry_64_compat.S
··· 311 311 * Interrupts are off on entry. 312 312 */ 313 313 ASM_CLAC /* Do this early to minimize exposure */ 314 - SWAPGS 314 + ALTERNATIVE "swapgs", "", X86_FEATURE_XENPV 315 315 316 316 /* 317 317 * User tracing code (ptrace or signal handlers) might assume that
+3 -2
arch/x86/include/asm/cpufeatures.h
··· 457 457 #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */ 458 458 #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */ 459 459 #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */ 460 - #define X86_BUG_RETBLEED X86_BUG(26) /* CPU is affected by RETBleed */ 461 - #define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ 460 + #define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */ 461 + #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ 462 + #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ 462 463 463 464 #endif /* _ASM_X86_CPUFEATURES_H */
+2
arch/x86/include/asm/intel-family.h
··· 27 27 * _X - regular server parts 28 28 * _D - micro server parts 29 29 * _N,_P - other mobile parts 30 + * _S - other client parts 30 31 * 31 32 * Historical OPTDIFFs: 32 33 * ··· 113 112 114 113 #define INTEL_FAM6_RAPTORLAKE 0xB7 115 114 #define INTEL_FAM6_RAPTORLAKE_P 0xBA 115 + #define INTEL_FAM6_RAPTORLAKE_S 0xBF 116 116 117 117 /* "Small Core" Processors (Atom) */ 118 118
+51 -41
arch/x86/include/asm/nospec-branch.h
··· 35 35 #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ 36 36 37 37 /* 38 + * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN. 39 + */ 40 + #define __FILL_RETURN_SLOT \ 41 + ANNOTATE_INTRA_FUNCTION_CALL; \ 42 + call 772f; \ 43 + int3; \ 44 + 772: 45 + 46 + /* 47 + * Stuff the entire RSB. 48 + * 38 49 * Google experimented with loop-unrolling and this turned out to be 39 50 * the optimal version - two calls, each with their own speculation 40 51 * trap should their return address end up getting used, in a loop. 41 52 */ 42 - #define __FILL_RETURN_BUFFER(reg, nr, sp) \ 43 - mov $(nr/2), reg; \ 44 - 771: \ 45 - ANNOTATE_INTRA_FUNCTION_CALL; \ 46 - call 772f; \ 47 - 773: /* speculation trap */ \ 48 - UNWIND_HINT_EMPTY; \ 49 - pause; \ 50 - lfence; \ 51 - jmp 773b; \ 52 - 772: \ 53 - ANNOTATE_INTRA_FUNCTION_CALL; \ 54 - call 774f; \ 55 - 775: /* speculation trap */ \ 56 - UNWIND_HINT_EMPTY; \ 57 - pause; \ 58 - lfence; \ 59 - jmp 775b; \ 60 - 774: \ 61 - add $(BITS_PER_LONG/8) * 2, sp; \ 62 - dec reg; \ 63 - jnz 771b; \ 64 - /* barrier for jnz misprediction */ \ 53 + #ifdef CONFIG_X86_64 54 + #define __FILL_RETURN_BUFFER(reg, nr) \ 55 + mov $(nr/2), reg; \ 56 + 771: \ 57 + __FILL_RETURN_SLOT \ 58 + __FILL_RETURN_SLOT \ 59 + add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \ 60 + dec reg; \ 61 + jnz 771b; \ 62 + /* barrier for jnz misprediction */ \ 63 + lfence; 64 + #else 65 + /* 66 + * i386 doesn't unconditionally have LFENCE, as such it can't 67 + * do a loop. 68 + */ 69 + #define __FILL_RETURN_BUFFER(reg, nr) \ 70 + .rept nr; \ 71 + __FILL_RETURN_SLOT; \ 72 + .endr; \ 73 + add $(BITS_PER_LONG/8) * nr, %_ASM_SP; 74 + #endif 75 + 76 + /* 77 + * Stuff a single RSB slot. 78 + * 79 + * To mitigate Post-Barrier RSB speculation, one CALL instruction must be 80 + * forced to retire before letting a RET instruction execute. 81 + * 82 + * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed 83 + * before this point. 
84 + */ 85 + #define __FILL_ONE_RETURN \ 86 + __FILL_RETURN_SLOT \ 87 + add $(BITS_PER_LONG/8), %_ASM_SP; \ 65 88 lfence; 66 89 67 90 #ifdef __ASSEMBLY__ ··· 155 132 #endif 156 133 .endm 157 134 158 - .macro ISSUE_UNBALANCED_RET_GUARD 159 - ANNOTATE_INTRA_FUNCTION_CALL 160 - call .Lunbalanced_ret_guard_\@ 161 - int3 162 - .Lunbalanced_ret_guard_\@: 163 - add $(BITS_PER_LONG/8), %_ASM_SP 164 - lfence 165 - .endm 166 - 167 135 /* 168 136 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP 169 137 * monstrosity above, manually. 170 138 */ 171 - .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2 172 - .ifb \ftr2 173 - ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr 174 - .else 175 - ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2 176 - .endif 177 - __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP) 178 - .Lunbalanced_\@: 179 - ISSUE_UNBALANCED_RET_GUARD 139 + .macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS) 140 + ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \ 141 + __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \ 142 + __stringify(__FILL_ONE_RETURN), \ftr2 143 + 180 144 .Lskip_rsb_\@: 181 145 .endm 182 146
+1 -1
arch/x86/include/asm/sev.h
··· 195 195 void snp_set_memory_private(unsigned long vaddr, unsigned int npages); 196 196 void snp_set_wakeup_secondary_cpu(void); 197 197 bool snp_init(struct boot_params *bp); 198 - void snp_abort(void); 198 + void __init __noreturn snp_abort(void); 199 199 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, unsigned long *fw_err); 200 200 #else 201 201 static inline void sev_es_ist_enter(struct pt_regs *regs) { }
+12 -2
arch/x86/kernel/cpu/bugs.c
··· 433 433 u64 ia32_cap; 434 434 435 435 if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || 436 - cpu_mitigations_off()) { 436 + boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) || 437 + cpu_mitigations_off()) { 437 438 mmio_mitigation = MMIO_MITIGATION_OFF; 438 439 return; 439 440 } ··· 539 538 pr_info("TAA: %s\n", taa_strings[taa_mitigation]); 540 539 if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) 541 540 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]); 541 + else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 542 + pr_info("MMIO Stale Data: Unknown: No mitigations\n"); 542 543 } 543 544 544 545 static void __init md_clear_select_mitigation(void) ··· 2278 2275 2279 2276 static ssize_t mmio_stale_data_show_state(char *buf) 2280 2277 { 2278 + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2279 + return sysfs_emit(buf, "Unknown: No mitigations\n"); 2280 + 2281 2281 if (mmio_mitigation == MMIO_MITIGATION_OFF) 2282 2282 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); 2283 2283 ··· 2427 2421 return srbds_show_state(buf); 2428 2422 2429 2423 case X86_BUG_MMIO_STALE_DATA: 2424 + case X86_BUG_MMIO_UNKNOWN: 2430 2425 return mmio_stale_data_show_state(buf); 2431 2426 2432 2427 case X86_BUG_RETBLEED: ··· 2487 2480 2488 2481 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) 2489 2482 { 2490 - return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 2483 + if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN)) 2484 + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN); 2485 + else 2486 + return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); 2491 2487 } 2492 2488 2493 2489 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
+27 -15
arch/x86/kernel/cpu/common.c
··· 1135 1135 #define NO_SWAPGS BIT(6) 1136 1136 #define NO_ITLB_MULTIHIT BIT(7) 1137 1137 #define NO_SPECTRE_V2 BIT(8) 1138 - #define NO_EIBRS_PBRSB BIT(9) 1138 + #define NO_MMIO BIT(9) 1139 + #define NO_EIBRS_PBRSB BIT(10) 1139 1140 1140 1141 #define VULNWL(vendor, family, model, whitelist) \ 1141 1142 X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist) ··· 1159 1158 VULNWL(VORTEX, 6, X86_MODEL_ANY, NO_SPECULATION), 1160 1159 1161 1160 /* Intel Family 6 */ 1161 + VULNWL_INTEL(TIGERLAKE, NO_MMIO), 1162 + VULNWL_INTEL(TIGERLAKE_L, NO_MMIO), 1163 + VULNWL_INTEL(ALDERLAKE, NO_MMIO), 1164 + VULNWL_INTEL(ALDERLAKE_L, NO_MMIO), 1165 + 1162 1166 VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT), 1163 1167 VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT), 1164 1168 VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT), ··· 1182 1176 VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT), 1183 1177 VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), 1184 1178 1185 - VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), 1186 - VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT), 1187 - VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), 1179 + VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1180 + VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1181 + VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB), 1188 1182 1189 1183 /* 1190 1184 * Technically, swapgs isn't serializing on AMD (despite it previously ··· 1199 1193 VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB), 1200 1194 1201 1195 /* AMD Family 0xf - 0x12 */ 1202 - VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1203 - 
VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1204 - VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1205 - VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1196 + VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1197 + VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1198 + VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1199 + VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1206 1200 1207 1201 /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */ 1208 - VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1209 - VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT), 1202 + VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1203 + VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO), 1210 1204 1211 1205 /* Zhaoxin Family 7 */ 1212 - VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), 1213 - VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS), 1206 + VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), 1207 + VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO), 1214 1208 {} 1215 1209 }; 1216 1210 ··· 1364 1358 * Affected CPU list is generally enough to enumerate the vulnerability, 1365 1359 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may 1366 1360 * not want the guest to enumerate the bug. 1361 + * 1362 + * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist, 1363 + * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits. 
1367 1364 */ 1368 - if (cpu_matches(cpu_vuln_blacklist, MMIO) && 1369 - !arch_cap_mmio_immune(ia32_cap)) 1370 - setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); 1365 + if (!arch_cap_mmio_immune(ia32_cap)) { 1366 + if (cpu_matches(cpu_vuln_blacklist, MMIO)) 1367 + setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA); 1368 + else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO)) 1369 + setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN); 1370 + } 1371 1371 1372 1372 if (!cpu_has(c, X86_FEATURE_BTC_NO)) { 1373 1373 if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+15 -3
arch/x86/kernel/sev.c
··· 701 701 void __init early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr, 702 702 unsigned int npages) 703 703 { 704 - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) 704 + /* 705 + * This can be invoked in early boot while running identity mapped, so 706 + * use an open coded check for SNP instead of using cc_platform_has(). 707 + * This eliminates worries about jump tables or checking boot_cpu_data 708 + * in the cc_platform_has() function. 709 + */ 710 + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) 705 711 return; 706 712 707 713 /* ··· 723 717 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr, 724 718 unsigned int npages) 725 719 { 726 - if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) 720 + /* 721 + * This can be invoked in early boot while running identity mapped, so 722 + * use an open coded check for SNP instead of using cc_platform_has(). 723 + * This eliminates worries about jump tables or checking boot_cpu_data 724 + * in the cc_platform_has() function. 725 + */ 726 + if (!(sev_status & MSR_AMD64_SEV_SNP_ENABLED)) 727 727 return; 728 728 729 729 /* Invalidate the memory pages before they are marked shared in the RMP table. */ ··· 2112 2100 return true; 2113 2101 } 2114 2102 2115 - void __init snp_abort(void) 2103 + void __init __noreturn snp_abort(void) 2116 2104 { 2117 2105 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED); 2118 2106 }
+10 -5
arch/x86/kernel/unwind_orc.c
··· 93 93 static struct orc_entry *orc_ftrace_find(unsigned long ip) 94 94 { 95 95 struct ftrace_ops *ops; 96 - unsigned long caller; 96 + unsigned long tramp_addr, offset; 97 97 98 98 ops = ftrace_ops_trampoline(ip); 99 99 if (!ops) 100 100 return NULL; 101 101 102 + /* Set tramp_addr to the start of the code copied by the trampoline */ 102 103 if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) 103 - caller = (unsigned long)ftrace_regs_call; 104 + tramp_addr = (unsigned long)ftrace_regs_caller; 104 105 else 105 - caller = (unsigned long)ftrace_call; 106 + tramp_addr = (unsigned long)ftrace_caller; 107 + 108 + /* Now place tramp_addr to the location within the trampoline ip is at */ 109 + offset = ip - ops->trampoline; 110 + tramp_addr += offset; 106 111 107 112 /* Prevent unlikely recursion */ 108 - if (ip == caller) 113 + if (ip == tramp_addr) 109 114 return NULL; 110 115 111 - return orc_find(caller); 116 + return orc_find(tramp_addr); 112 117 } 113 118 #else 114 119 static struct orc_entry *orc_ftrace_find(unsigned long ip)
+9 -1
arch/x86/mm/pat/memtype.c
··· 62 62 63 63 static bool __read_mostly pat_bp_initialized; 64 64 static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT); 65 + static bool __initdata pat_force_disabled = !IS_ENABLED(CONFIG_X86_PAT); 65 66 static bool __read_mostly pat_bp_enabled; 66 67 static bool __read_mostly pat_cm_initialized; 67 68 ··· 87 86 static int __init nopat(char *str) 88 87 { 89 88 pat_disable("PAT support disabled via boot option."); 89 + pat_force_disabled = true; 90 90 return 0; 91 91 } 92 92 early_param("nopat", nopat); ··· 274 272 wrmsrl(MSR_IA32_CR_PAT, pat); 275 273 } 276 274 277 - void init_cache_modes(void) 275 + void __init init_cache_modes(void) 278 276 { 279 277 u64 pat = 0; 280 278 ··· 315 313 */ 316 314 pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) | 317 315 PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC); 316 + } else if (!pat_force_disabled && cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) { 317 + /* 318 + * Clearly PAT is enabled underneath. Allow pat_enabled() to 319 + * reflect this. 320 + */ 321 + pat_bp_enabled = true; 318 322 } 319 323 320 324 __init_cache_modes(pat);
+23 -21
tools/objtool/check.c
··· 162 162 163 163 /* 164 164 * Unfortunately these have to be hard coded because the noreturn 165 - * attribute isn't provided in ELF data. 165 + * attribute isn't provided in ELF data. Keep 'em sorted. 166 166 */ 167 167 static const char * const global_noreturns[] = { 168 - "__stack_chk_fail", 169 - "panic", 170 - "do_exit", 171 - "do_task_dead", 172 - "kthread_exit", 173 - "make_task_dead", 174 - "__module_put_and_kthread_exit", 175 - "kthread_complete_and_exit", 176 - "__reiserfs_panic", 177 - "lbug_with_loc", 178 - "fortify_panic", 179 - "usercopy_abort", 180 - "machine_real_restart", 181 - "rewind_stack_and_make_dead", 182 - "kunit_try_catch_throw", 183 - "xen_start_kernel", 184 - "cpu_bringup_and_idle", 185 - "do_group_exit", 186 - "stop_this_cpu", 187 168 "__invalid_creds", 188 - "cpu_startup_entry", 169 + "__module_put_and_kthread_exit", 170 + "__reiserfs_panic", 171 + "__stack_chk_fail", 189 172 "__ubsan_handle_builtin_unreachable", 173 + "cpu_bringup_and_idle", 174 + "cpu_startup_entry", 175 + "do_exit", 176 + "do_group_exit", 177 + "do_task_dead", 190 178 "ex_handler_msr_mce", 179 + "fortify_panic", 180 + "kthread_complete_and_exit", 181 + "kthread_exit", 182 + "kunit_try_catch_throw", 183 + "lbug_with_loc", 184 + "machine_real_restart", 185 + "make_task_dead", 186 + "panic", 187 + "rewind_stack_and_make_dead", 188 + "sev_es_terminate", 189 + "snp_abort", 190 + "stop_this_cpu", 191 + "usercopy_abort", 192 + "xen_start_kernel", 191 193 }; 192 194 193 195 if (!func)