Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

- Remove a transitional asm/cpuid.h header which was added only as a
fallback during cpuid helpers reorg

- Initialize reserved fields in the SVSM page validation calls
structure to zero in order to allow for future structure extensions

- Have the sev-guest driver's buffers used in encryption operations be
in linear mapping space as the encryption operation can be offloaded
to an accelerator

- Have writes to a read-only MSR in an AMD SNP guest trap to the
hypervisor, as is usually done. This makes the guest user
experience better by simply raising a #GP instead of terminating said
guest

- Do not output AVX512 elapsed time for kernel threads because the data
is wrong, and fix a NULL pointer dereference in the process

- Adjust the SRSO mitigation selection to the new attack vectors

* tag 'x86_urgent_for_v6.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/cpuid: Remove transitional <asm/cpuid.h> header
x86/sev: Ensure SVSM reserved fields in a page validation entry are initialized to zero
virt: sev-guest: Satisfy linear mapping requirement in get_derived_key()
x86/sev: Improve handling of writes to intercepted TSC MSRs
x86/fpu: Fix NULL dereference in avx512_status()
x86/bugs: Select best SRSO mitigation

+54 -51
+1 -1
Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
··· 214 214 Spectre_v2 X X 215 215 Spectre_v2_user X X * (Note 1) 216 216 SRBDS X X X X 217 - SRSO X X 217 + SRSO X X X X 218 218 SSB (Note 4) 219 219 TAA X X X X * (Note 2) 220 220 TSA X X X X
+1
arch/x86/boot/startup/sev-shared.c
··· 785 785 pc->entry[0].page_size = RMP_PG_SIZE_4K; 786 786 pc->entry[0].action = validate; 787 787 pc->entry[0].ignore_cf = 0; 788 + pc->entry[0].rsvd = 0; 788 789 pc->entry[0].pfn = paddr >> PAGE_SHIFT; 789 790 790 791 /* Protocol 0, Call ID 1 */
+2
arch/x86/coco/sev/core.c
··· 227 227 pe->page_size = RMP_PG_SIZE_4K; 228 228 pe->action = action; 229 229 pe->ignore_cf = 0; 230 + pe->rsvd = 0; 230 231 pe->pfn = pfn; 231 232 232 233 pe++; ··· 258 257 pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; 259 258 pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; 260 259 pe->ignore_cf = 0; 260 + pe->rsvd = 0; 261 261 pe->pfn = e->gfn; 262 262 263 263 pe++;
+17 -16
arch/x86/coco/sev/vc-handle.c
··· 371 371 * executing with Secure TSC enabled, so special handling is required for 372 372 * accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ. 373 373 */ 374 - static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write) 374 + static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write) 375 375 { 376 + struct pt_regs *regs = ctxt->regs; 376 377 u64 tsc; 377 378 378 379 /* 379 - * GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled. 380 - * Terminate the SNP guest when the interception is enabled. 380 + * Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to 381 + * return undefined values, and GUEST_TSC_FREQ is read-only. Generate 382 + * a #GP on all writes. 383 + */ 384 + if (write) { 385 + ctxt->fi.vector = X86_TRAP_GP; 386 + ctxt->fi.error_code = 0; 387 + return ES_EXCEPTION; 388 + } 389 + 390 + /* 391 + * GUEST_TSC_FREQ read should not be intercepted when Secure TSC is 392 + * enabled. Terminate the guest if a read is attempted. 381 393 */ 382 394 if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ) 383 395 return ES_VMM_ERROR; 384 396 385 - /* 386 - * Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC 387 - * to return undefined values, so ignore all writes. 388 - * 389 - * Reads: Reads of MSR_IA32_TSC should return the current TSC value, use 390 - * the value returned by rdtsc_ordered(). 391 - */ 392 - if (write) { 393 - WARN_ONCE(1, "TSC MSR writes are verboten!\n"); 394 - return ES_OK; 395 - } 396 - 397 + /* Reads of MSR_IA32_TSC should return the current TSC value. */ 397 398 tsc = rdtsc_ordered(); 398 399 regs->ax = lower_32_bits(tsc); 399 400 regs->dx = upper_32_bits(tsc); ··· 417 416 case MSR_IA32_TSC: 418 417 case MSR_AMD64_GUEST_TSC_FREQ: 419 418 if (sev_status & MSR_AMD64_SNP_SECURE_TSC) 420 - return __vc_handle_secure_tsc_msrs(regs, write); 419 + return __vc_handle_secure_tsc_msrs(ctxt, write); 421 420 break; 422 421 default: 423 422 break;
-8
arch/x86/include/asm/cpuid.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 2 - 3 - #ifndef _ASM_X86_CPUID_H 4 - #define _ASM_X86_CPUID_H 5 - 6 - #include <asm/cpuid/api.h> 7 - 8 - #endif /* _ASM_X86_CPUID_H */
+11 -2
arch/x86/kernel/cpu/bugs.c
··· 386 386 387 387 case X86_BUG_SPECTRE_V2: 388 388 case X86_BUG_RETBLEED: 389 - case X86_BUG_SRSO: 390 389 case X86_BUG_L1TF: 391 390 case X86_BUG_ITS: 392 391 return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) || ··· 3183 3184 } 3184 3185 3185 3186 if (srso_mitigation == SRSO_MITIGATION_AUTO) { 3186 - if (should_mitigate_vuln(X86_BUG_SRSO)) { 3187 + /* 3188 + * Use safe-RET if user->kernel or guest->host protection is 3189 + * required. Otherwise the 'microcode' mitigation is sufficient 3190 + * to protect the user->user and guest->guest vectors. 3191 + */ 3192 + if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) || 3193 + (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) && 3194 + !boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) { 3187 3195 srso_mitigation = SRSO_MITIGATION_SAFE_RET; 3196 + } else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) || 3197 + cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) { 3198 + srso_mitigation = SRSO_MITIGATION_MICROCODE; 3188 3199 } else { 3189 3200 srso_mitigation = SRSO_MITIGATION_NONE; 3190 3201 return;
+10 -9
arch/x86/kernel/fpu/xstate.c
··· 1881 1881 #ifdef CONFIG_PROC_PID_ARCH_STATUS 1882 1882 /* 1883 1883 * Report the amount of time elapsed in millisecond since last AVX512 1884 - * use in the task. 1884 + * use in the task. Report -1 if no AVX-512 usage. 1885 1885 */ 1886 1886 static void avx512_status(struct seq_file *m, struct task_struct *task) 1887 1887 { 1888 - unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); 1889 - long delta; 1888 + unsigned long timestamp; 1889 + long delta = -1; 1890 1890 1891 - if (!timestamp) { 1892 - /* 1893 - * Report -1 if no AVX512 usage 1894 - */ 1895 - delta = -1; 1896 - } else { 1891 + /* AVX-512 usage is not tracked for kernel threads. Don't report anything. */ 1892 + if (task->flags & (PF_KTHREAD | PF_USER_WORKER)) 1893 + return; 1894 + 1895 + timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp); 1896 + 1897 + if (timestamp) { 1897 1898 delta = (long)(jiffies - timestamp); 1898 1899 /* 1899 1900 * Cap to LONG_MAX if time difference > LONG_MAX
+12 -15
drivers/virt/coco/sev-guest/sev-guest.c
··· 116 116 117 117 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg) 118 118 { 119 + struct snp_derived_key_resp *derived_key_resp __free(kfree) = NULL; 119 120 struct snp_derived_key_req *derived_key_req __free(kfree) = NULL; 120 - struct snp_derived_key_resp derived_key_resp = {0}; 121 121 struct snp_msg_desc *mdesc = snp_dev->msg_desc; 122 122 struct snp_guest_req req = {}; 123 123 int rc, resp_len; 124 - /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */ 125 - u8 buf[64 + 16]; 126 124 127 125 if (!arg->req_data || !arg->resp_data) 128 126 return -EINVAL; ··· 130 132 * response payload. Make sure that it has enough space to cover the 131 133 * authtag. 132 134 */ 133 - resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize; 134 - if (sizeof(buf) < resp_len) 135 + resp_len = sizeof(derived_key_resp->data) + mdesc->ctx->authsize; 136 + derived_key_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT); 137 + if (!derived_key_resp) 135 138 return -ENOMEM; 136 139 137 140 derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT); ··· 148 149 req.vmpck_id = mdesc->vmpck_id; 149 150 req.req_buf = derived_key_req; 150 151 req.req_sz = sizeof(*derived_key_req); 151 - req.resp_buf = buf; 152 + req.resp_buf = derived_key_resp; 152 153 req.resp_sz = resp_len; 153 154 req.exit_code = SVM_VMGEXIT_GUEST_REQUEST; 154 155 155 156 rc = snp_send_guest_request(mdesc, &req); 156 157 arg->exitinfo2 = req.exitinfo2; 157 - if (rc) 158 - return rc; 159 - 160 - memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data)); 161 - if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp, 162 - sizeof(derived_key_resp))) 163 - rc = -EFAULT; 158 + if (!rc) { 159 + if (copy_to_user((void __user *)arg->resp_data, derived_key_resp, 160 + sizeof(derived_key_resp->data))) 161 + rc = -EFAULT; 162 + } 164 163 165 164 /* The response buffer contains the sensitive data, explicitly clear it. 
*/ 166 - memzero_explicit(buf, sizeof(buf)); 167 - memzero_explicit(&derived_key_resp, sizeof(derived_key_resp)); 165 + memzero_explicit(derived_key_resp, sizeof(*derived_key_resp)); 166 + 168 167 return rc; 169 168 } 170 169