Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'x86_sev_for_v7.0_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 SEV updates from Borislav Petkov:

- Make the SEV internal header really internal and carve out the
SVSM-specific code into a separate compilation unit, along with other
cleanups and fixups

[ TLA translation service: 'SEV' is AMD's 'Secure Encrypted
Virtualization' and SVSM is an ETLA ('Enhanced TLA') for 'Secure
VM Service Module'.

Some of us have trouble keeping track of this all and need all the
help we can get ]

* tag 'x86_sev_for_v7.0_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/sev: Don't emit BSS_DECRYPTED section unless it is in use
x86/sev: Use kfree_sensitive() when freeing a SNP message descriptor
x86/sev: Rename sev_es_ghcb_handle_msr() to __vc_handle_msr()
x86/sev: Carve out the SVSM code into a separate compilation unit
x86/sev: Add internal header guards
x86/sev: Move the internal header

+421 -398
+2 -1
arch/x86/boot/startup/sev-startup.c
···
 27  27     #include <asm/cpu_entry_area.h>
 28  28     #include <asm/stacktrace.h>
 29  29     #include <asm/sev.h>
 30      -  #include <asm/sev-internal.h>
 31  30     #include <asm/insn-eval.h>
 32  31     #include <asm/fpu/xcr.h>
 33  32     #include <asm/processor.h>
···
 39  40     #include <asm/apic.h>
 40  41     #include <asm/cpuid/api.h>
 41  42     #include <asm/cmdline.h>
     43  +
     44  +  #include "../../coco/sev/internal.h"
 42  45
 43  46     /* Include code shared with pre-decompression boot stage */
 44  47     #include "sev-shared.c"
+1 -1
arch/x86/coco/sev/Makefile
···
  1   1     # SPDX-License-Identifier: GPL-2.0
  2   2
  3       - obj-y += core.o noinstr.o vc-handle.o
      3   + obj-y += core.o noinstr.o vc-handle.o svsm.o
  4   4
  5   5     # Clang 14 and older may fail to respect __no_sanitize_undefined when inlining
  6   6     UBSAN_SANITIZE_noinstr.o := n
+5 -382
arch/x86/coco/sev/core.c
··· 31 31 #include <asm/cpu_entry_area.h> 32 32 #include <asm/stacktrace.h> 33 33 #include <asm/sev.h> 34 - #include <asm/sev-internal.h> 35 34 #include <asm/insn-eval.h> 36 35 #include <asm/fpu/xcr.h> 37 36 #include <asm/processor.h> ··· 45 46 #include <asm/cmdline.h> 46 47 #include <asm/msr.h> 47 48 49 + #include "internal.h" 50 + 48 51 /* Bitmap of SEV features supported by the hypervisor */ 49 52 u64 sev_hv_features __ro_after_init; 50 53 SYM_PIC_ALIAS(sev_hv_features); ··· 54 53 /* Secrets page physical address from the CC blob */ 55 54 u64 sev_secrets_pa __ro_after_init; 56 55 SYM_PIC_ALIAS(sev_secrets_pa); 57 - 58 - /* For early boot SVSM communication */ 59 - struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); 60 - SYM_PIC_ALIAS(boot_svsm_ca_page); 61 - 62 - /* 63 - * SVSM related information: 64 - * During boot, the page tables are set up as identity mapped and later 65 - * changed to use kernel virtual addresses. Maintain separate virtual and 66 - * physical addresses for the CAA to allow SVSM functions to be used during 67 - * early boot, both with identity mapped virtual addresses and proper kernel 68 - * virtual addresses. 
69 - */ 70 - u64 boot_svsm_caa_pa __ro_after_init; 71 - SYM_PIC_ALIAS(boot_svsm_caa_pa); 72 - 73 - DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); 74 - DEFINE_PER_CPU(u64, svsm_caa_pa); 75 - 76 - static inline struct svsm_ca *svsm_get_caa(void) 77 - { 78 - if (sev_cfg.use_cas) 79 - return this_cpu_read(svsm_caa); 80 - else 81 - return rip_rel_ptr(&boot_svsm_ca_page); 82 - } 83 - 84 - static inline u64 svsm_get_caa_pa(void) 85 - { 86 - if (sev_cfg.use_cas) 87 - return this_cpu_read(svsm_caa_pa); 88 - else 89 - return boot_svsm_caa_pa; 90 - } 91 56 92 57 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */ 93 58 #define AP_INIT_CS_LIMIT 0xffff ··· 184 217 return ret; 185 218 } 186 219 187 - static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call) 188 - { 189 - struct es_em_ctxt ctxt; 190 - u8 pending = 0; 191 - 192 - vc_ghcb_invalidate(ghcb); 193 - 194 - /* 195 - * Fill in protocol and format specifiers. This can be called very early 196 - * in the boot, so use rip-relative references as needed. 
197 - */ 198 - ghcb->protocol_version = ghcb_version; 199 - ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; 200 - 201 - ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL); 202 - ghcb_set_sw_exit_info_1(ghcb, 0); 203 - ghcb_set_sw_exit_info_2(ghcb, 0); 204 - 205 - sev_es_wr_ghcb_msr(__pa(ghcb)); 206 - 207 - svsm_issue_call(call, &pending); 208 - 209 - if (pending) 210 - return -EINVAL; 211 - 212 - switch (verify_exception_info(ghcb, &ctxt)) { 213 - case ES_OK: 214 - break; 215 - case ES_EXCEPTION: 216 - vc_forward_exception(&ctxt); 217 - fallthrough; 218 - default: 219 - return -EINVAL; 220 - } 221 - 222 - return svsm_process_result_codes(call); 223 - } 224 - 225 - static int svsm_perform_call_protocol(struct svsm_call *call) 226 - { 227 - struct ghcb_state state; 228 - unsigned long flags; 229 - struct ghcb *ghcb; 230 - int ret; 231 - 232 - flags = native_local_irq_save(); 233 - 234 - if (sev_cfg.ghcbs_initialized) 235 - ghcb = __sev_get_ghcb(&state); 236 - else if (boot_ghcb) 237 - ghcb = boot_ghcb; 238 - else 239 - ghcb = NULL; 240 - 241 - do { 242 - ret = ghcb ? 
svsm_perform_ghcb_protocol(ghcb, call) 243 - : __pi_svsm_perform_msr_protocol(call); 244 - } while (ret == -EAGAIN); 245 - 246 - if (sev_cfg.ghcbs_initialized) 247 - __sev_put_ghcb(&state); 248 - 249 - native_local_irq_restore(flags); 250 - 251 - return ret; 252 - } 253 - 254 - static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size, 255 - int ret, u64 svsm_ret) 256 - { 257 - WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n", 258 - pfn, action, page_size, ret, svsm_ret); 259 - 260 - sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE); 261 - } 262 - 263 - static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret) 264 - { 265 - unsigned int page_size; 266 - bool action; 267 - u64 pfn; 268 - 269 - pfn = pc->entry[pc->cur_index].pfn; 270 - action = pc->entry[pc->cur_index].action; 271 - page_size = pc->entry[pc->cur_index].page_size; 272 - 273 - __pval_terminate(pfn, action, page_size, ret, svsm_ret); 274 - } 275 - 276 220 static void pval_pages(struct snp_psc_desc *desc) 277 221 { 278 222 struct psc_entry *e; ··· 218 340 __pval_terminate(pfn, validate, size, rc, 0); 219 341 } 220 342 } 221 - } 222 - 223 - static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, 224 - struct svsm_pvalidate_call *pc) 225 - { 226 - struct svsm_pvalidate_entry *pe; 227 - 228 - /* Nothing in the CA yet */ 229 - pc->num_entries = 0; 230 - pc->cur_index = 0; 231 - 232 - pe = &pc->entry[0]; 233 - 234 - while (pfn < pfn_end) { 235 - pe->page_size = RMP_PG_SIZE_4K; 236 - pe->action = action; 237 - pe->ignore_cf = 0; 238 - pe->rsvd = 0; 239 - pe->pfn = pfn; 240 - 241 - pe++; 242 - pfn++; 243 - 244 - pc->num_entries++; 245 - if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) 246 - break; 247 - } 248 - 249 - return pfn; 250 - } 251 - 252 - static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry, 253 - struct svsm_pvalidate_call *pc) 254 - { 255 - 
struct svsm_pvalidate_entry *pe; 256 - struct psc_entry *e; 257 - 258 - /* Nothing in the CA yet */ 259 - pc->num_entries = 0; 260 - pc->cur_index = 0; 261 - 262 - pe = &pc->entry[0]; 263 - e = &desc->entries[desc_entry]; 264 - 265 - while (desc_entry <= desc->hdr.end_entry) { 266 - pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; 267 - pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; 268 - pe->ignore_cf = 0; 269 - pe->rsvd = 0; 270 - pe->pfn = e->gfn; 271 - 272 - pe++; 273 - e++; 274 - 275 - desc_entry++; 276 - pc->num_entries++; 277 - if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) 278 - break; 279 - } 280 - 281 - return desc_entry; 282 - } 283 - 284 - static void svsm_pval_pages(struct snp_psc_desc *desc) 285 - { 286 - struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY]; 287 - unsigned int i, pv_4k_count = 0; 288 - struct svsm_pvalidate_call *pc; 289 - struct svsm_call call = {}; 290 - unsigned long flags; 291 - bool action; 292 - u64 pc_pa; 293 - int ret; 294 - 295 - /* 296 - * This can be called very early in the boot, use native functions in 297 - * order to avoid paravirt issues. 298 - */ 299 - flags = native_local_irq_save(); 300 - 301 - /* 302 - * The SVSM calling area (CA) can support processing 510 entries at a 303 - * time. Loop through the Page State Change descriptor until the CA is 304 - * full or the last entry in the descriptor is reached, at which time 305 - * the SVSM is invoked. This repeats until all entries in the descriptor 306 - * are processed. 
307 - */ 308 - call.caa = svsm_get_caa(); 309 - 310 - pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; 311 - pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); 312 - 313 - /* Protocol 0, Call ID 1 */ 314 - call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE); 315 - call.rcx = pc_pa; 316 - 317 - for (i = 0; i <= desc->hdr.end_entry;) { 318 - i = svsm_build_ca_from_psc_desc(desc, i, pc); 319 - 320 - do { 321 - ret = svsm_perform_call_protocol(&call); 322 - if (!ret) 323 - continue; 324 - 325 - /* 326 - * Check if the entry failed because of an RMP mismatch (a 327 - * PVALIDATE at 2M was requested, but the page is mapped in 328 - * the RMP as 4K). 329 - */ 330 - 331 - if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH && 332 - pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { 333 - /* Save this entry for post-processing at 4K */ 334 - pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; 335 - 336 - /* Skip to the next one unless at the end of the list */ 337 - pc->cur_index++; 338 - if (pc->cur_index < pc->num_entries) 339 - ret = -EAGAIN; 340 - else 341 - ret = 0; 342 - } 343 - } while (ret == -EAGAIN); 344 - 345 - if (ret) 346 - svsm_pval_terminate(pc, ret, call.rax_out); 347 - } 348 - 349 - /* Process any entries that failed to be validated at 2M and validate them at 4K */ 350 - for (i = 0; i < pv_4k_count; i++) { 351 - u64 pfn, pfn_end; 352 - 353 - action = pv_4k[i].action; 354 - pfn = pv_4k[i].pfn; 355 - pfn_end = pfn + 512; 356 - 357 - while (pfn < pfn_end) { 358 - pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc); 359 - 360 - ret = svsm_perform_call_protocol(&call); 361 - if (ret) 362 - svsm_pval_terminate(pc, ret, call.rax_out); 363 - } 364 - } 365 - 366 - native_local_irq_restore(flags); 367 343 } 368 344 369 345 static void pvalidate_pages(struct snp_psc_desc *desc) ··· 989 1257 ghcb = __sev_get_ghcb(&state); 990 1258 vc_ghcb_invalidate(ghcb); 991 1259 992 - res = sev_es_ghcb_handle_msr(ghcb, &ctxt, false); 1260 + res = 
__vc_handle_msr(ghcb, &ctxt, false); 993 1261 if (res != ES_OK) { 994 1262 pr_err("Secure AVIC MSR (0x%llx) read returned error (%d)\n", msr, res); 995 1263 /* MSR read failures are treated as fatal errors */ ··· 1019 1287 ghcb = __sev_get_ghcb(&state); 1020 1288 vc_ghcb_invalidate(ghcb); 1021 1289 1022 - res = sev_es_ghcb_handle_msr(ghcb, &ctxt, true); 1290 + res = __vc_handle_msr(ghcb, &ctxt, true); 1023 1291 if (res != ES_OK) { 1024 1292 pr_err("Secure AVIC MSR (0x%llx) write returned error (%d)\n", msr, res); 1025 1293 /* MSR writes should never fail. Any failure is fatal error for SNP guest */ ··· 1320 1588 } 1321 1589 arch_initcall(report_snp_info); 1322 1590 1323 - static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input) 1324 - { 1325 - /* If (new) lengths have been returned, propagate them up */ 1326 - if (call->rcx_out != call->rcx) 1327 - input->manifest_buf.len = call->rcx_out; 1328 - 1329 - if (call->rdx_out != call->rdx) 1330 - input->certificates_buf.len = call->rdx_out; 1331 - 1332 - if (call->r8_out != call->r8) 1333 - input->report_buf.len = call->r8_out; 1334 - } 1335 - 1336 - int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, 1337 - struct svsm_attest_call *input) 1338 - { 1339 - struct svsm_attest_call *ac; 1340 - unsigned long flags; 1341 - u64 attest_call_pa; 1342 - int ret; 1343 - 1344 - if (!snp_vmpl) 1345 - return -EINVAL; 1346 - 1347 - local_irq_save(flags); 1348 - 1349 - call->caa = svsm_get_caa(); 1350 - 1351 - ac = (struct svsm_attest_call *)call->caa->svsm_buffer; 1352 - attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); 1353 - 1354 - *ac = *input; 1355 - 1356 - /* 1357 - * Set input registers for the request and set RDX and R8 to known 1358 - * values in order to detect length values being returned in them. 
1359 - */ 1360 - call->rax = call_id; 1361 - call->rcx = attest_call_pa; 1362 - call->rdx = -1; 1363 - call->r8 = -1; 1364 - ret = svsm_perform_call_protocol(call); 1365 - update_attest_input(call, input); 1366 - 1367 - local_irq_restore(flags); 1368 - 1369 - return ret; 1370 - } 1371 - EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req); 1372 - 1373 1591 static int snp_issue_guest_request(struct snp_guest_req *req) 1374 1592 { 1375 1593 struct snp_req_data *input = &req->input; ··· 1383 1701 1384 1702 return ret; 1385 1703 } 1386 - 1387 - /** 1388 - * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device 1389 - * 1390 - * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND 1391 - * which is the only request used so far. 1392 - * 1393 - * Return: true if the platform provides a vTPM SVSM device, false otherwise. 1394 - */ 1395 - static bool snp_svsm_vtpm_probe(void) 1396 - { 1397 - struct svsm_call call = {}; 1398 - 1399 - /* The vTPM device is available only if a SVSM is present */ 1400 - if (!snp_vmpl) 1401 - return false; 1402 - 1403 - call.caa = svsm_get_caa(); 1404 - call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY); 1405 - 1406 - if (svsm_perform_call_protocol(&call)) 1407 - return false; 1408 - 1409 - /* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */ 1410 - return call.rcx_out & BIT_ULL(8); 1411 - } 1412 - 1413 - /** 1414 - * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM 1415 - * @buffer: A buffer used to both send the command and receive the response. 
1416 - * 1417 - * Execute a SVSM_VTPM_CMD call as defined by 1418 - * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00 1419 - * 1420 - * All command request/response buffers have a common structure as specified by 1421 - * the following table: 1422 - * Byte Size     In/Out    Description 1423 - * Offset    (Bytes) 1424 - * 0x000     4          In        Platform command 1425 -  *                         Out       Platform command response size 1426 - * 1427 - * Each command can build upon this common request/response structure to create 1428 - * a structure specific to the command. See include/linux/tpm_svsm.h for more 1429 - * details. 1430 - * 1431 - * Return: 0 on success, -errno on failure 1432 - */ 1433 - int snp_svsm_vtpm_send_command(u8 *buffer) 1434 - { 1435 - struct svsm_call call = {}; 1436 - 1437 - call.caa = svsm_get_caa(); 1438 - call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD); 1439 - call.rcx = __pa(buffer); 1440 - 1441 - return svsm_perform_call_protocol(&call); 1442 - } 1443 - EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command); 1444 1704 1445 1705 static struct platform_device sev_guest_device = { 1446 1706 .name = "sev-guest", ··· 1632 2008 free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); 1633 2009 iounmap((__force void __iomem *)mdesc->secrets); 1634 2010 1635 - memset(mdesc, 0, sizeof(*mdesc)); 1636 - kfree(mdesc); 2011 + kfree_sensitive(mdesc); 1637 2012 } 1638 2013 EXPORT_SYMBOL_GPL(snp_msg_free); 1639 2014
+2 -1
arch/x86/coco/sev/noinstr.c
···
 16  16     #include <asm/msr.h>
 17  17     #include <asm/ptrace.h>
 18  18     #include <asm/sev.h>
 19      -  #include <asm/sev-internal.h>
     19  +
     20  +  #include "internal.h"
 20  21
 21  22     static __always_inline bool on_vc_stack(struct pt_regs *regs)
 22  23     {
+362
arch/x86/coco/sev/svsm.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * SVSM support code 4 + */ 5 + 6 + #include <linux/types.h> 7 + 8 + #include <asm/sev.h> 9 + 10 + #include "internal.h" 11 + 12 + /* For early boot SVSM communication */ 13 + struct svsm_ca boot_svsm_ca_page __aligned(PAGE_SIZE); 14 + SYM_PIC_ALIAS(boot_svsm_ca_page); 15 + 16 + /* 17 + * SVSM related information: 18 + * During boot, the page tables are set up as identity mapped and later 19 + * changed to use kernel virtual addresses. Maintain separate virtual and 20 + * physical addresses for the CAA to allow SVSM functions to be used during 21 + * early boot, both with identity mapped virtual addresses and proper kernel 22 + * virtual addresses. 23 + */ 24 + u64 boot_svsm_caa_pa __ro_after_init; 25 + SYM_PIC_ALIAS(boot_svsm_caa_pa); 26 + 27 + DEFINE_PER_CPU(struct svsm_ca *, svsm_caa); 28 + DEFINE_PER_CPU(u64, svsm_caa_pa); 29 + 30 + static int svsm_perform_ghcb_protocol(struct ghcb *ghcb, struct svsm_call *call) 31 + { 32 + struct es_em_ctxt ctxt; 33 + u8 pending = 0; 34 + 35 + vc_ghcb_invalidate(ghcb); 36 + 37 + /* 38 + * Fill in protocol and format specifiers. This can be called very early 39 + * in the boot, so use rip-relative references as needed. 
40 + */ 41 + ghcb->protocol_version = ghcb_version; 42 + ghcb->ghcb_usage = GHCB_DEFAULT_USAGE; 43 + 44 + ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_SNP_RUN_VMPL); 45 + ghcb_set_sw_exit_info_1(ghcb, 0); 46 + ghcb_set_sw_exit_info_2(ghcb, 0); 47 + 48 + sev_es_wr_ghcb_msr(__pa(ghcb)); 49 + 50 + svsm_issue_call(call, &pending); 51 + 52 + if (pending) 53 + return -EINVAL; 54 + 55 + switch (verify_exception_info(ghcb, &ctxt)) { 56 + case ES_OK: 57 + break; 58 + case ES_EXCEPTION: 59 + vc_forward_exception(&ctxt); 60 + fallthrough; 61 + default: 62 + return -EINVAL; 63 + } 64 + 65 + return svsm_process_result_codes(call); 66 + } 67 + 68 + int svsm_perform_call_protocol(struct svsm_call *call) 69 + { 70 + struct ghcb_state state; 71 + unsigned long flags; 72 + struct ghcb *ghcb; 73 + int ret; 74 + 75 + flags = native_local_irq_save(); 76 + 77 + if (sev_cfg.ghcbs_initialized) 78 + ghcb = __sev_get_ghcb(&state); 79 + else if (boot_ghcb) 80 + ghcb = boot_ghcb; 81 + else 82 + ghcb = NULL; 83 + 84 + do { 85 + ret = ghcb ? 
svsm_perform_ghcb_protocol(ghcb, call) 86 + : __pi_svsm_perform_msr_protocol(call); 87 + } while (ret == -EAGAIN); 88 + 89 + if (sev_cfg.ghcbs_initialized) 90 + __sev_put_ghcb(&state); 91 + 92 + native_local_irq_restore(flags); 93 + 94 + return ret; 95 + } 96 + 97 + static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action, 98 + struct svsm_pvalidate_call *pc) 99 + { 100 + struct svsm_pvalidate_entry *pe; 101 + 102 + /* Nothing in the CA yet */ 103 + pc->num_entries = 0; 104 + pc->cur_index = 0; 105 + 106 + pe = &pc->entry[0]; 107 + 108 + while (pfn < pfn_end) { 109 + pe->page_size = RMP_PG_SIZE_4K; 110 + pe->action = action; 111 + pe->ignore_cf = 0; 112 + pe->rsvd = 0; 113 + pe->pfn = pfn; 114 + 115 + pe++; 116 + pfn++; 117 + 118 + pc->num_entries++; 119 + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) 120 + break; 121 + } 122 + 123 + return pfn; 124 + } 125 + 126 + static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int desc_entry, 127 + struct svsm_pvalidate_call *pc) 128 + { 129 + struct svsm_pvalidate_entry *pe; 130 + struct psc_entry *e; 131 + 132 + /* Nothing in the CA yet */ 133 + pc->num_entries = 0; 134 + pc->cur_index = 0; 135 + 136 + pe = &pc->entry[0]; 137 + e = &desc->entries[desc_entry]; 138 + 139 + while (desc_entry <= desc->hdr.end_entry) { 140 + pe->page_size = e->pagesize ? 
RMP_PG_SIZE_2M : RMP_PG_SIZE_4K; 141 + pe->action = e->operation == SNP_PAGE_STATE_PRIVATE; 142 + pe->ignore_cf = 0; 143 + pe->rsvd = 0; 144 + pe->pfn = e->gfn; 145 + 146 + pe++; 147 + e++; 148 + 149 + desc_entry++; 150 + pc->num_entries++; 151 + if (pc->num_entries == SVSM_PVALIDATE_MAX_COUNT) 152 + break; 153 + } 154 + 155 + return desc_entry; 156 + } 157 + 158 + static void svsm_pval_terminate(struct svsm_pvalidate_call *pc, int ret, u64 svsm_ret) 159 + { 160 + unsigned int page_size; 161 + bool action; 162 + u64 pfn; 163 + 164 + pfn = pc->entry[pc->cur_index].pfn; 165 + action = pc->entry[pc->cur_index].action; 166 + page_size = pc->entry[pc->cur_index].page_size; 167 + 168 + __pval_terminate(pfn, action, page_size, ret, svsm_ret); 169 + } 170 + 171 + void svsm_pval_pages(struct snp_psc_desc *desc) 172 + { 173 + struct svsm_pvalidate_entry pv_4k[VMGEXIT_PSC_MAX_ENTRY]; 174 + unsigned int i, pv_4k_count = 0; 175 + struct svsm_pvalidate_call *pc; 176 + struct svsm_call call = {}; 177 + unsigned long flags; 178 + bool action; 179 + u64 pc_pa; 180 + int ret; 181 + 182 + /* 183 + * This can be called very early in the boot, use native functions in 184 + * order to avoid paravirt issues. 185 + */ 186 + flags = native_local_irq_save(); 187 + 188 + /* 189 + * The SVSM calling area (CA) can support processing 510 entries at a 190 + * time. Loop through the Page State Change descriptor until the CA is 191 + * full or the last entry in the descriptor is reached, at which time 192 + * the SVSM is invoked. This repeats until all entries in the descriptor 193 + * are processed. 
194 + */ 195 + call.caa = svsm_get_caa(); 196 + 197 + pc = (struct svsm_pvalidate_call *)call.caa->svsm_buffer; 198 + pc_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); 199 + 200 + /* Protocol 0, Call ID 1 */ 201 + call.rax = SVSM_CORE_CALL(SVSM_CORE_PVALIDATE); 202 + call.rcx = pc_pa; 203 + 204 + for (i = 0; i <= desc->hdr.end_entry;) { 205 + i = svsm_build_ca_from_psc_desc(desc, i, pc); 206 + 207 + do { 208 + ret = svsm_perform_call_protocol(&call); 209 + if (!ret) 210 + continue; 211 + 212 + /* 213 + * Check if the entry failed because of an RMP mismatch (a 214 + * PVALIDATE at 2M was requested, but the page is mapped in 215 + * the RMP as 4K). 216 + */ 217 + 218 + if (call.rax_out == SVSM_PVALIDATE_FAIL_SIZEMISMATCH && 219 + pc->entry[pc->cur_index].page_size == RMP_PG_SIZE_2M) { 220 + /* Save this entry for post-processing at 4K */ 221 + pv_4k[pv_4k_count++] = pc->entry[pc->cur_index]; 222 + 223 + /* Skip to the next one unless at the end of the list */ 224 + pc->cur_index++; 225 + if (pc->cur_index < pc->num_entries) 226 + ret = -EAGAIN; 227 + else 228 + ret = 0; 229 + } 230 + } while (ret == -EAGAIN); 231 + 232 + if (ret) 233 + svsm_pval_terminate(pc, ret, call.rax_out); 234 + } 235 + 236 + /* Process any entries that failed to be validated at 2M and validate them at 4K */ 237 + for (i = 0; i < pv_4k_count; i++) { 238 + u64 pfn, pfn_end; 239 + 240 + action = pv_4k[i].action; 241 + pfn = pv_4k[i].pfn; 242 + pfn_end = pfn + 512; 243 + 244 + while (pfn < pfn_end) { 245 + pfn = svsm_build_ca_from_pfn_range(pfn, pfn_end, action, pc); 246 + 247 + ret = svsm_perform_call_protocol(&call); 248 + if (ret) 249 + svsm_pval_terminate(pc, ret, call.rax_out); 250 + } 251 + } 252 + 253 + native_local_irq_restore(flags); 254 + } 255 + 256 + static void update_attest_input(struct svsm_call *call, struct svsm_attest_call *input) 257 + { 258 + /* If (new) lengths have been returned, propagate them up */ 259 + if (call->rcx_out != call->rcx) 260 + 
input->manifest_buf.len = call->rcx_out; 261 + 262 + if (call->rdx_out != call->rdx) 263 + input->certificates_buf.len = call->rdx_out; 264 + 265 + if (call->r8_out != call->r8) 266 + input->report_buf.len = call->r8_out; 267 + } 268 + 269 + int snp_issue_svsm_attest_req(u64 call_id, struct svsm_call *call, 270 + struct svsm_attest_call *input) 271 + { 272 + struct svsm_attest_call *ac; 273 + unsigned long flags; 274 + u64 attest_call_pa; 275 + int ret; 276 + 277 + if (!snp_vmpl) 278 + return -EINVAL; 279 + 280 + local_irq_save(flags); 281 + 282 + call->caa = svsm_get_caa(); 283 + 284 + ac = (struct svsm_attest_call *)call->caa->svsm_buffer; 285 + attest_call_pa = svsm_get_caa_pa() + offsetof(struct svsm_ca, svsm_buffer); 286 + 287 + *ac = *input; 288 + 289 + /* 290 + * Set input registers for the request and set RDX and R8 to known 291 + * values in order to detect length values being returned in them. 292 + */ 293 + call->rax = call_id; 294 + call->rcx = attest_call_pa; 295 + call->rdx = -1; 296 + call->r8 = -1; 297 + ret = svsm_perform_call_protocol(call); 298 + update_attest_input(call, input); 299 + 300 + local_irq_restore(flags); 301 + 302 + return ret; 303 + } 304 + EXPORT_SYMBOL_GPL(snp_issue_svsm_attest_req); 305 + 306 + /** 307 + * snp_svsm_vtpm_send_command() - Execute a vTPM operation on SVSM 308 + * @buffer: A buffer used to both send the command and receive the response. 
309 + * 310 + * Execute a SVSM_VTPM_CMD call as defined by 311 + * "Secure VM Service Module for SEV-SNP Guests" Publication # 58019 Revision: 1.00 312 + * 313 + * All command request/response buffers have a common structure as specified by 314 + * the following table: 315 + * Byte Size     In/Out    Description 316 + * Offset    (Bytes) 317 + * 0x000     4          In        Platform command 318 +  *                         Out       Platform command response size 319 + * 320 + * Each command can build upon this common request/response structure to create 321 + * a structure specific to the command. See include/linux/tpm_svsm.h for more 322 + * details. 323 + * 324 + * Return: 0 on success, -errno on failure 325 + */ 326 + int snp_svsm_vtpm_send_command(u8 *buffer) 327 + { 328 + struct svsm_call call = {}; 329 + 330 + call.caa = svsm_get_caa(); 331 + call.rax = SVSM_VTPM_CALL(SVSM_VTPM_CMD); 332 + call.rcx = __pa(buffer); 333 + 334 + return svsm_perform_call_protocol(&call); 335 + } 336 + EXPORT_SYMBOL_GPL(snp_svsm_vtpm_send_command); 337 + 338 + /** 339 + * snp_svsm_vtpm_probe() - Probe if SVSM provides a vTPM device 340 + * 341 + * Check that there is SVSM and that it supports at least TPM_SEND_COMMAND 342 + * which is the only request used so far. 343 + * 344 + * Return: true if the platform provides a vTPM SVSM device, false otherwise. 345 + */ 346 + bool snp_svsm_vtpm_probe(void) 347 + { 348 + struct svsm_call call = {}; 349 + 350 + /* The vTPM device is available only if a SVSM is present */ 351 + if (!snp_vmpl) 352 + return false; 353 + 354 + call.caa = svsm_get_caa(); 355 + call.rax = SVSM_VTPM_CALL(SVSM_VTPM_QUERY); 356 + 357 + if (svsm_perform_call_protocol(&call)) 358 + return false; 359 + 360 + /* Check platform commands contains TPM_SEND_COMMAND - platform command 8 */ 361 + return call.rcx_out & BIT_ULL(8); 362 + }
+4 -3
arch/x86/coco/sev/vc-handle.c
···
 23  23     #include <asm/init.h>
 24  24     #include <asm/stacktrace.h>
 25  25     #include <asm/sev.h>
 26      -  #include <asm/sev-internal.h>
 27  26     #include <asm/insn-eval.h>
 28  27     #include <asm/fpu/xcr.h>
 29  28     #include <asm/processor.h>
···
 33  34     #include <asm/cpu.h>
 34  35     #include <asm/apic.h>
 35  36     #include <asm/cpuid/api.h>
     37  +
     38  +  #include "internal.h"
 36  39
 37  40     static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
 38  41                                                unsigned long vaddr, phys_addr_t *paddr)
···
404 403     	return ES_OK;
405 404     }
406 405
407      -  enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
    406  +  enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write)
408 407     {
409 408     	struct pt_regs *regs = ctxt->regs;
410 409     	enum es_result ret;
···
448 447
449 448     static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
450 449     {
451      -  	return sev_es_ghcb_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
    450  +  	return __vc_handle_msr(ghcb, ctxt, ctxt->insn.opcode.bytes[1] == 0x30);
452 451     }
453 452
454 453     static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
+33 -1
arch/x86/include/asm/sev-internal.h arch/x86/coco/sev/internal.h
···
  1   1     /* SPDX-License-Identifier: GPL-2.0 */
      2  +  #ifndef __X86_COCO_SEV_INTERNAL_H__
      3  +  #define __X86_COCO_SEV_INTERNAL_H__
  2   4
  3   5     #define DR7_RESET_VALUE 0x400
  4   6
···
 66  64
 67  65     enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt);
 68  66     void vc_forward_exception(struct es_em_ctxt *ctxt);
     67  +  void svsm_pval_pages(struct snp_psc_desc *desc);
     68  +  int svsm_perform_call_protocol(struct svsm_call *call);
     69  +  bool snp_svsm_vtpm_probe(void);
 69  70
 70  71     static inline u64 sev_es_rd_ghcb_msr(void)
 71  72     {
···
 85  80     	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
 86  81     }
 87  82
 88      -  enum es_result sev_es_ghcb_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);
     83  +  enum es_result __vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt, bool write);
 89  84
 90  85     u64 get_hv_features(void);
 91  86
 92  87     const struct snp_cpuid_table *snp_cpuid_get_table(void);
     88  +
     89  +  static inline struct svsm_ca *svsm_get_caa(void)
     90  +  {
     91  +  	if (sev_cfg.use_cas)
     92  +  		return this_cpu_read(svsm_caa);
     93  +  	else
     94  +  		return rip_rel_ptr(&boot_svsm_ca_page);
     95  +  }
     96  +
     97  +  static inline u64 svsm_get_caa_pa(void)
     98  +  {
     99  +  	if (sev_cfg.use_cas)
    100  +  		return this_cpu_read(svsm_caa_pa);
    101  +  	else
    102  +  		return boot_svsm_caa_pa;
    103  +  }
    104  +
    105  +  static inline void __pval_terminate(u64 pfn, bool action, unsigned int page_size,
    106  +  				    int ret, u64 svsm_ret)
    107  +  {
    108  +  	WARN(1, "PVALIDATE failure: pfn: 0x%llx, action: %u, size: %u, ret: %d, svsm_ret: 0x%llx\n",
    109  +  	     pfn, action, page_size, ret, svsm_ret);
    110  +
    111  +  	sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
    112  +  }
    113  +
    114  +  #endif /* __X86_COCO_SEV_INTERNAL_H__ */
+12 -9
arch/x86/kernel/vmlinux.lds.S
···
 67  67
 68  68     #define ALIGN_ENTRY_TEXT_BEGIN	. = ALIGN(PMD_SIZE);
 69  69     #define ALIGN_ENTRY_TEXT_END	. = ALIGN(PMD_SIZE);
     70  +  #else
 70  71
     72  +  #define X86_ALIGN_RODATA_BEGIN
     73  +  #define X86_ALIGN_RODATA_END		\
     74  +  		. = ALIGN(PAGE_SIZE);	\
     75  +  		__end_rodata_aligned = .;
     76  +
     77  +  #define ALIGN_ENTRY_TEXT_BEGIN
     78  +  #define ALIGN_ENTRY_TEXT_END
     79  +  #endif
     80  +
     81  +  #ifdef CONFIG_AMD_MEM_ENCRYPT
 71  82     /*
 72  83      * This section contains data which will be mapped as decrypted. Memory
 73  84      * encryption operates on a page basis. Make this section PMD-aligned
···
 99  88     	__pi___end_bss_decrypted = .;		\
100  89
101  90     #else
102      -
103      -  #define X86_ALIGN_RODATA_BEGIN
104      -  #define X86_ALIGN_RODATA_END		\
105      -  		. = ALIGN(PAGE_SIZE);	\
106      -  		__end_rodata_aligned = .;
107      -
108      -  #define ALIGN_ENTRY_TEXT_BEGIN
109      -  #define ALIGN_ENTRY_TEXT_END
110  91     #define BSS_DECRYPTED
111      -
112  92     #endif
     93  +
113  94     #if defined(CONFIG_X86_64) && defined(CONFIG_KEXEC_CORE)
114  95     #define KEXEC_RELOCATE_KERNEL			\
115  96     	. = ALIGN(0x100);			\