Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

KVM: arm64: Introduce data structure tracking both RES0 and RES1 bits

We have so far mostly tracked RES0 bits, but only made a few attempts
at being just as strict for RES1 bits (probably because they are both
rarer and harder to handle).

Start scratching the surface by introducing a data structure tracking
RES0 and RES1 bits at the same time.

Note that, contrary to the usual idiom, this structure is mostly passed
around by value -- the ABI handles it nicely, and the resulting code is
much cleaner.

Reviewed-by: Fuad Tabba <tabba@google.com>
Tested-by: Fuad Tabba <tabba@google.com>
Link: https://patch.msgid.link/20260202184329.2724080-5-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>

+161 -139
+17 -6
arch/arm64/include/asm/kvm_host.h
··· 626 626 NR_SYS_REGS /* Nothing after this line! */ 627 627 }; 628 628 629 - struct kvm_sysreg_masks { 630 - struct { 631 - u64 res0; 632 - u64 res1; 633 - } mask[NR_SYS_REGS - __SANITISED_REG_START__]; 629 + struct resx { 630 + u64 res0; 631 + u64 res1; 634 632 }; 633 + 634 + struct kvm_sysreg_masks { 635 + struct resx mask[NR_SYS_REGS - __SANITISED_REG_START__]; 636 + }; 637 + 638 + static inline void __kvm_set_sysreg_resx(struct kvm_arch *arch, 639 + enum vcpu_sysreg sr, struct resx resx) 640 + { 641 + arch->sysreg_masks->mask[sr - __SANITISED_REG_START__] = resx; 642 + } 643 + 644 + #define kvm_set_sysreg_resx(k, sr, resx) \ 645 + __kvm_set_sysreg_resx(&(k)->arch, (sr), (resx)) 635 646 636 647 struct fgt_masks { 637 648 const char *str; ··· 1618 1607 } 1619 1608 1620 1609 void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt); 1621 - void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1); 1610 + struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg); 1622 1611 void check_feature_map(void); 1623 1612 void kvm_vcpu_load_fgt(struct kvm_vcpu *vcpu); 1624 1613
+80 -68
arch/arm64/kvm/config.c
··· 1290 1290 } 1291 1291 } 1292 1292 1293 - static u64 __compute_fixed_bits(struct kvm *kvm, 1294 - const struct reg_bits_to_feat_map *map, 1295 - int map_size, 1296 - u64 *fixed_bits, 1297 - unsigned long require, 1298 - unsigned long exclude) 1293 + static struct resx __compute_fixed_bits(struct kvm *kvm, 1294 + const struct reg_bits_to_feat_map *map, 1295 + int map_size, 1296 + u64 *fixed_bits, 1297 + unsigned long require, 1298 + unsigned long exclude) 1299 1299 { 1300 - u64 val = 0; 1300 + struct resx resx = {}; 1301 1301 1302 1302 for (int i = 0; i < map_size; i++) { 1303 1303 bool match; ··· 1316 1316 match = idreg_feat_match(kvm, &map[i]); 1317 1317 1318 1318 if (!match || (map[i].flags & FIXED_VALUE)) 1319 - val |= reg_feat_map_bits(&map[i]); 1319 + resx.res0 |= reg_feat_map_bits(&map[i]); 1320 1320 } 1321 1321 1322 - return val; 1322 + return resx; 1323 1323 } 1324 1324 1325 - static u64 compute_res0_bits(struct kvm *kvm, 1326 - const struct reg_bits_to_feat_map *map, 1327 - int map_size, 1328 - unsigned long require, 1329 - unsigned long exclude) 1325 + static struct resx compute_resx_bits(struct kvm *kvm, 1326 + const struct reg_bits_to_feat_map *map, 1327 + int map_size, 1328 + unsigned long require, 1329 + unsigned long exclude) 1330 1330 { 1331 1331 return __compute_fixed_bits(kvm, map, map_size, NULL, 1332 1332 require, exclude | FIXED_VALUE); 1333 1333 } 1334 1334 1335 - static u64 compute_reg_res0_bits(struct kvm *kvm, 1336 - const struct reg_feat_map_desc *r, 1337 - unsigned long require, unsigned long exclude) 1335 + static struct resx compute_reg_resx_bits(struct kvm *kvm, 1336 + const struct reg_feat_map_desc *r, 1337 + unsigned long require, 1338 + unsigned long exclude) 1338 1339 { 1339 - u64 res0; 1340 + struct resx resx, tmp; 1340 1341 1341 - res0 = compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1342 + resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1342 1343 require, exclude); 1343 1344 1344 - res0 |= 
compute_res0_bits(kvm, &r->feat_map, 1, require, exclude); 1345 - res0 |= ~reg_feat_map_bits(&r->feat_map); 1345 + tmp = compute_resx_bits(kvm, &r->feat_map, 1, require, exclude); 1346 1346 1347 - return res0; 1347 + resx.res0 |= tmp.res0; 1348 + resx.res0 |= ~reg_feat_map_bits(&r->feat_map); 1349 + resx.res1 |= tmp.res1; 1350 + 1351 + return resx; 1348 1352 } 1349 1353 1350 1354 static u64 compute_fgu_bits(struct kvm *kvm, const struct reg_feat_map_desc *r) 1351 1355 { 1356 + struct resx resx; 1357 + 1352 1358 /* 1353 1359 * If computing FGUs, we collect the unsupported feature bits as 1354 - * RES0 bits, but don't take the actual RES0 bits or register 1360 + * RESx bits, but don't take the actual RESx bits or register 1355 1361 * existence into account -- we're not computing bits for the 1356 1362 * register itself. 1357 1363 */ 1358 - return compute_res0_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1364 + resx = compute_resx_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1359 1365 0, NEVER_FGU); 1366 + 1367 + return resx.res0 | resx.res1; 1360 1368 } 1361 1369 1362 - static u64 compute_reg_fixed_bits(struct kvm *kvm, 1363 - const struct reg_feat_map_desc *r, 1364 - u64 *fixed_bits, unsigned long require, 1365 - unsigned long exclude) 1370 + static struct resx compute_reg_fixed_bits(struct kvm *kvm, 1371 + const struct reg_feat_map_desc *r, 1372 + u64 *fixed_bits, 1373 + unsigned long require, 1374 + unsigned long exclude) 1366 1375 { 1367 1376 return __compute_fixed_bits(kvm, r->bit_feat_map, r->bit_feat_map_sz, 1368 1377 fixed_bits, require | FIXED_VALUE, exclude); ··· 1414 1405 kvm->arch.fgu[fgt] = val; 1415 1406 } 1416 1407 1417 - void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1) 1408 + struct resx get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg) 1418 1409 { 1419 1410 u64 fixed = 0, mask; 1411 + struct resx resx; 1420 1412 1421 1413 switch (reg) { 1422 1414 case HFGRTR_EL2: 1423 - *res0 = 
compute_reg_res0_bits(kvm, &hfgrtr_desc, 0, 0); 1424 - *res1 = HFGRTR_EL2_RES1; 1415 + resx = compute_reg_resx_bits(kvm, &hfgrtr_desc, 0, 0); 1416 + resx.res1 |= HFGRTR_EL2_RES1; 1425 1417 break; 1426 1418 case HFGWTR_EL2: 1427 - *res0 = compute_reg_res0_bits(kvm, &hfgwtr_desc, 0, 0); 1428 - *res1 = HFGWTR_EL2_RES1; 1419 + resx = compute_reg_resx_bits(kvm, &hfgwtr_desc, 0, 0); 1420 + resx.res1 |= HFGWTR_EL2_RES1; 1429 1421 break; 1430 1422 case HFGITR_EL2: 1431 - *res0 = compute_reg_res0_bits(kvm, &hfgitr_desc, 0, 0); 1432 - *res1 = HFGITR_EL2_RES1; 1423 + resx = compute_reg_resx_bits(kvm, &hfgitr_desc, 0, 0); 1424 + resx.res1 |= HFGITR_EL2_RES1; 1433 1425 break; 1434 1426 case HDFGRTR_EL2: 1435 - *res0 = compute_reg_res0_bits(kvm, &hdfgrtr_desc, 0, 0); 1436 - *res1 = HDFGRTR_EL2_RES1; 1427 + resx = compute_reg_resx_bits(kvm, &hdfgrtr_desc, 0, 0); 1428 + resx.res1 |= HDFGRTR_EL2_RES1; 1437 1429 break; 1438 1430 case HDFGWTR_EL2: 1439 - *res0 = compute_reg_res0_bits(kvm, &hdfgwtr_desc, 0, 0); 1440 - *res1 = HDFGWTR_EL2_RES1; 1431 + resx = compute_reg_resx_bits(kvm, &hdfgwtr_desc, 0, 0); 1432 + resx.res1 |= HDFGWTR_EL2_RES1; 1441 1433 break; 1442 1434 case HAFGRTR_EL2: 1443 - *res0 = compute_reg_res0_bits(kvm, &hafgrtr_desc, 0, 0); 1444 - *res1 = HAFGRTR_EL2_RES1; 1435 + resx = compute_reg_resx_bits(kvm, &hafgrtr_desc, 0, 0); 1436 + resx.res1 |= HAFGRTR_EL2_RES1; 1445 1437 break; 1446 1438 case HFGRTR2_EL2: 1447 - *res0 = compute_reg_res0_bits(kvm, &hfgrtr2_desc, 0, 0); 1448 - *res1 = HFGRTR2_EL2_RES1; 1439 + resx = compute_reg_resx_bits(kvm, &hfgrtr2_desc, 0, 0); 1440 + resx.res1 |= HFGRTR2_EL2_RES1; 1449 1441 break; 1450 1442 case HFGWTR2_EL2: 1451 - *res0 = compute_reg_res0_bits(kvm, &hfgwtr2_desc, 0, 0); 1452 - *res1 = HFGWTR2_EL2_RES1; 1443 + resx = compute_reg_resx_bits(kvm, &hfgwtr2_desc, 0, 0); 1444 + resx.res1 |= HFGWTR2_EL2_RES1; 1453 1445 break; 1454 1446 case HFGITR2_EL2: 1455 - *res0 = compute_reg_res0_bits(kvm, &hfgitr2_desc, 0, 0); 1456 - *res1 = 
HFGITR2_EL2_RES1; 1447 + resx = compute_reg_resx_bits(kvm, &hfgitr2_desc, 0, 0); 1448 + resx.res1 |= HFGITR2_EL2_RES1; 1457 1449 break; 1458 1450 case HDFGRTR2_EL2: 1459 - *res0 = compute_reg_res0_bits(kvm, &hdfgrtr2_desc, 0, 0); 1460 - *res1 = HDFGRTR2_EL2_RES1; 1451 + resx = compute_reg_resx_bits(kvm, &hdfgrtr2_desc, 0, 0); 1452 + resx.res1 |= HDFGRTR2_EL2_RES1; 1461 1453 break; 1462 1454 case HDFGWTR2_EL2: 1463 - *res0 = compute_reg_res0_bits(kvm, &hdfgwtr2_desc, 0, 0); 1464 - *res1 = HDFGWTR2_EL2_RES1; 1455 + resx = compute_reg_resx_bits(kvm, &hdfgwtr2_desc, 0, 0); 1456 + resx.res1 |= HDFGWTR2_EL2_RES1; 1465 1457 break; 1466 1458 case HCRX_EL2: 1467 - *res0 = compute_reg_res0_bits(kvm, &hcrx_desc, 0, 0); 1468 - *res1 = __HCRX_EL2_RES1; 1459 + resx = compute_reg_resx_bits(kvm, &hcrx_desc, 0, 0); 1460 + resx.res1 |= __HCRX_EL2_RES1; 1469 1461 break; 1470 1462 case HCR_EL2: 1471 - mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0); 1472 - *res0 = compute_reg_res0_bits(kvm, &hcr_desc, 0, 0); 1473 - *res0 |= (mask & ~fixed); 1474 - *res1 = HCR_EL2_RES1 | (mask & fixed); 1463 + mask = compute_reg_fixed_bits(kvm, &hcr_desc, &fixed, 0, 0).res0; 1464 + resx = compute_reg_resx_bits(kvm, &hcr_desc, 0, 0); 1465 + resx.res0 |= (mask & ~fixed); 1466 + resx.res1 |= HCR_EL2_RES1 | (mask & fixed); 1475 1467 break; 1476 1468 case SCTLR2_EL1: 1477 1469 case SCTLR2_EL2: 1478 - *res0 = compute_reg_res0_bits(kvm, &sctlr2_desc, 0, 0); 1479 - *res1 = SCTLR2_EL1_RES1; 1470 + resx = compute_reg_resx_bits(kvm, &sctlr2_desc, 0, 0); 1471 + resx.res1 |= SCTLR2_EL1_RES1; 1480 1472 break; 1481 1473 case TCR2_EL2: 1482 - *res0 = compute_reg_res0_bits(kvm, &tcr2_el2_desc, 0, 0); 1483 - *res1 = TCR2_EL2_RES1; 1474 + resx = compute_reg_resx_bits(kvm, &tcr2_el2_desc, 0, 0); 1475 + resx.res1 |= TCR2_EL2_RES1; 1484 1476 break; 1485 1477 case SCTLR_EL1: 1486 - *res0 = compute_reg_res0_bits(kvm, &sctlr_el1_desc, 0, 0); 1487 - *res1 = SCTLR_EL1_RES1; 1478 + resx = compute_reg_resx_bits(kvm, 
&sctlr_el1_desc, 0, 0); 1479 + resx.res1 |= SCTLR_EL1_RES1; 1488 1480 break; 1489 1481 case MDCR_EL2: 1490 - *res0 = compute_reg_res0_bits(kvm, &mdcr_el2_desc, 0, 0); 1491 - *res1 = MDCR_EL2_RES1; 1482 + resx = compute_reg_resx_bits(kvm, &mdcr_el2_desc, 0, 0); 1483 + resx.res1 |= MDCR_EL2_RES1; 1492 1484 break; 1493 1485 case VTCR_EL2: 1494 - *res0 = compute_reg_res0_bits(kvm, &vtcr_el2_desc, 0, 0); 1495 - *res1 = VTCR_EL2_RES1; 1486 + resx = compute_reg_resx_bits(kvm, &vtcr_el2_desc, 0, 0); 1487 + resx.res1 |= VTCR_EL2_RES1; 1496 1488 break; 1497 1489 default: 1498 1490 WARN_ON_ONCE(1); 1499 - *res0 = *res1 = 0; 1491 + resx = (typeof(resx)){}; 1500 1492 break; 1501 1493 } 1494 + 1495 + return resx; 1502 1496 } 1503 1497 1504 1498 static __always_inline struct fgt_masks *__fgt_reg_to_masks(enum vcpu_sysreg reg)
+64 -65
arch/arm64/kvm/nested.c
··· 1683 1683 return v; 1684 1684 } 1685 1685 1686 - static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1) 1686 + static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, struct resx resx) 1687 1687 { 1688 - int i = sr - __SANITISED_REG_START__; 1689 - 1690 1688 BUILD_BUG_ON(!__builtin_constant_p(sr)); 1691 1689 BUILD_BUG_ON(sr < __SANITISED_REG_START__); 1692 1690 BUILD_BUG_ON(sr >= NR_SYS_REGS); 1693 1691 1694 - kvm->arch.sysreg_masks->mask[i].res0 = res0; 1695 - kvm->arch.sysreg_masks->mask[i].res1 = res1; 1692 + kvm_set_sysreg_resx(kvm, sr, resx); 1696 1693 } 1697 1694 1698 1695 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu) 1699 1696 { 1700 1697 struct kvm *kvm = vcpu->kvm; 1701 - u64 res0, res1; 1698 + struct resx resx; 1702 1699 1703 1700 lockdep_assert_held(&kvm->arch.config_lock); 1704 1701 ··· 1708 1711 return -ENOMEM; 1709 1712 1710 1713 /* VTTBR_EL2 */ 1711 - res0 = res1 = 0; 1714 + resx = (typeof(resx)){}; 1712 1715 if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16)) 1713 - res0 |= GENMASK(63, 56); 1716 + resx.res0 |= GENMASK(63, 56); 1714 1717 if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP)) 1715 - res0 |= VTTBR_CNP_BIT; 1716 - set_sysreg_masks(kvm, VTTBR_EL2, res0, res1); 1718 + resx.res0 |= VTTBR_CNP_BIT; 1719 + set_sysreg_masks(kvm, VTTBR_EL2, resx); 1717 1720 1718 1721 /* VTCR_EL2 */ 1719 - get_reg_fixed_bits(kvm, VTCR_EL2, &res0, &res1); 1720 - set_sysreg_masks(kvm, VTCR_EL2, res0, res1); 1722 + resx = get_reg_fixed_bits(kvm, VTCR_EL2); 1723 + set_sysreg_masks(kvm, VTCR_EL2, resx); 1721 1724 1722 1725 /* VMPIDR_EL2 */ 1723 - res0 = GENMASK(63, 40) | GENMASK(30, 24); 1724 - res1 = BIT(31); 1725 - set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1); 1726 + resx.res0 = GENMASK(63, 40) | GENMASK(30, 24); 1727 + resx.res1 = BIT(31); 1728 + set_sysreg_masks(kvm, VMPIDR_EL2, resx); 1726 1729 1727 1730 /* HCR_EL2 */ 1728 - get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1); 1729 - set_sysreg_masks(kvm, 
HCR_EL2, res0, res1); 1731 + resx = get_reg_fixed_bits(kvm, HCR_EL2); 1732 + set_sysreg_masks(kvm, HCR_EL2, resx); 1730 1733 1731 1734 /* HCRX_EL2 */ 1732 - get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1); 1733 - set_sysreg_masks(kvm, HCRX_EL2, res0, res1); 1735 + resx = get_reg_fixed_bits(kvm, HCRX_EL2); 1736 + set_sysreg_masks(kvm, HCRX_EL2, resx); 1734 1737 1735 1738 /* HFG[RW]TR_EL2 */ 1736 - get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1); 1737 - set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1); 1738 - get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1); 1739 - set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1); 1739 + resx = get_reg_fixed_bits(kvm, HFGRTR_EL2); 1740 + set_sysreg_masks(kvm, HFGRTR_EL2, resx); 1741 + resx = get_reg_fixed_bits(kvm, HFGWTR_EL2); 1742 + set_sysreg_masks(kvm, HFGWTR_EL2, resx); 1740 1743 1741 1744 /* HDFG[RW]TR_EL2 */ 1742 - get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1); 1743 - set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1); 1744 - get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1); 1745 - set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1); 1745 + resx = get_reg_fixed_bits(kvm, HDFGRTR_EL2); 1746 + set_sysreg_masks(kvm, HDFGRTR_EL2, resx); 1747 + resx = get_reg_fixed_bits(kvm, HDFGWTR_EL2); 1748 + set_sysreg_masks(kvm, HDFGWTR_EL2, resx); 1746 1749 1747 1750 /* HFGITR_EL2 */ 1748 - get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1); 1749 - set_sysreg_masks(kvm, HFGITR_EL2, res0, res1); 1751 + resx = get_reg_fixed_bits(kvm, HFGITR_EL2); 1752 + set_sysreg_masks(kvm, HFGITR_EL2, resx); 1750 1753 1751 1754 /* HAFGRTR_EL2 - not a lot to see here */ 1752 - get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1); 1753 - set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1); 1755 + resx = get_reg_fixed_bits(kvm, HAFGRTR_EL2); 1756 + set_sysreg_masks(kvm, HAFGRTR_EL2, resx); 1754 1757 1755 1758 /* HFG[RW]TR2_EL2 */ 1756 - get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1); 1757 - set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1); 1758 - get_reg_fixed_bits(kvm, 
HFGWTR2_EL2, &res0, &res1); 1759 - set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1); 1759 + resx = get_reg_fixed_bits(kvm, HFGRTR2_EL2); 1760 + set_sysreg_masks(kvm, HFGRTR2_EL2, resx); 1761 + resx = get_reg_fixed_bits(kvm, HFGWTR2_EL2); 1762 + set_sysreg_masks(kvm, HFGWTR2_EL2, resx); 1760 1763 1761 1764 /* HDFG[RW]TR2_EL2 */ 1762 - get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1); 1763 - set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1); 1764 - get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1); 1765 - set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1); 1765 + resx = get_reg_fixed_bits(kvm, HDFGRTR2_EL2); 1766 + set_sysreg_masks(kvm, HDFGRTR2_EL2, resx); 1767 + resx = get_reg_fixed_bits(kvm, HDFGWTR2_EL2); 1768 + set_sysreg_masks(kvm, HDFGWTR2_EL2, resx); 1766 1769 1767 1770 /* HFGITR2_EL2 */ 1768 - get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1); 1769 - set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1); 1771 + resx = get_reg_fixed_bits(kvm, HFGITR2_EL2); 1772 + set_sysreg_masks(kvm, HFGITR2_EL2, resx); 1770 1773 1771 1774 /* TCR2_EL2 */ 1772 - get_reg_fixed_bits(kvm, TCR2_EL2, &res0, &res1); 1773 - set_sysreg_masks(kvm, TCR2_EL2, res0, res1); 1775 + resx = get_reg_fixed_bits(kvm, TCR2_EL2); 1776 + set_sysreg_masks(kvm, TCR2_EL2, resx); 1774 1777 1775 1778 /* SCTLR_EL1 */ 1776 - get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1); 1777 - set_sysreg_masks(kvm, SCTLR_EL1, res0, res1); 1779 + resx = get_reg_fixed_bits(kvm, SCTLR_EL1); 1780 + set_sysreg_masks(kvm, SCTLR_EL1, resx); 1778 1781 1779 1782 /* SCTLR2_ELx */ 1780 - get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1); 1781 - set_sysreg_masks(kvm, SCTLR2_EL1, res0, res1); 1782 - get_reg_fixed_bits(kvm, SCTLR2_EL2, &res0, &res1); 1783 - set_sysreg_masks(kvm, SCTLR2_EL2, res0, res1); 1783 + resx = get_reg_fixed_bits(kvm, SCTLR2_EL1); 1784 + set_sysreg_masks(kvm, SCTLR2_EL1, resx); 1785 + resx = get_reg_fixed_bits(kvm, SCTLR2_EL2); 1786 + set_sysreg_masks(kvm, SCTLR2_EL2, resx); 1784 1787 1785 1788 /* MDCR_EL2 */ 1786 - 
get_reg_fixed_bits(kvm, MDCR_EL2, &res0, &res1); 1787 - set_sysreg_masks(kvm, MDCR_EL2, res0, res1); 1789 + resx = get_reg_fixed_bits(kvm, MDCR_EL2); 1790 + set_sysreg_masks(kvm, MDCR_EL2, resx); 1788 1791 1789 1792 /* CNTHCTL_EL2 */ 1790 - res0 = GENMASK(63, 20); 1791 - res1 = 0; 1793 + resx.res0 = GENMASK(63, 20); 1794 + resx.res1 = 0; 1792 1795 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP)) 1793 - res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK; 1796 + resx.res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK; 1794 1797 if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) { 1795 - res0 |= CNTHCTL_ECV; 1798 + resx.res0 |= CNTHCTL_ECV; 1796 1799 if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP)) 1797 - res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT | 1798 - CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT); 1800 + resx.res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT | 1801 + CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT); 1799 1802 } 1800 1803 if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP)) 1801 - res0 |= GENMASK(11, 8); 1802 - set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1); 1804 + resx.res0 |= GENMASK(11, 8); 1805 + set_sysreg_masks(kvm, CNTHCTL_EL2, resx); 1803 1806 1804 1807 /* ICH_HCR_EL2 */ 1805 - res0 = ICH_HCR_EL2_RES0; 1806 - res1 = ICH_HCR_EL2_RES1; 1808 + resx.res0 = ICH_HCR_EL2_RES0; 1809 + resx.res1 = ICH_HCR_EL2_RES1; 1807 1810 if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS)) 1808 - res0 |= ICH_HCR_EL2_TDIR; 1811 + resx.res0 |= ICH_HCR_EL2_TDIR; 1809 1812 /* No GICv4 is presented to the guest */ 1810 - res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount; 1811 - set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1); 1813 + resx.res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount; 1814 + set_sysreg_masks(kvm, ICH_HCR_EL2, resx); 1812 1815 1813 1816 /* VNCR_EL2 */ 1814 - set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1); 1817 + resx.res0 = VNCR_EL2_RES0; 1818 + resx.res1 = VNCR_EL2_RES1; 1819 + set_sysreg_masks(kvm, VNCR_EL2, resx); 1815 1820 1816 1821 out: 1817 1822 for 
(enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)