Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch kvm-arm64/fwb-for-all into kvmarm-master/next

* kvm-arm64/fwb-for-all:
: .
: Allow pKVM's host stage-2 mappings to use the Force Write Back version
: of the memory attributes by using the "pass-through" encoding.
:
: This avoids having two separate encodings for S2 on a given platform.
: .
KVM: arm64: Simplify PAGE_S2_MEMATTR
KVM: arm64: Kill KVM_PGTABLE_S2_NOFWB
KVM: arm64: Switch pKVM host S2 over to KVM_PGTABLE_S2_AS_S1
KVM: arm64: Add KVM_PGTABLE_S2_AS_S1 flag
arm64: Add MT_S2{,_FWB}_AS_S1 encodings

Signed-off-by: Marc Zyngier <maz@kernel.org>

+30 -22
+3 -4
arch/arm64/include/asm/kvm_pgtable.h
··· 231 231 232 232 /** 233 233 * enum kvm_pgtable_stage2_flags - Stage-2 page-table flags. 234 - * @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have 235 - * ARM64_HAS_STAGE2_FWB. 236 234 * @KVM_PGTABLE_S2_IDMAP: Only use identity mappings. 235 + * @KVM_PGTABLE_S2_AS_S1: Final memory attributes are that of Stage-1. 237 236 */ 238 237 enum kvm_pgtable_stage2_flags { 239 - KVM_PGTABLE_S2_NOFWB = BIT(0), 240 - KVM_PGTABLE_S2_IDMAP = BIT(1), 238 + KVM_PGTABLE_S2_IDMAP = BIT(0), 239 + KVM_PGTABLE_S2_AS_S1 = BIT(1), 241 240 }; 242 241 243 242 /**
+8 -3
arch/arm64/include/asm/memory.h
··· 175 175 #define MT_DEVICE_nGnRE 4 176 176 177 177 /* 178 - * Memory types for Stage-2 translation 178 + * Memory types for Stage-2 translation when HCR_EL2.FWB=0. See R_HMNDG, 179 + * R_TNHFM, R_GQFSF and I_MCQKW for the details on how these attributes get 180 + * combined with Stage-1. 179 181 */ 180 182 #define MT_S2_NORMAL 0xf 181 183 #define MT_S2_NORMAL_NC 0x5 182 184 #define MT_S2_DEVICE_nGnRE 0x1 185 + #define MT_S2_AS_S1 MT_S2_NORMAL 183 186 184 187 /* 185 - * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001 186 - * Stage-2 enforces Normal-WB and Device-nGnRE 188 + * Memory types for Stage-2 translation when HCR_EL2.FWB=1. Stage-2 enforces 189 + * Normal-WB and Device-nGnRE, unless we actively say that S1 wins. See 190 + * R_VRJSW and R_RHWZM for details. 187 191 */ 188 192 #define MT_S2_FWB_NORMAL 6 189 193 #define MT_S2_FWB_NORMAL_NC 5 190 194 #define MT_S2_FWB_DEVICE_nGnRE 1 195 + #define MT_S2_FWB_AS_S1 7 191 196 192 197 #ifdef CONFIG_ARM64_4K_PAGES 193 198 #define IOREMAP_MAX_ORDER (PUD_SHIFT)
+2 -2
arch/arm64/include/asm/pgtable-prot.h
··· 109 109 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC) 110 110 #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_KERNEL_EXEC_CONT) 111 111 112 - #define PAGE_S2_MEMATTR(attr, has_fwb) \ 112 + #define PAGE_S2_MEMATTR(attr) \ 113 113 ({ \ 114 114 u64 __val; \ 115 - if (has_fwb) \ 115 + if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) \ 116 116 __val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr); \ 117 117 else \ 118 118 __val = PTE_S2_MEMATTR(MT_S2_ ## attr); \
+3 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 19 19 #include <nvhe/mem_protect.h> 20 20 #include <nvhe/mm.h> 21 21 22 - #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP) 22 + #define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_AS_S1 | KVM_PGTABLE_S2_IDMAP) 23 23 24 24 struct host_mmu host_mmu; 25 25 ··· 324 324 params->vttbr = kvm_get_vttbr(mmu); 325 325 params->vtcr = mmu->vtcr; 326 326 params->hcr_el2 |= HCR_VM; 327 + if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 328 + params->hcr_el2 |= HCR_FWB; 327 329 328 330 /* 329 331 * The CMO below not only cleans the updated params to the
+14 -12
arch/arm64/kvm/hyp/pgtable.c
··· 647 647 return vtcr; 648 648 } 649 649 650 - static bool stage2_has_fwb(struct kvm_pgtable *pgt) 651 - { 652 - if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 653 - return false; 654 - 655 - return !(pgt->flags & KVM_PGTABLE_S2_NOFWB); 656 - } 657 - 658 650 void kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, 659 651 phys_addr_t addr, size_t size) 660 652 { ··· 667 675 } 668 676 } 669 677 670 - #define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt)) 678 + #define KVM_S2_MEMATTR(pgt, attr) \ 679 + ({ \ 680 + kvm_pte_t __attr; \ 681 + \ 682 + if ((pgt)->flags & KVM_PGTABLE_S2_AS_S1) \ 683 + __attr = PAGE_S2_MEMATTR(AS_S1); \ 684 + else \ 685 + __attr = PAGE_S2_MEMATTR(attr); \ 686 + \ 687 + __attr; \ 688 + }) 671 689 672 690 static int stage2_set_xn_attr(enum kvm_pgtable_prot prot, kvm_pte_t *attr) 673 691 { ··· 886 884 * system supporting FWB as the optimization is entirely 887 885 * pointless when the unmap walker needs to perform CMOs. 888 886 */ 889 - return system_supports_tlb_range() && stage2_has_fwb(pgt); 887 + return system_supports_tlb_range() && cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); 890 888 } 891 889 892 890 static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx, ··· 1166 1164 if (mm_ops->page_count(childp) != 1) 1167 1165 return 0; 1168 1166 } else if (stage2_pte_cacheable(pgt, ctx->old)) { 1169 - need_flush = !stage2_has_fwb(pgt); 1167 + need_flush = !cpus_have_final_cap(ARM64_HAS_STAGE2_FWB); 1170 1168 } 1171 1169 1172 1170 /* ··· 1397 1395 .arg = pgt, 1398 1396 }; 1399 1397 1400 - if (stage2_has_fwb(pgt)) 1398 + if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 1401 1399 return 0; 1402 1400 1403 1401 return kvm_pgtable_walk(pgt, addr, size, &walker);