Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'kvmarm-fixes-7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 7.0, take #2

- Fix a couple of low-severity bugs in our S2 fault handling path,
affecting the recently introduced LS64 handling and the even more
esoteric handling of hwpoison in a nested context

- Address yet another syzkaller finding in the vgic initialisation,
where we would end up destroying an uninitialised vgic, with nasty
consequences

- Address an annoying case of pKVM failing to boot when some of the
memblock regions that the host is faulting in are not page-aligned

- Inject some sanity in the NV stage-2 walker by checking the limits
against the advertised PA size, and correctly report the resulting
faults

- Drop an unnecessary ISB when emulating an EL2 S1 address translation

+43 -36
-2
arch/arm64/kvm/at.c
··· 1504 1504 fail = true; 1505 1505 } 1506 1506 1507 - isb(); 1508 - 1509 1507 if (!fail) 1510 1508 par = read_sysreg_par(); 1511 1509
+1 -1
arch/arm64/kvm/hyp/nvhe/mem_protect.c
··· 518 518 granule = kvm_granule_size(level); 519 519 cur.start = ALIGN_DOWN(addr, granule); 520 520 cur.end = cur.start + granule; 521 - if (!range_included(&cur, range)) 521 + if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL) 522 522 continue; 523 523 *range = cur; 524 524 return 0;
+9 -5
arch/arm64/kvm/mmu.c
··· 1751 1751 1752 1752 force_pte = (max_map_size == PAGE_SIZE); 1753 1753 vma_pagesize = min_t(long, vma_pagesize, max_map_size); 1754 + vma_shift = __ffs(vma_pagesize); 1754 1755 } 1755 1756 1756 1757 /* ··· 1838 1837 if (exec_fault && s2_force_noncacheable) 1839 1838 ret = -ENOEXEC; 1840 1839 1841 - if (ret) { 1842 - kvm_release_page_unused(page); 1843 - return ret; 1844 - } 1840 + if (ret) 1841 + goto out_put_page; 1845 1842 1846 1843 /* 1847 1844 * Guest performs atomic/exclusive operations on memory with unsupported ··· 1849 1850 */ 1850 1851 if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) { 1851 1852 kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu)); 1852 - return 1; 1853 + ret = 1; 1854 + goto out_put_page; 1853 1855 } 1854 1856 1855 1857 if (nested) ··· 1936 1936 mark_page_dirty_in_slot(kvm, memslot, gfn); 1937 1937 1938 1938 return ret != -EAGAIN ? ret : 0; 1939 + 1940 + out_put_page: 1941 + kvm_release_page_unused(page); 1942 + return ret; 1939 1943 } 1940 1944 1941 1945 /* Resolve the access fault by making the page young again. */
+16 -11
arch/arm64/kvm/nested.c
··· 152 152 return 64 - wi->t0sz; 153 153 } 154 154 155 - static int check_base_s2_limits(struct s2_walk_info *wi, 155 + static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi, 156 156 int level, int input_size, int stride) 157 157 { 158 - int start_size, ia_size; 158 + int start_size, pa_max; 159 159 160 - ia_size = get_ia_size(wi); 160 + pa_max = kvm_get_pa_bits(vcpu->kvm); 161 161 162 162 /* Check translation limits */ 163 163 switch (BIT(wi->pgshift)) { 164 164 case SZ_64K: 165 - if (level == 0 || (level == 1 && ia_size <= 42)) 165 + if (level == 0 || (level == 1 && pa_max <= 42)) 166 166 return -EFAULT; 167 167 break; 168 168 case SZ_16K: 169 - if (level == 0 || (level == 1 && ia_size <= 40)) 169 + if (level == 0 || (level == 1 && pa_max <= 40)) 170 170 return -EFAULT; 171 171 break; 172 172 case SZ_4K: 173 - if (level < 0 || (level == 0 && ia_size <= 42)) 173 + if (level < 0 || (level == 0 && pa_max <= 42)) 174 174 return -EFAULT; 175 175 break; 176 176 } 177 177 178 178 /* Check input size limits */ 179 - if (input_size > ia_size) 179 + if (input_size > pa_max) 180 180 return -EFAULT; 181 181 182 182 /* Check number of entries in starting level table */ ··· 269 269 if (input_size > 48 || input_size < 25) 270 270 return -EFAULT; 271 271 272 - ret = check_base_s2_limits(wi, level, input_size, stride); 273 - if (WARN_ON(ret)) 272 + ret = check_base_s2_limits(vcpu, wi, level, input_size, stride); 273 + if (WARN_ON(ret)) { 274 + out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT); 274 275 return ret; 276 + } 275 277 276 278 base_lower_bound = 3 + input_size - ((3 - level) * stride + 277 279 wi->pgshift); 278 280 base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound); 279 281 280 282 if (check_output_size(wi, base_addr)) { 281 - out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ); 283 + /* R_BFHQH */ 284 + out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ); 282 285 return 1; 283 286 } 284 287 ··· 296 293 297 294 paddr = base_addr | index; 298 295 ret 
= read_guest_s2_desc(vcpu, paddr, &desc, wi); 299 - if (ret < 0) 296 + if (ret < 0) { 297 + out->esr = ESR_ELx_FSC_SEA_TTW(level); 300 298 return ret; 299 + } 301 300 302 301 new_desc = desc; 303 302
+17 -17
arch/arm64/kvm/vgic/vgic-init.c
··· 143 143 kvm->arch.vgic.in_kernel = true; 144 144 kvm->arch.vgic.vgic_model = type; 145 145 kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST; 146 - 147 - kvm_for_each_vcpu(i, vcpu, kvm) { 148 - ret = vgic_allocate_private_irqs_locked(vcpu, type); 149 - if (ret) 150 - break; 151 - } 152 - 153 - if (ret) { 154 - kvm_for_each_vcpu(i, vcpu, kvm) { 155 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 156 - kfree(vgic_cpu->private_irqs); 157 - vgic_cpu->private_irqs = NULL; 158 - } 159 - 160 - goto out_unlock; 161 - } 162 - 163 146 kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF; 164 147 165 148 aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC; ··· 158 175 159 176 kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0); 160 177 kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1); 178 + 179 + kvm_for_each_vcpu(i, vcpu, kvm) { 180 + ret = vgic_allocate_private_irqs_locked(vcpu, type); 181 + if (ret) 182 + break; 183 + } 184 + 185 + if (ret) { 186 + kvm_for_each_vcpu(i, vcpu, kvm) { 187 + struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; 188 + kfree(vgic_cpu->private_irqs); 189 + vgic_cpu->private_irqs = NULL; 190 + } 191 + 192 + kvm->arch.vgic.vgic_model = 0; 193 + goto out_unlock; 194 + } 161 195 162 196 if (type == KVM_DEV_TYPE_ARM_VGIC_V3) 163 197 kvm->arch.vgic.nassgicap = system_supports_direct_sgis();