Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'kvm-riscv-fixes-7.0-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 7.0, take #1

- Prevent speculative out-of-bounds access using array_index_nospec()
in APLIC interrupt handling, ONE_REG register access, AIA CSR access,
float register access, and PMU counter access (see the sketch after
this list)
- Fix potential use-after-free issues in kvm_riscv_gstage_get_leaf(),
kvm_riscv_aia_aplic_has_attr(), and kvm_riscv_aia_imsic_has_attr()
- Fix potential null pointer dereference in kvm_riscv_vcpu_aia_rmw_topei()
- Fix off-by-one array access in SBI PMU
- Skip THP support check during dirty logging
- Fix the error code returned by the Smstateen and Ssaia ONE_REG interfaces
- Check host Ssaia extension when creating AIA irqchip
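
Most of these fixes apply the same Spectre-v1 hardening pattern: a bounds
check alone does not stop the CPU from speculatively executing an array
load with an out-of-range index before the branch resolves, so the index
is additionally clamped with array_index_nospec() after the check. A
minimal sketch of the pattern, with hypothetical demo_* names rather than
the actual KVM code:

        #include <linux/errno.h>
        #include <linux/nospec.h>

        #define DEMO_NR_REGS 32

        struct demo_regs {
                unsigned long regs[DEMO_NR_REGS];
        };

        static int demo_get_reg(struct demo_regs *ctx, unsigned long reg_num,
                                unsigned long *out_val)
        {
                /* Architectural bounds check: reject out-of-range indices. */
                if (reg_num >= DEMO_NR_REGS)
                        return -ENOENT;

                /*
                 * Clamp the index under speculation as well: without this,
                 * the load below may execute speculatively with an
                 * out-of-bounds reg_num, leaking data through a cache
                 * side channel.
                 */
                reg_num = array_index_nospec(reg_num, DEMO_NR_REGS);

                *out_val = ctx->regs[reg_num];
                return 0;
        }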

+109 -44
+13 -2
arch/riscv/kvm/aia.c
···
 #include <linux/irqchip/riscv-imsic.h>
 #include <linux/irqdomain.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
 #include <asm/cpufeature.h>
···
                                  unsigned long *out_val)
 {
         struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+        if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
                 return -ENOENT;
+        if (reg_num >= regs_max)
+                return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         *out_val = 0;
         if (kvm_riscv_aia_available())
···
                                  unsigned long val)
 {
         struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+        if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
                 return -ENOENT;
+        if (reg_num >= regs_max)
+                return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         if (kvm_riscv_aia_available()) {
                 ((unsigned long *)csr)[reg_num] = val;
+12 -11
arch/riscv/kvm/aia_aplic.c
···
 #include <linux/irqchip/riscv-aplic.h>
 #include <linux/kvm_host.h>
 #include <linux/math.h>
+#include <linux/nospec.h>
 #include <linux/spinlock.h>
 #include <linux/swab.h>
 #include <kvm/iodev.h>
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return 0;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
         ret = irqd->sourcecfg;
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         if (val & APLIC_SOURCECFG_D)
                 val = 0;
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return 0;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
         ret = irqd->target;
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         val &= APLIC_TARGET_EIID_MASK |
                (APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return false;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
         ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
 
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return false;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
         ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
         if (enabled)
···
 
         if (!irq || aplic->nr_irqs <= irq)
                 return false;
-        irqd = &aplic->irqs[irq];
+        irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
 
···
         for (irq = first; irq <= last; irq++) {
                 if (!irq || aplic->nr_irqs <= irq)
                         continue;
-                irqd = &aplic->irqs[irq];
+                irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
 
                 raw_spin_lock_irqsave(&irqd->lock, flags);
 
···
 
         if (!aplic || !source || (aplic->nr_irqs <= source))
                 return -ENODEV;
-        irqd = &aplic->irqs[source];
+        irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)];
         ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
 
         raw_spin_lock_irqsave(&irqd->lock, flags);
+14 -4
arch/riscv/kvm/aia_device.c
···
 #include <linux/irqchip/riscv-imsic.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
+#include <linux/cpufeature.h>
 
 static int aia_create(struct kvm_device *dev, u32 type)
 {
···
 
         if (irqchip_in_kernel(kvm))
                 return -EEXIST;
+
+        if (!riscv_isa_extension_available(NULL, SSAIA))
+                return -ENODEV;
 
         ret = -EBUSY;
         if (kvm_trylock_all_vcpus(kvm))
···
 
 static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
-        int nr_vcpus;
+        int nr_vcpus, r = -ENXIO;
 
         switch (attr->group) {
         case KVM_DEV_RISCV_AIA_GRP_CONFIG:
···
                 }
                 break;
         case KVM_DEV_RISCV_AIA_GRP_APLIC:
-                return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+                mutex_lock(&dev->kvm->lock);
+                r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+                mutex_unlock(&dev->kvm->lock);
+                break;
         case KVM_DEV_RISCV_AIA_GRP_IMSIC:
-                return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+                mutex_lock(&dev->kvm->lock);
+                r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+                mutex_unlock(&dev->kvm->lock);
+                break;
         }
 
-        return -ENXIO;
+        return r;
 }
 
 struct kvm_device_ops kvm_riscv_aia_device_ops = {
+4
arch/riscv/kvm/aia_imsic.c
···
         int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
         struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
 
+        /* If IMSIC vCPU state not initialized then forward to user space */
+        if (!imsic)
+                return KVM_INSN_EXIT_TO_USER_SPACE;
+
         if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
                 /* Read pending and enabled interrupt with highest priority */
                 topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
+5 -1
arch/riscv/kvm/mmu.c
···
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
 {
         struct kvm_gstage gstage;
+        bool mmu_locked;
 
         if (!kvm->arch.pgd)
                 return false;
···
         gstage.flags = 0;
         gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
         gstage.pgd = kvm->arch.pgd;
+        mmu_locked = spin_trylock(&kvm->mmu_lock);
         kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
                                      (range->end - range->start) << PAGE_SHIFT,
                                      range->may_block);
+        if (mmu_locked)
+                spin_unlock(&kvm->mmu_lock);
         return false;
 }
 
···
                 goto out_unlock;
 
         /* Check if we are backed by a THP and thus use block mapping if possible */
-        if (vma_pagesize == PAGE_SIZE)
+        if (!logging && (vma_pagesize == PAGE_SIZE))
                 vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);
 
         if (writable) {
+13 -4
arch/riscv/kvm/vcpu_fp.c
···
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
 #include <linux/uaccess.h>
 #include <asm/cpufeature.h>
 
···
                 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
                         reg_val = &cntx->fp.f.fcsr;
                 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
-                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+                        reg_num = array_index_nospec(reg_num,
+                                                     ARRAY_SIZE(cntx->fp.f.f));
                         reg_val = &cntx->fp.f.f[reg_num];
-                else
+                } else
                         return -ENOENT;
         } else if ((rtype == KVM_REG_RISCV_FP_D) &&
                    riscv_isa_extension_available(vcpu->arch.isa, d)) {
···
                          reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
                         if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                                 return -EINVAL;
+                        reg_num = array_index_nospec(reg_num,
+                                                     ARRAY_SIZE(cntx->fp.d.f));
                         reg_val = &cntx->fp.d.f[reg_num];
                 } else
                         return -ENOENT;
···
                 if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
                         reg_val = &cntx->fp.f.fcsr;
                 else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
-                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+                          reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+                        reg_num = array_index_nospec(reg_num,
+                                                     ARRAY_SIZE(cntx->fp.f.f));
                         reg_val = &cntx->fp.f.f[reg_num];
-                else
+                } else
                         return -ENOENT;
         } else if ((rtype == KVM_REG_RISCV_FP_D) &&
                    riscv_isa_extension_available(vcpu->arch.isa, d)) {
···
                          reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
                         if (KVM_REG_SIZE(reg->id) != sizeof(u64))
                                 return -EINVAL;
+                        reg_num = array_index_nospec(reg_num,
+                                                     ARRAY_SIZE(cntx->fp.d.f));
                         reg_val = &cntx->fp.d.f[reg_num];
                 } else
                         return -ENOENT;
+36 -18
arch/riscv/kvm/vcpu_onereg.c
···
 #include <linux/bitops.h>
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/nospec.h>
 #include <linux/uaccess.h>
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
···
             kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
                 return -ENOENT;
 
+        kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr));
         *guest_ext = kvm_isa_ext_arr[kvm_ext];
         switch (*guest_ext) {
         case RISCV_ISA_EXT_SMNPM:
···
         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                             KVM_REG_SIZE_MASK |
                                             KVM_REG_RISCV_CORE);
+        unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
         unsigned long reg_val;
 
         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                 return -EINVAL;
-        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+        if (reg_num >= regs_max)
                 return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
                 reg_val = cntx->sepc;
···
         unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
                                             KVM_REG_SIZE_MASK |
                                             KVM_REG_RISCV_CORE);
+        unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
         unsigned long reg_val;
 
         if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
                 return -EINVAL;
-        if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+        if (reg_num >= regs_max)
                 return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
                 return -EFAULT;
···
                                   unsigned long *out_val)
 {
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+        if (reg_num >= regs_max)
                 return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                 kvm_riscv_vcpu_flush_interrupts(vcpu);
···
                                   unsigned long reg_val)
 {
         struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+        if (reg_num >= regs_max)
                 return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
                 reg_val &= VSIP_VALID_MASK;
···
                                               unsigned long reg_val)
 {
         struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+                                 sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
-                sizeof(unsigned long))
-                return -EINVAL;
+        if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+                return -ENOENT;
+        if (reg_num >= regs_max)
+                return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         ((unsigned long *)csr)[reg_num] = reg_val;
         return 0;
···
                                               unsigned long *out_val)
 {
         struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+        unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+                                 sizeof(unsigned long);
 
-        if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
-                sizeof(unsigned long))
-                return -EINVAL;
+        if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+                return -ENOENT;
+        if (reg_num >= regs_max)
+                return -ENOENT;
+
+        reg_num = array_index_nospec(reg_num, regs_max);
 
         *out_val = ((unsigned long *)csr)[reg_num];
         return 0;
···
                 rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
                 break;
         case KVM_REG_RISCV_CSR_SMSTATEEN:
-                rc = -EINVAL;
-                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
-                        rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
-                                                              &reg_val);
+                rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val);
                 break;
         default:
                 rc = -ENOENT;
···
                 rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
                 break;
         case KVM_REG_RISCV_CSR_SMSTATEEN:
-                rc = -EINVAL;
-                if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
-                        rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
-                                                              reg_val);
+                rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
                 break;
         default:
                 rc = -ENOENT;
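
For context, the ONE_REG accessors hardened above are reached from user
space via the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, where the register
number is guest-owner controlled. A hypothetical userspace sketch (error
handling trimmed; assumes vcpu_fd is an open KVM vCPU fd on riscv64):

        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        static int read_guest_pc(int vcpu_fd, uint64_t *pc)
        {
                /* id = arch | size | register class | register number */
                struct kvm_one_reg reg = {
                        .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                              KVM_REG_RISCV_CORE |
                              KVM_REG_RISCV_CORE_REG(regs.pc),
                        .addr = (uint64_t)(uintptr_t)pc,
                };

                /* The kernel bounds-checks and clamps the register number
                 * before indexing its register array (vcpu_onereg.c above). */
                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }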
+12 -4
arch/riscv/kvm/vcpu_pmu.c
···
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/kvm_host.h>
+#include <linux/nospec.h>
 #include <linux/perf/riscv_pmu.h>
 #include <asm/csr.h>
 #include <asm/kvm_vcpu_sbi.h>
···
 
 static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
 {
-        return hw_event_perf_map[sbi_event_code];
+        return hw_event_perf_map[array_index_nospec(sbi_event_code,
+                                                    SBI_PMU_HW_GENERAL_MAX)];
 }
 
 static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
···
                 return -EINVAL;
         }
 
+        cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
         pmc = &kvpmu->pmc[cidx];
 
         if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
···
                 return -EINVAL;
         }
 
+        cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
         pmc = &kvpmu->pmc[cidx];
 
         if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
···
 {
         struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
 
-        if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) {
+        if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
                 retdata->err_val = SBI_ERR_INVALID_PARAM;
                 return 0;
         }
 
+        cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
         retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
 
         return 0;
···
         }
         /* Start the counters that have been configured and requested by the guest */
         for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
-                pmc_index = i + ctr_base;
+                pmc_index = array_index_nospec(i + ctr_base,
+                                               RISCV_KVM_MAX_COUNTERS);
                 if (!test_bit(pmc_index, kvpmu->pmc_in_use))
                         continue;
                 /* The guest started the counter again. Reset the overflow status */
···
 
         /* Stop the counters that have been configured and requested by the guest */
         for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
-                pmc_index = i + ctr_base;
+                pmc_index = array_index_nospec(i + ctr_base,
+                                               RISCV_KVM_MAX_COUNTERS);
                 if (!test_bit(pmc_index, kvpmu->pmc_in_use))
                         continue;
                 pmc = &kvpmu->pmc[pmc_index];
···
                 }
         }
 
+        ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS);
         pmc = &kvpmu->pmc[ctr_idx];
         pmc->idx = ctr_idx;