Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
"Generic:

- Set .owner for various KVM file_operations so that files refcount
the KVM module until KVM is done executing _all_ code, including
the last few instructions of kvm_put_kvm(). And then revert the
misguided attempt to rely on "struct kvm" refcounts to pin
KVM-the-module.

ARM:

- Do not redo the mapping of vLPIs, if they have already been mapped

s390:

- Do not leave bits behind in PTEs

- Properly catch page invalidations that affect the prefix of a
nested guest

x86:

- When checking if a _running_ vCPU is "in-kernel", i.e. running at
CPL0, get the CPL directly instead of relying on
preempted_in_kernel (which is valid if and only if the vCPU was
preempted, i.e. NOT running).

- Fix a benign "return void" that was recently introduced.

Selftests:

- Makefile tweak for dependency generation

- '-Wformat' fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: SVM: Update EFER software model on CR0 trap for SEV-ES
KVM: selftests: add -MP to CFLAGS
KVM: selftests: Actually print out magic token in NX hugepages skip message
KVM: x86: Remove 'return void' expression for 'void function'
Revert "KVM: Prevent module exit until all VMs are freed"
KVM: Set file_operations.owner appropriately for all such structures
KVM: x86: Get CPL directly when checking if loaded vCPU is in kernel mode
KVM: arm64: GICv4: Do not perform a map to a mapped vLPI
KVM: s390/mm: Properly reset no-dat
KVM: s390: vsie: fix wrong VIR 37 when MSO is used

+27 -23
+4
arch/arm64/kvm/vgic/vgic-v4.c
··· 436 436 if (ret) 437 437 goto out; 438 438 439 + /* Silently exit if the vLPI is already mapped */ 440 + if (irq->hw) 441 + goto out; 442 + 439 443 /* 440 444 * Emit the mapping request. If it fails, the ITS probably 441 445 * isn't v4 compatible, so let's silently bail out. Holding
-4
arch/s390/kvm/vsie.c
··· 587 587 588 588 if (!gmap_is_shadow(gmap)) 589 589 return; 590 - if (start >= 1UL << 31) 591 - /* We are only interested in prefix pages */ 592 - return; 593 - 594 590 /* 595 591 * Only new shadow blocks are added to the list during runtime, 596 592 * therefore we can safely reference them all the time.
+1 -1
arch/s390/mm/pgtable.c
··· 756 756 pte_clear(mm, addr, ptep); 757 757 } 758 758 if (reset) 759 - pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; 759 + pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT); 760 760 pgste_set_unlock(ptep, pgste); 761 761 preempt_enable(); 762 762 }
+1
arch/x86/kvm/debugfs.c
··· 182 182 } 183 183 184 184 static const struct file_operations mmu_rmaps_stat_fops = { 185 + .owner = THIS_MODULE, 185 186 .open = kvm_mmu_rmaps_stat_open, 186 187 .read = seq_read, 187 188 .llseek = seq_lseek,
+5 -3
arch/x86/kvm/svm/svm.c
··· 1855 1855 bool old_paging = is_paging(vcpu); 1856 1856 1857 1857 #ifdef CONFIG_X86_64 1858 - if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { 1858 + if (vcpu->arch.efer & EFER_LME) { 1859 1859 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { 1860 1860 vcpu->arch.efer |= EFER_LMA; 1861 - svm->vmcb->save.efer |= EFER_LMA | EFER_LME; 1861 + if (!vcpu->arch.guest_state_protected) 1862 + svm->vmcb->save.efer |= EFER_LMA | EFER_LME; 1862 1863 } 1863 1864 1864 1865 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { 1865 1866 vcpu->arch.efer &= ~EFER_LMA; 1866 - svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); 1867 + if (!vcpu->arch.guest_state_protected) 1868 + svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); 1867 1869 } 1868 1870 } 1869 1871 #endif
+6 -3
arch/x86/kvm/x86.c
··· 5518 5518 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, 5519 5519 struct kvm_xsave *guest_xsave) 5520 5520 { 5521 - return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, 5522 - sizeof(guest_xsave->region)); 5521 + kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, 5522 + sizeof(guest_xsave->region)); 5523 5523 } 5524 5524 5525 5525 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, ··· 13031 13031 if (vcpu->arch.guest_state_protected) 13032 13032 return true; 13033 13033 13034 - return vcpu->arch.preempted_in_kernel; 13034 + if (vcpu != kvm_get_running_vcpu()) 13035 + return vcpu->arch.preempted_in_kernel; 13036 + 13037 + return static_call(kvm_x86_get_cpl)(vcpu) == 0; 13035 13038 } 13036 13039 13037 13040 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+1 -1
tools/testing/selftests/kvm/Makefile
··· 224 224 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include 225 225 endif 226 226 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ 227 - -Wno-gnu-variable-sized-type-not-at-end -MD\ 227 + -Wno-gnu-variable-sized-type-not-at-end -MD -MP \ 228 228 -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \ 229 229 -fno-builtin-strnlen \ 230 230 -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
+1 -1
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
··· 259 259 __TEST_REQUIRE(token == MAGIC_TOKEN, 260 260 "This test must be run with the magic token %d.\n" 261 261 "This is done by nx_huge_pages_test.sh, which\n" 262 - "also handles environment setup for the test."); 262 + "also handles environment setup for the test.", MAGIC_TOKEN); 263 263 264 264 run_test(reclaim_period_ms, false, reboot_permissions); 265 265 run_test(reclaim_period_ms, true, reboot_permissions);
+8 -10
virt/kvm/kvm_main.c
··· 115 115 116 116 static const struct file_operations stat_fops_per_vm; 117 117 118 - static struct file_operations kvm_chardev_ops; 119 - 120 118 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl, 121 119 unsigned long arg); 122 120 #ifdef CONFIG_KVM_COMPAT ··· 1155 1157 if (!kvm) 1156 1158 return ERR_PTR(-ENOMEM); 1157 1159 1158 - /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */ 1159 - __module_get(kvm_chardev_ops.owner); 1160 - 1161 1160 KVM_MMU_LOCK_INIT(kvm); 1162 1161 mmgrab(current->mm); 1163 1162 kvm->mm = current->mm; ··· 1274 1279 out_err_no_srcu: 1275 1280 kvm_arch_free_vm(kvm); 1276 1281 mmdrop(current->mm); 1277 - module_put(kvm_chardev_ops.owner); 1278 1282 return ERR_PTR(r); 1279 1283 } 1280 1284 ··· 1342 1348 preempt_notifier_dec(); 1343 1349 hardware_disable_all(); 1344 1350 mmdrop(mm); 1345 - module_put(kvm_chardev_ops.owner); 1346 1351 } 1347 1352 1348 1353 void kvm_get_kvm(struct kvm *kvm) ··· 3880 3887 return 0; 3881 3888 } 3882 3889 3883 - static const struct file_operations kvm_vcpu_fops = { 3890 + static struct file_operations kvm_vcpu_fops = { 3884 3891 .release = kvm_vcpu_release, 3885 3892 .unlocked_ioctl = kvm_vcpu_ioctl, 3886 3893 .mmap = kvm_vcpu_mmap, ··· 4074 4081 } 4075 4082 4076 4083 static const struct file_operations kvm_vcpu_stats_fops = { 4084 + .owner = THIS_MODULE, 4077 4085 .read = kvm_vcpu_stats_read, 4078 4086 .release = kvm_vcpu_stats_release, 4079 4087 .llseek = noop_llseek, ··· 4425 4431 return 0; 4426 4432 } 4427 4433 4428 - static const struct file_operations kvm_device_fops = { 4434 + static struct file_operations kvm_device_fops = { 4429 4435 .unlocked_ioctl = kvm_device_ioctl, 4430 4436 .release = kvm_device_release, 4431 4437 KVM_COMPAT(kvm_device_ioctl), ··· 4753 4759 } 4754 4760 4755 4761 static const struct file_operations kvm_vm_stats_fops = { 4762 + .owner = THIS_MODULE, 4756 4763 .read = kvm_vm_stats_read, 4757 4764 .release = kvm_vm_stats_release, 4758 4765 .llseek = noop_llseek, ··· 5055 5060 } 5056 5061 #endif 5057 5062 5058 - static const struct file_operations kvm_vm_fops = { 5063 + static struct file_operations kvm_vm_fops = { 5059 5064 .release = kvm_vm_release, 5060 5065 .unlocked_ioctl = kvm_vm_ioctl, 5061 5066 .llseek = noop_llseek, ··· 6090 6095 goto err_async_pf; 6091 6096 6092 6097 kvm_chardev_ops.owner = module; 6098 + kvm_vm_fops.owner = module; 6099 + kvm_vcpu_fops.owner = module; 6100 + kvm_device_fops.owner = module; 6093 6101 6094 6102 kvm_preempt_ops.sched_in = kvm_sched_in; 6095 6103 kvm_preempt_ops.sched_out = kvm_sched_out;