Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Avi Kivity:
"Fixing a scheduling-while-atomic bug in the ppc code, and a bug which
allowed pci bridges to be assigned to guests."

* git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: PPC: Book3S HV: Drop locks around call to kvmppc_pin_guest_page
KVM: Fix PCI header check on device assignment

2 files changed: +67 -33

arch/powerpc/kvm/book3s_hv.c (+66 -30)
···
 	return err;
 }
 
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
 {
+	struct kvm *kvm = vcpu->kvm;
 	void *va;
 	unsigned long nb;
+	unsigned long gpa;
+
+	/*
+	 * We need to pin the page pointed to by vpap->next_gpa,
+	 * but we can't call kvmppc_pin_guest_page under the lock
+	 * as it does get_user_pages() and down_read().  So we
+	 * have to drop the lock, pin the page, then get the lock
+	 * again and check that a new area didn't get registered
+	 * in the meantime.
+	 */
+	for (;;) {
+		gpa = vpap->next_gpa;
+		spin_unlock(&vcpu->arch.vpa_update_lock);
+		va = NULL;
+		nb = 0;
+		if (gpa)
+			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+		spin_lock(&vcpu->arch.vpa_update_lock);
+		if (gpa == vpap->next_gpa)
+			break;
+		/* sigh... unpin that one and try again */
+		if (va)
+			kvmppc_unpin_guest_page(kvm, va);
+	}
 
 	vpap->update_pending = 0;
-	va = NULL;
-	if (vpap->next_gpa) {
-		va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
-		if (nb < vpap->len) {
-			/*
-			 * If it's now too short, it must be that userspace
-			 * has changed the mappings underlying guest memory,
-			 * so unregister the region.
-			 */
-			kvmppc_unpin_guest_page(kvm, va);
-			va = NULL;
-		}
+	if (va && nb < vpap->len) {
+		/*
+		 * If it's now too short, it must be that userspace
+		 * has changed the mappings underlying guest memory,
+		 * so unregister the region.
+		 */
+		kvmppc_unpin_guest_page(kvm, va);
+		va = NULL;
 	}
 	if (vpap->pinned_addr)
 		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
···
 
 static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 {
-	struct kvm *kvm = vcpu->kvm;
-
 	spin_lock(&vcpu->arch.vpa_update_lock);
 	if (vcpu->arch.vpa.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
 		init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
 	}
 	if (vcpu->arch.dtl.update_pending) {
-		kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
 		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
 		vcpu->arch.dtl_index = 0;
 	}
 	if (vcpu->arch.slb_shadow.update_pending)
-		kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
 	spin_unlock(&vcpu->arch.vpa_update_lock);
 }
 
···
 	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
 	long ret;
 	u64 now;
-	int ptid, i;
+	int ptid, i, need_vpa_update;
 
 	/* don't start if any threads have a signal pending */
-	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+	need_vpa_update = 0;
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		if (signal_pending(vcpu->arch.run_task))
 			return 0;
+		need_vpa_update |= vcpu->arch.vpa.update_pending |
+			vcpu->arch.slb_shadow.update_pending |
+			vcpu->arch.dtl.update_pending;
+	}
+
+	/*
+	 * Initialize *vc, in particular vc->vcore_state, so we can
+	 * drop the vcore lock if necessary.
+	 */
+	vc->n_woken = 0;
+	vc->nap_count = 0;
+	vc->entry_exit_count = 0;
+	vc->vcore_state = VCORE_RUNNING;
+	vc->in_guest = 0;
+	vc->napping_threads = 0;
+
+	/*
+	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+	 * which can't be called with any spinlocks held.
+	 */
+	if (need_vpa_update) {
+		spin_unlock(&vc->lock);
+		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+			kvmppc_update_vpas(vcpu);
+		spin_lock(&vc->lock);
+	}
 
 	/*
 	 * Make sure we are running on thread 0, and that
···
 		if (vcpu->arch.ceded)
 			vcpu->arch.ptid = ptid++;
 
-	vc->n_woken = 0;
-	vc->nap_count = 0;
-	vc->entry_exit_count = 0;
-	vc->vcore_state = VCORE_RUNNING;
 	vc->stolen_tb += mftb() - vc->preempt_tb;
-	vc->in_guest = 0;
 	vc->pcpu = smp_processor_id();
-	vc->napping_threads = 0;
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
-		if (vcpu->arch.vpa.update_pending ||
-		    vcpu->arch.slb_shadow.update_pending ||
-		    vcpu->arch.dtl.update_pending)
-			kvmppc_update_vpas(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
 	}
 	/* Grab any remaining hw threads so they can't go into the kernel */
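Both PPC hunks implement the same drop-and-revalidate pattern: snapshot the state the spinlock protects, release the lock before calling kvmppc_pin_guest_page() (which can sleep in get_user_pages() and down_read()), then retake the lock and retry if the snapshot went stale. The user-space sketch below isolates that pattern; the names (struct vpa_state, pin_page, unpin_page) are illustrative stand-ins, not kernel APIs, with a pthread mutex standing in for the spinlock.

/* Sketch of the drop-the-lock-then-revalidate pattern from the patch above.
 * All names here are hypothetical; only the control flow mirrors the fix. */
#include <pthread.h>
#include <stdlib.h>

typedef unsigned long gpa_t;

/* Stand-ins for kvmppc_pin_guest_page()/kvmppc_unpin_guest_page(); the real
 * ones may sleep, which is why they must not run under a spinlock. */
static void *pin_page(gpa_t gpa) { (void)gpa; return malloc(64); }
static void unpin_page(void *va) { free(va); }

struct vpa_state {
	pthread_mutex_t lock;	/* plays the role of vcpu->arch.vpa_update_lock */
	gpa_t next_gpa;		/* other threads may change this, under lock */
	void *pinned_addr;
};

/* Called with st->lock held; returns with it held. */
static void update_pinned(struct vpa_state *st)
{
	void *va;
	gpa_t gpa;

	for (;;) {
		gpa = st->next_gpa;			/* snapshot protected state */
		pthread_mutex_unlock(&st->lock);	/* drop lock: pin may sleep */
		va = gpa ? pin_page(gpa) : NULL;
		pthread_mutex_lock(&st->lock);		/* retake and revalidate */
		if (gpa == st->next_gpa)
			break;				/* snapshot still valid */
		if (va)					/* raced with an update: */
			unpin_page(va);			/* undo and try again */
	}
	if (st->pinned_addr)
		unpin_page(st->pinned_addr);
	st->pinned_addr = va;
}

The kvmppc_run_core() hunks apply the same rule one level up: vc->vcore_state and the other vcore fields are now initialized before the VPA updates, so vc->lock can be dropped safely while kvmppc_update_vpas() runs with no spinlock held.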
virt/kvm/assigned-dev.c (+1 -3)
···
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
-	u8 header_type;
 
 	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
 		return -EINVAL;
···
 	}
 
 	/* Don't allow bridges to be assigned */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
 		r = -EPERM;
 		goto out_put;
 	}
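The device-assignment fix corrects a misused constant: PCI_HEADER_TYPE (0x0e) is the config-space offset of the header-type register, not a bit mask, so "header_type & PCI_HEADER_TYPE" mangled the value being tested and let bridges pass as normal devices. The PCI core already caches the header type, with the multifunction bit stripped, in dev->hdr_type, so the fixed code compares that field directly. A standalone demonstration of the masking bug (constant values as in the PCI spec):

#include <stdio.h>

#define PCI_HEADER_TYPE		0x0e	/* register offset, NOT a mask */
#define PCI_HEADER_TYPE_NORMAL	0
#define PCI_HEADER_TYPE_BRIDGE	1

int main(void)
{
	unsigned char header_type = PCI_HEADER_TYPE_BRIDGE;	/* raw value 0x01 */
	unsigned char hdr_type;

	/* Old check: 0x01 & 0x0e == 0, so a bridge compares equal to "normal". */
	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
		printf("old check: bridge rejected\n");
	else
		printf("old check: bridge wrongly accepted\n");	/* this prints */

	/* What the PCI core caches in dev->hdr_type: bits 6:0 of the register. */
	hdr_type = header_type & 0x7f;
	if (hdr_type != PCI_HEADER_TYPE_NORMAL)
		printf("new check: bridge rejected\n");		/* this prints */

	return 0;
}

The same masking bug would also admit a multifunction bridge (raw header 0x81), since 0x81 & 0x0e is likewise zero; relying on the core's pre-masked hdr_type avoids both cases and drops a config-space read.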