Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch kvm-arm64/mmio-rcu into kvmarm-master/next

* kvm-arm64/mmio-rcu:
: .
: Speed up MMIO registration by avoiding unnecessary RCU synchronisation,
: courtesy of Keir Fraser (20250909100007.3136249-1-keirf@google.com).
: .
KVM: Avoid synchronize_srcu() in kvm_io_bus_register_dev()
KVM: Implement barriers before accessing kvm->buses[] on SRCU read paths
KVM: arm64: vgic: Explicitly implement vgic_dist::ready ordering
KVM: arm64: vgic-init: Remove vgic_ready() macro

Signed-off-by: Marc Zyngier <maz@kernel.org>

+53 -23
+3 -11
arch/arm64/kvm/vgic/vgic-init.c
··· 554 554 * Also map the virtual CPU interface into the VM. 555 555 * v2 calls vgic_init() if not already done. 556 556 * v3 and derivatives return an error if the VGIC is not initialized. 557 - * vgic_ready() returns true if this function has succeeded. 558 557 */ 559 558 int kvm_vgic_map_resources(struct kvm *kvm) 560 559 { ··· 562 563 gpa_t dist_base; 563 564 int ret = 0; 564 565 565 - if (likely(vgic_ready(kvm))) 566 + if (likely(smp_load_acquire(&dist->ready))) 566 567 return 0; 567 568 568 569 mutex_lock(&kvm->slots_lock); 569 570 mutex_lock(&kvm->arch.config_lock); 570 - if (vgic_ready(kvm)) 571 + if (dist->ready) 571 572 goto out; 572 573 573 574 if (!irqchip_in_kernel(kvm)) ··· 593 594 goto out_slots; 594 595 } 595 596 596 - /* 597 - * kvm_io_bus_register_dev() guarantees all readers see the new MMIO 598 - * registration before returning through synchronize_srcu(), which also 599 - * implies a full memory barrier. As such, marking the distributor as 600 - * 'ready' here is guaranteed to be ordered after all vCPUs having seen 601 - * a completely configured distributor. 602 - */ 603 - dist->ready = true; 597 + smp_store_release(&dist->ready, true); 604 598 goto out_slots; 605 599 out: 606 600 mutex_unlock(&kvm->arch.config_lock);
+7
arch/x86/kvm/vmx/vmx.c
··· 5785 5785 if (kvm_test_request(KVM_REQ_EVENT, vcpu)) 5786 5786 return 1; 5787 5787 5788 + /* 5789 + * Ensure that any updates to kvm->buses[] observed by the 5790 + * previous instruction (emulated or otherwise) are also 5791 + * visible to the instruction KVM is about to emulate. 5792 + */ 5793 + smp_rmb(); 5794 + 5788 5795 if (!kvm_emulate_instruction(vcpu, 0)) 5789 5796 return 0; 5790 5797
-1
include/kvm/arm_vgic.h
··· 406 406 407 407 #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) 408 408 #define vgic_initialized(k) ((k)->arch.vgic.initialized) 409 - #define vgic_ready(k) ((k)->arch.vgic.ready) 410 409 #define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ 411 410 ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) 412 411
+8 -3
include/linux/kvm_host.h
··· 206 206 struct kvm_io_bus { 207 207 int dev_count; 208 208 int ioeventfd_count; 209 + struct rcu_head rcu; 209 210 struct kvm_io_range range[]; 210 211 }; 211 212 ··· 967 966 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); 968 967 } 969 968 969 + /* 970 + * Get a bus reference under the update-side lock. No long-term SRCU reader 971 + * references are permitted, to avoid stale reads vs concurrent IO 972 + * registrations. 973 + */ 970 974 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) 971 975 { 972 - return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, 973 - lockdep_is_held(&kvm->slots_lock) || 974 - !refcount_read(&kvm->users_count)); 976 + return rcu_dereference_protected(kvm->buses[idx], 977 + lockdep_is_held(&kvm->slots_lock)); 975 978 } 976 979 977 980 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
+35 -8
virt/kvm/kvm_main.c
··· 1103 1103 { 1104 1104 } 1105 1105 1106 + /* Called only on cleanup and destruction paths when there are no users. */ 1107 + static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm, 1108 + enum kvm_bus idx) 1109 + { 1110 + return rcu_dereference_protected(kvm->buses[idx], 1111 + !refcount_read(&kvm->users_count)); 1112 + } 1113 + 1106 1114 static struct kvm *kvm_create_vm(unsigned long type, const char *fdname) 1107 1115 { 1108 1116 struct kvm *kvm = kvm_arch_alloc_vm(); ··· 1236 1228 out_err_no_arch_destroy_vm: 1237 1229 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); 1238 1230 for (i = 0; i < KVM_NR_BUSES; i++) 1239 - kfree(kvm_get_bus(kvm, i)); 1231 + kfree(kvm_get_bus_for_destruction(kvm, i)); 1240 1232 kvm_free_irq_routing(kvm); 1241 1233 out_err_no_irq_routing: 1242 1234 cleanup_srcu_struct(&kvm->irq_srcu); ··· 1284 1276 1285 1277 kvm_free_irq_routing(kvm); 1286 1278 for (i = 0; i < KVM_NR_BUSES; i++) { 1287 - struct kvm_io_bus *bus = kvm_get_bus(kvm, i); 1279 + struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i); 1288 1280 1289 1281 if (bus) 1290 1282 kvm_io_bus_destroy(bus); ··· 1320 1312 kvm_free_memslots(kvm, &kvm->__memslots[i][1]); 1321 1313 } 1322 1314 cleanup_srcu_struct(&kvm->irq_srcu); 1315 + srcu_barrier(&kvm->srcu); 1323 1316 cleanup_srcu_struct(&kvm->srcu); 1324 1317 #ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES 1325 1318 xa_destroy(&kvm->mem_attr_array); ··· 5852 5843 return -EOPNOTSUPP; 5853 5844 } 5854 5845 5846 + static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx) 5847 + { 5848 + /* 5849 + * Ensure that any updates to kvm_buses[] observed by the previous vCPU 5850 + * machine instruction are also visible to the vCPU machine instruction 5851 + * that triggered this call. 5852 + */ 5853 + smp_mb__after_srcu_read_lock(); 5854 + 5855 + return srcu_dereference(kvm->buses[idx], &kvm->srcu); 5856 + } 5857 + 5855 5858 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, 5856 5859 int len, const void *val) 5857 5860 { ··· 5876 5855 .len = len, 5877 5856 }; 5878 5857 5879 - bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5858 + bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx); 5880 5859 if (!bus) 5881 5860 return -ENOMEM; 5882 5861 r = __kvm_io_bus_write(vcpu, bus, &range, val); ··· 5895 5874 .len = len, 5896 5875 }; 5897 5876 5898 - bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5877 + bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx); 5899 5878 if (!bus) 5900 5879 return -ENOMEM; 5901 5880 ··· 5945 5924 .len = len, 5946 5925 }; 5947 5926 5948 - bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); 5927 + bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx); 5949 5928 if (!bus) 5950 5929 return -ENOMEM; 5951 5930 r = __kvm_io_bus_read(vcpu, bus, &range, val); 5952 5931 return r < 0 ? r : 0; 5953 5932 } 5954 5933 EXPORT_SYMBOL_GPL(kvm_io_bus_read); 5934 + 5935 + static void __free_bus(struct rcu_head *rcu) 5936 + { 5937 + struct kvm_io_bus *bus = container_of(rcu, struct kvm_io_bus, rcu); 5938 + 5939 + kfree(bus); 5940 + } 5955 5941 5956 5942 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, 5957 5943 int len, struct kvm_io_device *dev) ··· 5998 5970 memcpy(new_bus->range + i + 1, bus->range + i, 5999 5971 (bus->dev_count - i) * sizeof(struct kvm_io_range)); 6000 5972 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 6001 - synchronize_srcu_expedited(&kvm->srcu); 6002 - kfree(bus); 5973 + call_srcu(&kvm->srcu, &bus->rcu, __free_bus); 6003 5974 6004 5975 return 0; 6005 5976 } ··· 6060 6033 6061 6034 srcu_idx = srcu_read_lock(&kvm->srcu); 6062 6035 6063 - bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); 6036 + bus = kvm_get_bus_srcu(kvm, bus_idx); 6064 6037 if (!bus) 6065 6038 goto out_unlock;