Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:
"ARM:
- A number of issues in the vgic discovered using SMATCH
- An off-by-one calculation in our stage base address mask (32-bit
and 64-bit)
- Fixes to single-step debugging instructions that trap for other
reasons such as MMIO aborts
- Printing unavailable hyp mode as error
- Potential spinlock deadlock in the vgic
- Avoid calling vgic vcpu free more than once
- Broken bit calculation for big endian systems

s390:
- SPDX tags
- Fence storage key accesses from problem state
- Make sure that irq_state.flags is not used in the future

x86:
- Intercept port 0x80 accesses to prevent host instability (CVE)
- Use userspace FPU context for guest FPU (mainly an optimization
that fixes a double use of kernel FPU)
- Do not leak one page per module load
- Flush APIC page address cache from MMU invalidation notifiers"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (28 commits)
KVM: x86: fix APIC page invalidation
KVM: s390: Fix skey emulation permission check
KVM: s390: mark irq_state.flags as non-usable
KVM: s390: Remove redundant license text
KVM: s390: add SPDX identifiers to the remaining files
KVM: VMX: fix page leak in hardware_setup()
KVM: VMX: remove I/O port 0x80 bypass on Intel hosts
x86,kvm: remove KVM emulator get_fpu / put_fpu
x86,kvm: move qemu/guest FPU switching out to vcpu_run
KVM: arm/arm64: Fix broken GICH_ELRSR big endian conversion
KVM: arm/arm64: kvm_arch_destroy_vm cleanups
KVM: arm/arm64: Fix spinlock acquisition in vgic_set_owner
kvm: arm: don't treat unavailable HYP mode as an error
KVM: arm/arm64: Avoid attempting to load timer vgic state without a vgic
kvm: arm64: handle single-step of hyp emulated mmio instructions
kvm: arm64: handle single-step during SError exceptions
kvm: arm64: handle single-step of userspace mmio instructions
kvm: arm64: handle single-stepping trapped instructions
KVM: arm/arm64: debug: Introduce helper for single-step
arm: KVM: Fix VTTBR_BADDR_MASK BUG_ON off-by-one
...

+239 -201
+12 -3
Documentation/virtual/kvm/api.txt
···
 
 struct kvm_s390_irq_state {
 	__u64 buf;
-	__u32 flags;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 reserved[4];
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 Userspace passes in the above struct and for each pending interrupt a
 struct kvm_s390_irq is copied to the provided buffer.
+
+The structure contains a flags and a reserved field for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields can not be used in the future without breaking
+compatibility.
 
 If -ENOBUFS is returned the buffer provided was too small and userspace
 may retry with a bigger buffer.
···
 
 struct kvm_s390_irq_state {
 	__u64 buf;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 pad;
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
+
+The restrictions for flags and reserved apply as well.
+(see KVM_S390_GET_IRQ_STATE)
 
 The userspace memory referenced by buf contains a struct kvm_s390_irq
 for each interrupt to be injected into the guest.
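The contract described above is easy to honor from userspace by pre-zeroing the whole structure before the call. A minimal sketch of a conforming caller follows; the vcpu_fd handle and buffer management are hypothetical scaffolding, not part of the patch:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: fetch the pending-interrupt state of one vcpu. */
static int get_irq_state(int vcpu_fd, void *buf, __u32 buflen)
{
	struct kvm_s390_irq_state irq_state;

	/* Zeroing the struct also zeroes flags and reserved, which keeps
	 * a caller written this way safe even though the kernel can no
	 * longer enforce it retroactively. */
	memset(&irq_state, 0, sizeof(irq_state));
	irq_state.buf = (__u64)(unsigned long)buf;
	irq_state.len = buflen;

	/* -ENOBUFS means the buffer was too small; retry with a bigger one. */
	return ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &irq_state);
}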
+1 -2
arch/arm/include/asm/kvm_arm.h
···
 #else
 #define VTTBR_X		(5 - KVM_T0SZ)
 #endif
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
+5
arch/arm/include/asm/kvm_host.h
···
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu,
+					     struct kvm_run *run)
+{
+	return false;
+}
 
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
+1 -2
arch/arm64/include/asm/kvm_arm.h
···
 #define VTCR_EL2_FLAGS		(VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
 #define VTTBR_X		(VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
 
-#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
 #define VTTBR_VMID_SHIFT  (UL(48))
 #define VTTBR_VMID_MASK(size)	(_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
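Both headers had the same off-by-one: the mask was shifted by VTTBR_X - 1 instead of VTTBR_X, so it selected bits [X-1:38] of the VTTBR instead of the architected base-address field at [X:39]. A standalone illustration, using a hypothetical X of 14 (the real value depends on T0SZ and the translation granule):

#include <stdio.h>
#include <stdint.h>

#define X 14 /* hypothetical VTTBR_X, for illustration only */

int main(void)
{
	uint64_t old_mask = ((1ULL << (40 - X)) - 1) << (X - 1); /* buggy */
	uint64_t new_mask = ((1ULL << (40 - X)) - 1) << X;       /* fixed */

	/* 40 - X bits are set either way; only their position differs. */
	printf("old: %#018llx covers bits %d..38\n",
	       (unsigned long long)old_mask, X - 1);
	printf("new: %#018llx covers bits %d..39\n",
	       (unsigned long long)new_mask, X);
	return 0;
}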
+1
arch/arm64/include/asm/kvm_host.h
···
 void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
 int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+21
arch/arm64/kvm/debug.c
···
 		}
 	}
 }
+
+
+/*
+ * After successfully emulating an instruction, we might want to
+ * return to user space with a KVM_EXIT_DEBUG. We can only do this
+ * once the emulation is complete, though, so for userspace emulations
+ * we have to wait until we have re-entered KVM before calling this
+ * helper.
+ *
+ * Return true (and set exit_reason) to return to userspace or false
+ * if no further action is required.
+ */
+bool kvm_arm_handle_step_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		run->exit_reason = KVM_EXIT_DEBUG;
+		run->debug.arch.hsr = ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT;
+		return true;
+	}
+	return false;
+}
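Userspace sees the effect of this helper as a KVM_EXIT_DEBUG exit after enabling single-step via KVM_SET_GUEST_DEBUG. A rough sketch of the consuming side, assuming vcpu_fd and the mmap'ed run structure are already set up and eliding all error handling:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static void single_step_once(int vcpu_fd, struct kvm_run *run)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_DEBUG) {
		/* run->debug.arch.hsr holds the software-step exception
		 * class that kvm_arm_handle_step_debug() filled in. */
	}
}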
+42 -15
arch/arm64/kvm/handle_exit.c
···
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
+#include <asm/debug-monitors.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
···
 }
 
 /*
+ * We may be single-stepping an emulated instruction. If the emulation
+ * has been completed in the kernel, we can return to userspace with a
+ * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
+ * emulation first.
+ */
+static int handle_trap_exceptions(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	int handled;
+
+	/*
+	 * See ARM ARM B1.14.1: "Hyp traps on instructions
+	 * that fail their condition code check"
+	 */
+	if (!kvm_condition_valid(vcpu)) {
+		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		handled = 1;
+	} else {
+		exit_handle_fn exit_handler;
+
+		exit_handler = kvm_get_exit_handler(vcpu);
+		handled = exit_handler(vcpu, run);
+	}
+
+	/*
+	 * kvm_arm_handle_step_debug() sets the exit_reason on the kvm_run
+	 * structure if we need to return to userspace.
+	 */
+	if (handled > 0 && kvm_arm_handle_step_debug(vcpu, run))
+		handled = 0;
+
+	return handled;
+}
+
+/*
  * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
  * proper exit to userspace.
  */
 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		int exception_index)
 {
-	exit_handle_fn exit_handler;
-
 	if (ARM_SERROR_PENDING(exception_index)) {
 		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
···
 		return 1;
 	case ARM_EXCEPTION_EL1_SERROR:
 		kvm_inject_vabt(vcpu);
-		return 1;
-	case ARM_EXCEPTION_TRAP:
-		/*
-		 * See ARM ARM B1.14.1: "Hyp traps on instructions
-		 * that fail their condition code check"
-		 */
-		if (!kvm_condition_valid(vcpu)) {
-			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		/* We may still need to return for single-step */
+		if (!(*vcpu_cpsr(vcpu) & DBG_SPSR_SS)
+		    && kvm_arm_handle_step_debug(vcpu, run))
+			return 0;
+		else
 			return 1;
-		}
-
-		exit_handler = kvm_get_exit_handler(vcpu);
-
-		return exit_handler(vcpu, run);
+	case ARM_EXCEPTION_TRAP:
+		return handle_trap_exceptions(vcpu, run);
 	case ARM_EXCEPTION_HYP_GONE:
 		/*
 		 * EL2 has been reset to the hyp-stub. This happens when a guest
+30 -7
arch/arm64/kvm/hyp/switch.c
···
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/fpsimd.h>
+#include <asm/debug-monitors.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
 {
···
 	return true;
 }
 
-static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+/* Skip an instruction which has been emulated. Returns true if
+ * execution can continue or false if we need to exit hyp mode because
+ * single-step was in effect.
+ */
+static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
···
 	}
 
 	write_sysreg_el2(*vcpu_pc(vcpu), elr);
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+		vcpu->arch.fault.esr_el2 =
+			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
+		return false;
+	} else {
+		return true;
+	}
 }
 
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
···
 		int ret = __vgic_v2_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		if (ret == -1) {
-			/* Promote an illegal access to an SError */
-			__skip_instr(vcpu);
+			/* Promote an illegal access to an
+			 * SError. If we would be returning
+			 * due to single-step clear the SS
+			 * bit so handle_exit knows what to
+			 * do after dealing with the error.
+			 */
+			if (!__skip_instr(vcpu))
+				*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
 			exit_code = ARM_EXCEPTION_EL1_SERROR;
 		}
···
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
-			__skip_instr(vcpu);
-			goto again;
+			if (__skip_instr(vcpu))
+				goto again;
+			else
+				exit_code = ARM_EXCEPTION_TRAP;
 		}
 
 		/* 0 falls through to be handled out of EL2 */
+1 -4
arch/s390/kvm/Makefile
···
+# SPDX-License-Identifier: GPL-2.0
 # Makefile for kernel virtual machines on s390
 #
 # Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
 common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
+1 -4
arch/s390/kvm/diag.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling diagnose instructions
  *
  * Copyright IBM Corp. 2008, 2011
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
+1 -4
arch/s390/kvm/gaccess.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * access guest memory
  *
  * Copyright IBM Corp. 2008, 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  */
+1 -4
arch/s390/kvm/guestdbg.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm guest debug support
  *
  * Copyright IBM Corp. 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
+1 -4
arch/s390/kvm/intercept.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * in-kernel handling for sie intercepts
  *
  * Copyright IBM Corp. 2008, 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
+1 -4
arch/s390/kvm/interrupt.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling kvm guest interrupts
  *
  * Copyright IBM Corp. 2008, 2015
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  */
+1 -4
arch/s390/kvm/irq.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * s390 irqchip routines
  *
  * Copyright IBM Corp. 2014
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  */
+5 -6
arch/s390/kvm/kvm-s390.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
- * hosting zSeries kernel virtual machines
+ * hosting IBM Z kernel virtual machines (s390x)
  *
- * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
+ * Copyright IBM Corp. 2008, 2017
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
···
 			r = -EINVAL;
 			break;
 		}
+		/* do not use irq_state.flags, it will break old QEMUs */
 		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
···
 			r = -EINVAL;
 			break;
 		}
+		/* do not use irq_state.flags, it will break old QEMUs */
 		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
+1 -4
arch/s390/kvm/kvm-s390.h
···
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * definition for kvm on s390
  *
  * Copyright IBM Corp. 2008, 2009
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
+10 -6
arch/s390/kvm/priv.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling privileged instructions
  *
  * Copyright IBM Corp. 2008, 2013
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
···
 		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
 		return -EAGAIN;
 	}
-	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
-		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 	return 0;
 }
 
···
 	unsigned char key;
 	int reg1, reg2;
 	int rc;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	rc = try_handle_skey(vcpu);
 	if (rc)
···
 	unsigned long addr;
 	int reg1, reg2;
 	int rc;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	rc = try_handle_skey(vcpu);
 	if (rc)
···
 	unsigned char key, oldkey;
 	int reg1, reg2;
 	int rc;
+
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
 	rc = try_handle_skey(vcpu);
 	if (rc)
+1 -4
arch/s390/kvm/sigp.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * handling interprocessor communication
  *
  * Copyright IBM Corp. 2008, 2013
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): Carsten Otte <cotte@de.ibm.com>
  *            Christian Borntraeger <borntraeger@de.ibm.com>
+1 -4
arch/s390/kvm/vsie.c
···
+// SPDX-License-Identifier: GPL-2.0
 /*
  * kvm nested virtualization support for s390x
  *
  * Copyright IBM Corp. 2016
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
  *
  * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
  */
-2
arch/x86/include/asm/kvm_emulate.h
···
 	void (*halt)(struct x86_emulate_ctxt *ctxt);
 	void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
 	int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
-	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
-	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
 	int (*intercept)(struct x86_emulate_ctxt *ctxt,
			 struct x86_instruction_info *info,
			 enum x86_intercept_stage stage);
+16
arch/x86/include/asm/kvm_host.h
···
 	struct kvm_mmu_memory_cache mmu_page_cache;
 	struct kvm_mmu_memory_cache mmu_page_header_cache;
 
+	/*
+	 * QEMU userspace and the guest each have their own FPU state.
+	 * In vcpu_run, we switch between the user and guest FPU contexts.
+	 * While running a VCPU, the VCPU thread will have the guest FPU
+	 * context.
+	 *
+	 * Note that while the PKRU state lives inside the fpu registers,
+	 * it is switched out separately at VMENTER and VMEXIT time. The
+	 * "guest_fpu" state here contains the guest FPU context, with the
+	 * host PRKU bits.
+	 */
+	struct fpu user_fpu;
 	struct fpu guest_fpu;
+
 	u64 xcr0;
 	u64 guest_supported_xcr0;
 	u32 guest_xstate_size;
···
 
 #define put_smstate(type, buf, offset, val)                      \
	*(type *)((buf) + (offset) - 0x7e00) = val
+
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end);
 
 #endif /* _ASM_X86_KVM_HOST_H */
-24
arch/x86/kvm/emulate.c
···
 
 static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
···
 #endif
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 			  int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
···
 #endif
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
···
 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
 {
-	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
···
 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
 	default: BUG();
 	}
-	ctxt->ops->put_fpu(ctxt);
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
···
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fninit");
-	ctxt->ops->put_fpu(ctxt);
 	return X86EMUL_CONTINUE;
 }
···
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fnstcw %0": "+m"(fcw));
-	ctxt->ops->put_fpu(ctxt);
 
 	ctxt->dst.val = fcw;
···
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	ctxt->ops->get_fpu(ctxt);
 	asm volatile("fnstsw %0": "+m"(fsw));
-	ctxt->ops->put_fpu(ctxt);
 
 	ctxt->dst.val = fsw;
···
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ctxt->ops->get_fpu(ctxt);
-
 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
-
-	ctxt->ops->put_fpu(ctxt);
 
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
···
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ctxt->ops->get_fpu(ctxt);
-
 	if (size < __fxstate_size(16)) {
 		rc = fxregs_fixup(&fx_state, size);
 		if (rc != X86EMUL_CONTINUE)
···
 	rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-	ctxt->ops->put_fpu(ctxt);
-
 	return rc;
 }
···
 {
 	int rc;
 
-	ctxt->ops->get_fpu(ctxt);
 	rc = asm_safe("fwait");
-	ctxt->ops->put_fpu(ctxt);
 
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
-6
arch/x86/kvm/vmx.c
···
 		goto out;
 	}
 
-	vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
 	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
 	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
 
-	/*
-	 * Allow direct access to the PC debug port (it is often used for I/O
-	 * delays, but the vmexits simply slow things down).
-	 */
 	memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
-	clear_bit(0x80, vmx_io_bitmap_a);
 
 	memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
 
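For context on the bitmap being rewritten here: VMX I/O bitmaps hold one bit per port, and a set bit forces a VM exit, so the memset(..., 0xff, ...) intercepts every port while the removed clear_bit() had punched a hole for port 0x80. A conceptual model of the lookup (not the real vmexit path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Bitmap A covers ports 0x0000-0x7fff; bit n%8 of byte n/8 belongs to
 * port n, and a set bit means the access exits to the hypervisor. */
static int port_intercepted(const uint8_t *bitmap_a, uint16_t port)
{
	return (bitmap_a[port / 8] >> (port % 8)) & 1;
}

int main(void)
{
	uint8_t bitmap_a[PAGE_SIZE];

	memset(bitmap_a, 0xff, sizeof(bitmap_a)); /* intercept everything */
	printf("port 0x80 intercepted: %d\n", port_intercepted(bitmap_a, 0x80));

	/* Equivalent of the removed clear_bit(0x80, vmx_io_bitmap_a): */
	bitmap_a[0x80 / 8] &= (uint8_t)~(1u << (0x80 % 8));
	printf("after punching the hole: %d\n", port_intercepted(bitmap_a, 0x80));
	return 0;
}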
+31 -32
arch/x86/kvm/x86.c
···
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	pagefault_enable();
 	kvm_x86_ops->vcpu_put(vcpu);
-	kvm_put_guest_fpu(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 }
···
 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
 }
 
-static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
-{
-	preempt_disable();
-	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
-}
-
-static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
-{
-	preempt_enable();
-}
-
 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
 			      struct x86_instruction_info *info,
 			      enum x86_intercept_stage stage)
···
 	.halt                = emulator_halt,
 	.wbinvd              = emulator_wbinvd,
 	.fix_hypercall       = emulator_fix_hypercall,
-	.get_fpu             = emulator_get_fpu,
-	.put_fpu             = emulator_put_fpu,
 	.intercept           = emulator_intercept,
 	.get_cpuid           = emulator_get_cpuid,
 	.set_nmi_mask        = emulator_set_nmi_mask,
···
 	kvm_x86_ops->tlb_flush(vcpu);
 }
 
+void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+	unsigned long apic_address;
+
+	/*
+	 * The physical address of apic access page is stored in the VMCS.
+	 * Update it when it becomes invalid.
+	 */
+	apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (start <= apic_address && apic_address < end)
+		kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
+}
+
 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
 	struct page *page = NULL;
···
 	preempt_disable();
 
 	kvm_x86_ops->prepare_guest_switch(vcpu);
-	kvm_load_guest_fpu(vcpu);
 
 	/*
 	 * Disable IRQs before setting IN_GUEST_MODE. Posted interrupt
···
 		}
 	}
 
+	kvm_load_guest_fpu(vcpu);
+
 	if (unlikely(vcpu->arch.complete_userspace_io)) {
 		int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
 		vcpu->arch.complete_userspace_io = NULL;
 		r = cui(vcpu);
 		if (r <= 0)
-			goto out;
+			goto out_fpu;
 	} else
 		WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
···
 	else
 		r = vcpu_run(vcpu);
 
+out_fpu:
+	kvm_put_guest_fpu(vcpu);
 out:
 	post_kvm_run_save(vcpu);
 	kvm_sigset_deactivate(vcpu);
···
 	vcpu->arch.cr0 |= X86_CR0_ET;
 }
 
+/* Swap (qemu) user FPU context for the guest FPU context. */
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->guest_fpu_loaded)
-		return;
-
-	/*
-	 * Restore all possible states in the guest,
-	 * and assume host would use all available bits.
-	 * Guest xcr0 would be loaded later.
-	 */
-	vcpu->guest_fpu_loaded = 1;
-	__kernel_fpu_begin();
+	preempt_disable();
+	copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
 	/* PKRU is separately restored in kvm_x86_ops->run. */
 	__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
 				~XFEATURE_MASK_PKRU);
+	preempt_enable();
 	trace_kvm_fpu(1);
 }
 
+/* When vcpu_run ends, restore user space FPU context. */
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->guest_fpu_loaded)
-		return;
-
-	vcpu->guest_fpu_loaded = 0;
+	preempt_disable();
 	copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
-	__kernel_fpu_end();
+	copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+	preempt_enable();
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
 }
···
 	 * To avoid have the INIT path from kvm_apic_has_events() that be
 	 * called with loaded FPU and does not let userspace fix the state.
 	 */
-	kvm_put_guest_fpu(vcpu);
+	if (init_event)
+		kvm_put_guest_fpu(vcpu);
 	mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave,
 					  XFEATURE_MASK_BNDREGS);
 	if (mpx_state_buffer)
···
 					  XFEATURE_MASK_BNDCSR);
 	if (mpx_state_buffer)
 		memset(mpx_state_buffer, 0, sizeof(struct mpx_bndcsr));
+	if (init_event)
+		kvm_load_guest_fpu(vcpu);
 	}
 
 	if (!init_event) {
-3
include/kvm/arm_arch_timer.h
···
 #define vcpu_vtimer(v)	(&(v)->arch.timer_cpu.vtimer)
 #define vcpu_ptimer(v)	(&(v)->arch.timer_cpu.ptimer)
 
-void enable_el1_phys_timer_access(void);
-void disable_el1_phys_timer_access(void);
-
 #endif
+1 -1
include/linux/kvm_host.h
···
 	struct mutex mutex;
 	struct kvm_run *run;
 
-	int guest_fpu_loaded, guest_xcr0_loaded;
+	int guest_xcr0_loaded;
 	struct swait_queue_head wq;
 	struct pid __rcu *pid;
 	int sigset_active;
+2 -2
include/uapi/linux/kvm.h
···
 
 struct kvm_s390_irq_state {
 	__u64 buf;
-	__u32 flags;
+	__u32 flags;        /* will stay unused for compatibility reasons */
 	__u32 len;
-	__u32 reserved[4];
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
 };
 
 /* for KVM_SET_GUEST_DEBUG */
+4 -7
virt/kvm/arm/arch_timer.c
···
 
 	vtimer_restore_state(vcpu);
 
-	if (has_vhe())
-		disable_el1_phys_timer_access();
-
 	/* Set the background timer for the physical timer emulation. */
 	phys_timer_emulate(vcpu);
 }
···
 
 	if (unlikely(!timer->enabled))
 		return;
-
-	if (has_vhe())
-		enable_el1_phys_timer_access();
 
 	vtimer_save_state(vcpu);
 
···
 no_vgic:
 	preempt_disable();
 	timer->enabled = 1;
-	kvm_timer_vcpu_load_vgic(vcpu);
+	if (!irqchip_in_kernel(vcpu->kvm))
+		kvm_timer_vcpu_load_user(vcpu);
+	else
+		kvm_timer_vcpu_load_vgic(vcpu);
 	preempt_enable();
 
 	return 0;
+5 -2
virt/kvm/arm/arm.c
···
 			kvm->vcpus[i] = NULL;
 		}
 	}
+	atomic_set(&kvm->online_vcpus, 0);
 }
 
 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
···
 {
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
-	kvm_vgic_vcpu_destroy(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vcpu);
···
 		ret = kvm_handle_mmio_return(vcpu, vcpu->run);
 		if (ret)
 			return ret;
+		if (kvm_arm_handle_step_debug(vcpu, vcpu->run))
+			return 0;
+
 	}
 
 	if (run->immediate_exit)
···
 	bool in_hyp_mode;
 
 	if (!is_hyp_mode_available()) {
-		kvm_err("HYP mode not available\n");
+		kvm_info("HYP mode not available\n");
 		return -ENODEV;
 	}
 
+20 -28
virt/kvm/arm/hyp/timer-sr.c
···
 	write_sysreg(cntvoff, cntvoff_el2);
 }
 
-void __hyp_text enable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/* Allow physical timer/counter access for the host */
-	val = read_sysreg(cnthctl_el2);
-	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
-void __hyp_text disable_el1_phys_timer_access(void)
-{
-	u64 val;
-
-	/*
-	 * Disallow physical timer access for the guest
-	 * Physical counter access is allowed
-	 */
-	val = read_sysreg(cnthctl_el2);
-	val &= ~CNTHCTL_EL1PCEN;
-	val |= CNTHCTL_EL1PCTEN;
-	write_sysreg(val, cnthctl_el2);
-}
-
 void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * We don't need to do this for VHE since the host kernel runs in EL2
 	 * with HCR_EL2.TGE ==1, which makes those bits have no impact.
 	 */
-	if (!has_vhe())
-		enable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/* Allow physical timer/counter access for the host */
+		val = read_sysreg(cnthctl_el2);
+		val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
 
 void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu)
 {
-	if (!has_vhe())
-		disable_el1_phys_timer_access();
+	if (!has_vhe()) {
+		u64 val;
+
+		/*
+		 * Disallow physical timer access for the guest
+		 * Physical counter access is allowed
+		 */
+		val = read_sysreg(cnthctl_el2);
+		val &= ~CNTHCTL_EL1PCEN;
+		val |= CNTHCTL_EL1PCTEN;
+		write_sysreg(val, cnthctl_el2);
+	}
 }
-4
virt/kvm/arm/hyp/vgic-v2-sr.c
···
 	else
 		elrsr1 = 0;
 
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
 	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
 }
 
 static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
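The removed #ifdef was double-converting: elrsr0 and elrsr1 already hold CPU-native values once read from the registers, and assembling a u64 with shift-and-or is arithmetic on values rather than a memory access, so it is byte-order independent. A small demonstration of that property:

#include <stdio.h>
#include <stdint.h>

/* Produces the same u64 on little- and big-endian hosts; only a memory
 * reinterpretation (e.g. memcpy into a byte array) would differ. */
static uint64_t combine(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("%#018llx\n",
	       (unsigned long long)combine(0x11111111u, 0x22222222u));
	return 0;
}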
+1 -2
virt/kvm/arm/vgic/vgic-irqfd.c
···
 	u32 nr = dist->nr_spis;
 	int i, ret;
 
-	entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
-			  GFP_KERNEL);
+	entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
 	if (!entries)
 		return -ENOMEM;
 
+3 -1
virt/kvm/arm/vgic/vgic-its.c
···
 	u32 *intids;
 	int nr_irqs, i;
 	unsigned long flags;
+	u8 pendmask;
 
 	nr_irqs = vgic_copy_lpi_list(vcpu, &intids);
 	if (nr_irqs < 0)
···
 
 	for (i = 0; i < nr_irqs; i++) {
 		int byte_offset, bit_nr;
-		u8 pendmask;
 
 		byte_offset = intids[i] / BITS_PER_BYTE;
 		bit_nr = intids[i] % BITS_PER_BYTE;
···
 		return E_ITS_MAPC_COLLECTION_OOR;
 
 	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+	if (!collection)
+		return -ENOMEM;
 
 	collection->collection_id = coll_id;
 	collection->target_addr = COLLECTION_NOT_MAPPED;
+1 -1
virt/kvm/arm/vgic/vgic-v3.c
···
 	int last_byte_offset = -1;
 	struct vgic_irq *irq;
 	int ret;
+	u8 val;
 
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
 		int byte_offset, bit_nr;
 		struct kvm_vcpu *vcpu;
 		gpa_t pendbase, ptr;
 		bool stored;
-		u8 val;
 
 		vcpu = irq->target_vcpu;
 		if (!vcpu)
+4 -2
virt/kvm/arm/vgic/vgic-v4.c
···
 		goto out;
 
 	WARN_ON(!(irq->hw && irq->host_irq == virq));
-	irq->hw = false;
-	ret = its_unmap_vlpi(virq);
+	if (irq->hw) {
+		irq->hw = false;
+		ret = its_unmap_vlpi(virq);
+	}
 
 out:
 	mutex_unlock(&its->its_lock);
+5 -3
virt/kvm/arm/vgic/vgic.c
···
 int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
 {
 	struct vgic_irq *irq;
+	unsigned long flags;
 	int ret = 0;
 
 	if (!vgic_initialized(vcpu->kvm))
···
 		return -EINVAL;
 
 	irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
-	spin_lock(&irq->irq_lock);
+	spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->owner && irq->owner != owner)
 		ret = -EEXIST;
 	else
 		irq->owner = owner;
-	spin_unlock(&irq->irq_lock);
+	spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 	return ret;
 }
···
 
 bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
 {
-	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
+	struct vgic_irq *irq;
 	bool map_is_active;
 	unsigned long flags;
 
 	if (!vgic_initialized(vcpu->kvm))
 		return false;
 
+	irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
 	spin_lock_irqsave(&irq->irq_lock, flags);
 	map_is_active = irq->hw && irq->active;
 	spin_unlock_irqrestore(&irq->irq_lock, flags);
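The switch to spin_lock_irqsave() closes a classic single-CPU deadlock: if a lock can also be taken from interrupt context, process-context holders must disable local interrupts, or an interrupt arriving on the same CPU can spin forever on a lock the interrupted code already owns. Schematically (kernel-style pseudocode, not taken from the patch):

/* CPU 0, process context */
spin_lock(&irq->irq_lock);      /* interrupts still enabled */
/* ... an interrupt fires on this CPU before the unlock ... */

/* CPU 0, interrupt context */
spin_lock(&irq->irq_lock);      /* spins on a lock held by the code it
                                 * interrupted: deadlock. irqsave on the
                                 * process-context side prevents this. */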
+8
virt/kvm/kvm_main.c
···
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end)
+{
+}
+
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
 {
 	if (pfn_valid(pfn))
···
 		kvm_flush_remote_tlbs(kvm);
 
 	spin_unlock(&kvm->mmu_lock);
+
+	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
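The generic definition relies on weak linkage: the empty __weak body is the default for every architecture, and the strong definition added in arch/x86/kvm/x86.c above replaces it at link time. The pattern in isolation, with hypothetical names:

#include <stdio.h>

/* default.c: weak default, used when no strong definition is linked in */
__attribute__((weak)) void arch_hook(void)
{
	puts("weak default");
}

int main(void)
{
	arch_hook(); /* prints "strong override" if arch.c is also linked */
	return 0;
}

/* arch.c (a separate translation unit):
 *
 *     void arch_hook(void) { puts("strong override"); }
 */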