/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H

#include <linux/entry-virt.h>
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
#include <linux/ftrace.h>
#include <linux/hashtable.h>
#include <linux/instrumentation.h>
#include <linux/interval_tree.h>
#include <linux/rbtree.h>
#include <linux/xarray.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_IDS
#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_userspace_memory_region::flags are used internally by
 * KVM; the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_GMEM_ONLY	(1UL << 17)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of kvm_swap_active_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_MAX_NR_ADDRESS_SPACES
#define KVM_MAX_NR_ADDRESS_SPACES	1
#endif
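/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * how a consumer of the memslot generation described above might validate
 * cached data.  The cache structure and helper are hypothetical; arch MMIO
 * caches follow this pattern.  Because the update-in-progress flag is part of
 * the generation value, anything cached while an update is in flight can
 * never match the final generation and is discarded.
 */
#if 0
struct example_mmio_cache {
	u64 generation;
	gpa_t gpa;
};

static bool example_cache_is_stale(struct kvm_memslots *slots,
				   struct example_mmio_cache *cache)
{
	return cache->generation != slots->generation;
}
#endif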
/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask bits
 * 62:52 to indicate an error pfn, and bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
#define KVM_PFN_ERR_SIGPENDING	(KVM_PFN_ERR_MASK + 3)
#define KVM_PFN_ERR_NEEDS_IO	(KVM_PFN_ERR_MASK + 4)

/*
 * Error pfns indicate that the gfn is in slot but failed to
 * translate it to pfn on host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * KVM_PFN_ERR_SIGPENDING indicates that fetching the PFN was interrupted
 * by a pending signal.  Note, the signal may or may not be fatal.
 */
static inline bool is_sigpending_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_ERR_SIGPENDING;
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn: it
 * is not in any slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfn indicates that the gfn is not in slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

/*
 * Architectures with a KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

static inline bool kvm_is_error_gpa(gpa_t gpa)
{
	return gpa == INVALID_GPA;
}
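/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the usual pattern for consuming a pfn returned by a gfn->pfn lookup using
 * the helpers above.  example_handle_fault() and its return codes are
 * hypothetical.
 */
#if 0
static int example_handle_fault(kvm_pfn_t pfn)
{
	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn is not backed by any memslot */
	if (is_sigpending_pfn(pfn))
		return -EINTR;	/* retry after the signal is handled */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* in-slot gfn that failed to translate */
	return 0;		/* pfn is valid and usable */
}
#endif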
#define KVM_REQUEST_MASK	GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
#define KVM_REQUEST_NO_ACTION	BIT(10)
/*
 * Architecture-independent vcpu->requests bit members
 * Bits 3-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH		(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD			(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK			2
#define KVM_REQ_DIRTY_RING_SOFT_FULL	3
#define KVM_REQUEST_ARCH_BASE		8

/*
 * KVM_REQ_OUTSIDE_GUEST_MODE exists purely as a way to force the vCPU to
 * OUTSIDE_GUEST_MODE.  KVM_REQ_OUTSIDE_GUEST_MODE differs from a vCPU "kick"
 * in that it ensures the vCPU has reached OUTSIDE_GUEST_MODE before continuing
 * on.  A kick only guarantees that the vCPU is on its way out, e.g. a previous
 * kick may have set vcpu->mode to EXITING_GUEST_MODE, and so there's no
 * guarantee the vCPU received an IPI and has actually exited guest mode.
 */
#define KVM_REQ_OUTSIDE_GUEST_MODE	(KVM_REQUEST_NO_ACTION | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 unsigned long *vcpu_bitmap);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
#define KVM_PIT_IRQ_SOURCE_ID			2

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct rcu_head rcu;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_IOCSR_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

union kvm_mmu_notifier_arg {
	unsigned long attributes;
};

enum kvm_gfn_range_filter {
	KVM_FILTER_SHARED	= BIT(0),
	KVM_FILTER_PRIVATE	= BIT(1),
};

struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	union kvm_mmu_notifier_arg arg;
	enum kvm_gfn_range_filter attr_filter;
	bool may_block;
	bool lockless;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};
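/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * how an architecture defines a private request with KVM_ARCH_REQ_FLAGS()
 * and broadcasts it to all vCPUs.  EXAMPLE_KVM_REQ_FLUSH is a hypothetical
 * request.
 */
#if 0
#define EXAMPLE_KVM_REQ_FLUSH	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT)

static void example_broadcast_flush(struct kvm *kvm)
{
	/*
	 * KVM_REQUEST_WAIT makes this wait until every running vCPU has
	 * acked the IPI, i.e. has left guest mode at least once.
	 */
	kvm_make_all_cpus_request(kvm, EXAMPLE_KVM_REQ_FLUSH);
}
#endif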
struct kvm_host_map {
	/*
	 * Only valid if the 'pfn' is managed by the host kernel (i.e. there is
	 * a 'struct page' for it).  When using the mem= kernel parameter some
	 * memory can be used as guest memory without being managed by the
	 * host kernel.
	 */
	struct page *pinned_page;
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
	bool writable;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index into kvm->vcpu_array */
	int ____srcu_idx; /* Don't use this directly.  You've been warned. */
#ifdef CONFIG_PROVE_RCU
	int srcu_depth;
#endif
	int mode;
	u64 requests;
	unsigned long guest_debug;

	struct mutex mutex;
	struct kvm_run *run;

#ifndef __KVM_HAVE_ARCH_WQP
	struct rcuwait wait;
#endif
	struct pid *pid;
	rwlock_t pid_lock;
	int sigset_active;
	sigset_t sigset;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool wants_to_run;
	bool preempted;
	bool ready;
	bool scheduled_out;
	struct kvm_vcpu_arch arch;
	struct kvm_vcpu_stat stat;
	char stats_id[KVM_STATS_NAME_SIZE];
	struct kvm_dirty_ring dirty_ring;

	/*
	 * The most recently used memslot by this vCPU and the slots generation
	 * for which it is valid.
	 * No wraparound protection is needed since generations won't overflow
	 * in thousands of years, even assuming 1M memslot operations per
	 * second.
	 */
	struct kvm_memory_slot *last_used_slot;
	u64 last_used_slot_gen;
};

/*
 * Start accounting time towards a guest.
 * Must be called before entering guest context.
 */
static __always_inline void guest_timing_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();
}
/*
 * Enter guest context and enter an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_enter_irqoff(void)
{
	/*
	 * KVM does not hold any references to rcu protected data when it
	 * switches CPU into a guest mode.  In fact switching to a guest mode
	 * is very similar to exiting to userspace from rcu point of view.  In
	 * addition CPU may stay in a guest mode for quite a long time (up to
	 * one time slice).  Let's treat guest mode as a quiescent state, just
	 * like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch();
		instrumentation_end();
	}
}

/*
 * Deprecated.  Architectures should move to guest_timing_enter_irqoff() and
 * guest_state_enter_irqoff().
 */
static __always_inline void guest_enter_irqoff(void)
{
	guest_timing_enter_irqoff();
	guest_context_enter_irqoff();
}

/**
 * guest_state_enter_irqoff - Fixup state when entering a guest
 *
 * Entry to a guest will enable interrupts, but the kernel state is interrupts
 * disabled when this is invoked.  Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code before entering a guest.
 * Must be called with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_enter_irqoff() before this.
 *
 * Note: this is analogous to exit_to_user_mode().
 */
static __always_inline void guest_state_enter_irqoff(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare();
	instrumentation_end();

	guest_context_enter_irqoff();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

/*
 * Exit guest context and exit an RCU extended quiescent state.
 *
 * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
 * unsafe to use any code which may directly or indirectly use RCU, tracing
 * (including IRQ flag tracing), or lockdep.  All code in this period must be
 * non-instrumentable.
 */
static __always_inline void guest_context_exit_irqoff(void)
{
	/*
	 * Guest mode is treated as a quiescent state, see
	 * guest_context_enter_irqoff() for more details.
	 */
	if (!context_tracking_guest_exit()) {
		instrumentation_begin();
		rcu_virt_note_context_switch();
		instrumentation_end();
	}
}

/*
 * Stop accounting time towards a guest.
 * Must be called after exiting guest context.
 */
static __always_inline void guest_timing_exit_irqoff(void)
{
	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}
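/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the ordering an architecture's run loop is expected to follow around a
 * guest entry, per the kerneldoc above.  example_arch_run_guest() is a
 * hypothetical stand-in for the arch's actual world switch.
 */
#if 0
static void example_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	local_irq_disable();

	guest_timing_enter_irqoff();	/* start guest time accounting */
	guest_state_enter_irqoff();	/* IRQ tracing, RCU, lockdep fixup */

	example_arch_run_guest(vcpu);	/* non-instrumentable world switch */

	guest_state_exit_irqoff();	/* undo in the reverse order */
	guest_timing_exit_irqoff();

	local_irq_enable();
}
#endif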
/*
 * Deprecated.  Architectures should move to guest_state_exit_irqoff() and
 * guest_timing_exit_irqoff().
 */
static __always_inline void guest_exit_irqoff(void)
{
	guest_context_exit_irqoff();
	guest_timing_exit_irqoff();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

/**
 * guest_state_exit_irqoff - Establish state when returning from guest mode
 *
 * Entry from a guest disables interrupts, but guest mode is traced as
 * interrupts enabled.  Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific code after exiting a guest.
 * Must be invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke guest_timing_exit_irqoff() after this.
 *
 * Note: this is analogous to enter_from_user_mode().
 */
static __always_inline void guest_state_exit_irqoff(void)
{
	lockdep_hardirqs_off(CALLER_ADDR0);
	guest_context_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps;
 * this limit must be chosen so that those limits are never exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
/*
 * Since at idle each memslot belongs to two memslot sets it has to contain
 * two embedded nodes for each data structure that it forms a part of.
 *
 * Two memslot sets (one active and one inactive) are necessary so the VM
 * continues to run on one memslot set while the other is being modified.
 *
 * These two memslot sets normally point to the same set of memslots.
 * They can, however, be desynchronized when performing a memslot management
 * operation by replacing the memslot to be modified by its copy.
 * After the operation is complete, both memslot sets once again point to
 * the same, common set of memslot data.
 *
 * The memslots themselves are independent of each other so they can be
 * individually added or deleted.
 */
struct kvm_memory_slot {
	struct hlist_node id_node[2];
	struct interval_tree_node hva_node[2];
	struct rb_node gfn_node[2];
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;

#ifdef CONFIG_KVM_GUEST_MEMFD
	struct {
		/*
		 * Writes protected by kvm->slots_lock.  Acquiring a
		 * reference via kvm_gmem_get_file() is protected by
		 * either kvm->slots_lock or kvm->srcu.
		 */
		struct file *file;
		pgoff_t pgoff;
	} gmem;
#endif
};

static inline bool kvm_slot_has_gmem(const struct kvm_memory_slot *slot)
{
	return slot && (slot->flags & KVM_MEM_GUEST_MEMFD);
}

static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 ind_gaddr;
	u64 summary_addr;
	u64 summary_gaddr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_xen_evtchn {
	u32 port;
	u32 vcpu_id;
	int vcpu_idx;
	u32 priority;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
		struct kvm_xen_evtchn xen_evtchn;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif

bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);

#ifndef KVM_INTERNAL_MEM_SLOTS
#define KVM_INTERNAL_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_INTERNAL_MEM_SLOTS)

#if KVM_MAX_NR_ADDRESS_SPACES == 1
static inline int kvm_arch_nr_memslot_as_ids(struct kvm *kvm)
{
	return KVM_MAX_NR_ADDRESS_SPACES;
}

static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

#ifndef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline bool kvm_arch_has_private_mem(struct kvm *kvm)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_GUEST_MEMFD
bool kvm_arch_supports_gmem_init_shared(struct kvm *kvm);

static inline u64 kvm_gmem_get_supported_flags(struct kvm *kvm)
{
	u64 flags = GUEST_MEMFD_FLAG_MMAP;

	if (!kvm || kvm_arch_supports_gmem_init_shared(kvm))
		flags |= GUEST_MEMFD_FLAG_INIT_SHARED;

	return flags;
}
#endif

#ifndef kvm_arch_has_readonly_mem
static inline bool kvm_arch_has_readonly_mem(struct kvm *kvm)
{
	return IS_ENABLED(CONFIG_HAVE_KVM_READONLY_MEM);
}
#endif
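/*
 * Worked example (editorial addition, not part of the upstream header):
 * kvm_dirty_bitmap_bytes() allocates one bit per page, rounded up to a
 * BITS_PER_LONG multiple.  For a 1 GiB slot of 4 KiB pages, npages = 262144,
 * so the bitmap is 262144 / 8 = 32768 bytes (32 KiB).
 */
#if 0
static unsigned long example_bitmap_bytes(void)
{
	struct kvm_memory_slot slot = { .npages = 262144 };

	return kvm_dirty_bitmap_bytes(&slot);	/* 32768 */
}
#endif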
struct kvm_memslots {
	u64 generation;
	atomic_long_t last_used_slot;
	struct rb_root_cached hva_tree;
	struct rb_root gfn_tree;
	/*
	 * The mapping table from slot id to memslot.
	 *
	 * 7-bit bucket count matches the size of the old id to index array for
	 * 512 slots, while giving good performance with this slot count.
	 * Higher bucket counts bring only small performance improvements but
	 * always result in higher memory usage (even for lower memslot counts).
	 */
	DECLARE_HASHTABLE(id_hash, 7);
	int node_idx;
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

	struct mutex slots_lock;

	/*
	 * Protects the arch-specific fields of struct kvm_memory_slots in
	 * use by the VM.  To be used under the slots_lock (above) or in a
	 * kvm->srcu critical section where acquiring the slots_lock would
	 * lead to deadlock with the synchronize_srcu in
	 * kvm_swap_active_memslots().
	 */
	struct mutex slots_arch_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	unsigned long nr_memslot_pages;
	/* The two memslot sets - active and inactive (per address space) */
	struct kvm_memslots __memslots[KVM_MAX_NR_ADDRESS_SPACES][2];
	/* The current active memslot set for each address space */
	struct kvm_memslots __rcu *memslots[KVM_MAX_NR_ADDRESS_SPACES];
	struct xarray vcpu_array;
	/*
	 * Protected by slots_lock, but can be read outside if an
	 * incorrect answer is acceptable.
	 */
	atomic_t nr_memslots_dirty_logging;

	/* Used to wait for completion of MMU notifiers. */
	spinlock_t mn_invalidate_lock;
	unsigned long mn_active_invalidate_count;
	struct rcuwait mn_memslots_update_rcuwait;

	/* For management / invalidation of gfn_to_pfn_caches */
	spinlock_t gpc_lock;
	struct list_head gpc_list;

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int max_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	struct {
		spinlock_t lock;
		struct list_head items;
		/* resampler_list update side is protected by resampler_lock. */
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
#endif
	struct list_head ioeventfds;
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;

	struct hlist_head irq_ack_notifier_list;
#endif

	struct mmu_notifier mmu_notifier;
	unsigned long mmu_invalidate_seq;
	long mmu_invalidate_in_progress;
	gfn_t mmu_invalidate_range_start;
	gfn_t mmu_invalidate_range_end;

	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	bool override_halt_poll_ns;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
	bool dirty_ring_with_bitmap;
	bool vm_bugged;
	bool vm_dead;

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
	struct notifier_block pm_notifier;
#endif
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
	/* Protected by slots_lock (for writes) and RCU (for reads) */
	struct xarray mem_attr_array;
#endif
	char stats_id[KVM_STATS_NAME_SIZE];
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt, \
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...) \
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...) \
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id, \
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...) \
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline void kvm_vm_dead(struct kvm *kvm)
{
	kvm->vm_dead = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD);
}

static inline void kvm_vm_bugged(struct kvm *kvm)
{
	kvm->vm_bugged = true;
	kvm_vm_dead(kvm);
}

#define KVM_BUG(cond, kvm, fmt...)				\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

#define KVM_BUG_ON(cond, kvm)					\
({								\
	bool __ret = !!(cond);					\
								\
	if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))		\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})
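/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * KVM_BUG_ON() evaluates to the condition itself, so it can gate an error
 * path and mark the VM dead in one statement.  example_arch_op() is a
 * hypothetical arch function.
 */
#if 0
static int example_arch_op(struct kvm *kvm, int state)
{
	/* An impossible state means KVM itself is corrupted: kill the VM. */
	if (KVM_BUG_ON(state < 0, kvm))
		return -EIO;

	return 0;
}
#endif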
/*
 * Note, "data corruption" refers to corruption of host kernel data structures,
 * not guest data.  Guest data corruption, suspected or confirmed, that is tied
 * and contained to a single VM should *never* BUG() and potentially panic the
 * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
 * is corrupted and that corruption can have a cascading effect on other parts
 * of the host and/or on other VMs.
 */
#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm)			\
({								\
	bool __ret = !!(cond);					\
								\
	if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION))		\
		BUG_ON(__ret);					\
	else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged))	\
		kvm_vm_bugged(kvm);				\
	unlikely(__ret);					\
})

static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
	WARN_ONCE(vcpu->srcu_depth++,
		  "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
#endif
	vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
}

static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
{
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);

#ifdef CONFIG_PROVE_RCU
	WARN_ONCE(--vcpu->srcu_depth,
		  "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
#endif
}

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

/*
 * Get a bus reference under the update-side lock.  No long-term SRCU reader
 * references are permitted, to avoid stale reads vs concurrent IO
 * registrations.
 */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return rcu_dereference_protected(kvm->buses[idx],
					 lockdep_is_held(&kvm->slots_lock));
}
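/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the standard pattern for touching memslot-protected state from a vCPU
 * ioctl path; the SRCU read lock pins the active memslots across the access.
 * example_read_guest_state() is hypothetical.
 */
#if 0
static void example_read_guest_state(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	u8 byte;

	kvm_vcpu_srcu_read_lock(vcpu);
	kvm_read_guest(vcpu->kvm, gpa, &byte, sizeof(byte));
	kvm_vcpu_srcu_read_unlock(vcpu);
}
#endif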
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);

	/*
	 * Explicitly verify the target vCPU is online, as the anti-speculation
	 * logic only limits the CPU's ability to speculate, e.g. given a "bad"
	 * index, clamping the index to 0 would return vCPU0, not NULL.
	 */
	if (i >= num_vcpus)
		return NULL;

	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
	smp_rmb();
	return xa_load(&kvm->vcpu_array, i);
}

#define kvm_for_each_vcpu(idx, vcpup, kvm)			   \
	if (atomic_read(&kvm->online_vcpus))			   \
		xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
				  (atomic_read(&kvm->online_vcpus) - 1))

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

void kvm_destroy_vcpus(struct kvm *kvm);

int kvm_trylock_all_vcpus(struct kvm *kvm);
int kvm_lock_all_vcpus(struct kvm *kvm);
void kvm_unlock_all_vcpus(struct kvm *kvm);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
bool kvm_get_kvm_safe(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_MAX_NR_ADDRESS_SPACES);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
{
	return RB_EMPTY_ROOT(&slots->gfn_tree);
}

bool kvm_are_all_memslots_empty(struct kvm *kvm);

#define kvm_for_each_memslot(memslot, bkt, slots)			      \
	hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
		if (WARN_ON_ONCE(!memslot->npages)) {			      \
		} else

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	struct kvm_memory_slot *slot;
	int idx = slots->node_idx;

	hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
		if (slot->id == id)
			return slot;
	}

	return NULL;
}
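/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * iterating over all online vCPUs, e.g. to kick each of them.  Note the
 * iteration index must be an unsigned long, as required by
 * xa_for_each_range().
 */
#if 0
static void example_kick_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_vcpu_kick(vcpu);
}
#endif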
/* Iterator used for walking memslots that overlap a gfn range. */
struct kvm_memslot_iter {
	struct kvm_memslots *slots;
	struct rb_node *node;
	struct kvm_memory_slot *slot;
};

static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
{
	iter->node = rb_next(iter->node);
	if (!iter->node)
		return;

	iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
}

static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
					  struct kvm_memslots *slots,
					  gfn_t start)
{
	int idx = slots->node_idx;
	struct rb_node *tmp;
	struct kvm_memory_slot *slot;

	iter->slots = slots;

	/*
	 * Find the so-called "upper bound" of a key - the first node that has
	 * its key strictly greater than the searched one (the start gfn in
	 * our case).
	 */
	iter->node = NULL;
	for (tmp = slots->gfn_tree.rb_node; tmp; ) {
		slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
		if (start < slot->base_gfn) {
			iter->node = tmp;
			tmp = tmp->rb_left;
		} else {
			tmp = tmp->rb_right;
		}
	}

	/*
	 * Find the slot with the lowest gfn that can possibly intersect with
	 * the range, so we'll ideally have slot start <= range start
	 */
	if (iter->node) {
		/*
		 * A NULL previous node means that the very first slot
		 * already has a higher start gfn.
		 * In this case slot start > range start.
		 */
		tmp = rb_prev(iter->node);
		if (tmp)
			iter->node = tmp;
	} else {
		/* a NULL node below means no slots */
		iter->node = rb_last(&slots->gfn_tree);
	}
	if (iter->node) {
		iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);

		/*
		 * It is possible in the slot start < range start case that the
		 * found slot ends before or at range start (slot end <= range start)
		 * and so it does not overlap the requested range.
		 *
		 * In such non-overlapping case the next slot (if it exists) will
		 * already have slot start > range start, otherwise the logic above
		 * would have found it instead of the current slot.
		 */
		if (iter->slot->base_gfn + iter->slot->npages <= start)
			kvm_memslot_iter_next(iter);
	}
}

static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
{
	if (!iter->node)
		return false;

	/*
	 * If this slot starts beyond or at the end of the range so does
	 * every next one
	 */
	return iter->slot->base_gfn < end;
}

/* Iterate over each memslot at least partially intersecting [start, end) range */
#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end)	\
	for (kvm_memslot_iter_start(iter, slots, start);		\
	     kvm_memslot_iter_is_valid(iter, end);			\
	     kvm_memslot_iter_next(iter))

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

int kvm_set_internal_memslot(struct kvm *kvm,
			     const struct kvm_userspace_memory_region2 *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
		       struct page **pages, int nr_pages);

struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write);
static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
	return __gfn_to_page(kvm, gfn, true);
}

unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);

static inline void kvm_release_page_unused(struct page *page)
{
	if (!page)
		return;

	put_page(page);
}

void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
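/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * walking every memslot that intersects a gfn range, the pattern used by
 * invalidation paths.  example_visit_slot() is hypothetical.
 */
#if 0
static void example_walk_range(struct kvm_memslots *slots, gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
		example_visit_slot(iter.slot);
}
#endif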
static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
					    bool unused, bool dirty)
{
	lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);

	if (!page)
		return;

	/*
	 * If the page that KVM got from the *primary MMU* is writable, and KVM
	 * installed or reused a SPTE, mark the page/folio dirty.  Note, this
	 * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if
	 * the GFN is write-protected.  Folios can't be safely marked dirty
	 * outside of mmu_lock as doing so could race with writeback on the
	 * folio.  As a result, KVM can't mark folios dirty in the fast page
	 * fault handler, and so KVM must (somewhat) speculatively mark the
	 * folio dirty if KVM could locklessly make the SPTE writable.
	 */
	if (unused)
		kvm_release_page_unused(page);
	else if (dirty)
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
}

kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
			    unsigned int foll, bool *writable,
			    struct page **refcounted_page);

static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
					bool write, bool *writable,
					struct page **refcounted_page)
{
	return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
				 write ? FOLL_WRITE : 0, writable, refcounted_page);
}

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
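/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * kvm_put_guest() writes a single value at a gpa and marks the page dirty on
 * success; kvm_read_guest() copies an arbitrary range.  Hypothetical usage:
 */
#if 0
static int example_guest_rw(struct kvm *kvm, gpa_t gpa)
{
	u32 val;
	int ret;

	ret = kvm_read_guest(kvm, gpa, &val, sizeof(val));
	if (ret)
		return ret;	/* -EFAULT if gpa isn't backed by a memslot */

	return kvm_put_guest(kvm, gpa, (u32)(val + 1));
}
#endif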
void mark_page_dirty_in_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map,
		   bool writable);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map);

static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa,
			       struct kvm_host_map *map)
{
	return __kvm_vcpu_map(vcpu, gpa, map, true);
}

static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa,
					struct kvm_host_map *map)
{
	return __kvm_vcpu_map(vcpu, gpa, map, false);
}

static inline void kvm_vcpu_map_mark_dirty(struct kvm_vcpu *vcpu,
					   struct kvm_host_map *map)
{
	if (kvm_vcpu_mapped(map))
		kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
}

unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);

/**
 * kvm_gpc_init - initialize gfn_to_pfn_cache.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @kvm:	   pointer to kvm instance.
 *
 * This sets up a gfn_to_pfn_cache by initializing locks and assigning the
 * immutable attributes.  Note, the cache must be zero-allocated (or zeroed by
 * the caller before init).
 */
void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);

/**
 * kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
 *                    physical address.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @gpa:	   guest physical address to map.
 * @len:	   sanity check; the range being accessed must fit in a single page.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This primes a gfn_to_pfn_cache and links it into the @gpc->kvm's list for
 * invalidations to be processed.  Callers are required to use kvm_gpc_check()
 * to ensure that the cache is valid before accessing the target page.
 */
int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
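/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the map / modify / mark-dirty / unmap lifecycle of a kvm_host_map.
 */
#if 0
static int example_patch_guest_page(struct kvm_vcpu *vcpu, gpa_t gpa, u8 byte)
{
	struct kvm_host_map map;

	if (kvm_vcpu_map(vcpu, gpa, &map))
		return -EFAULT;

	((u8 *)map.hva)[offset_in_page(gpa)] = byte;
	kvm_vcpu_map_mark_dirty(vcpu, &map);
	kvm_vcpu_unmap(vcpu, &map);
	return 0;
}
#endif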
/**
 * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @hva:	   userspace virtual address to map.
 * @len:	   sanity check; the range being accessed must fit in a single page.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable userspace virtual address.
 *
 * The semantics of this function are the same as those of kvm_gpc_activate().
 * It merely bypasses a layer of address translation.
 */
int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);

/**
 * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @len:	   sanity check; the range being accessed must fit in a single page.
 *
 * @return:	   %true if the cache is still valid and the address matches.
 *		   %false if the cache is not valid.
 *
 * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
 * while calling this function, and then continue to hold the lock until the
 * access is complete.
 *
 * Callers in IN_GUEST_MODE may do so without locking, although they should
 * still hold a read lock on kvm->srcu for the memslot checks.
 */
bool kvm_gpc_check(struct gfn_to_pfn_cache *gpc, unsigned long len);

/**
 * kvm_gpc_refresh - update a previously initialized cache.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 * @len:	   sanity check; the range being accessed must fit in a single page.
 *
 * @return:	   0 for success.
 *		   -EINVAL for a mapping which would cross a page boundary.
 *		   -EFAULT for an untranslatable guest physical address.
 *
 * This will attempt to refresh a gfn_to_pfn_cache.  Note that a successful
 * return from this function does not mean the page can be immediately
 * accessed because it may have raced with an invalidation.  Callers must
 * still lock and check the cache status, as this function does not return
 * with the lock still held to permit access.
 */
int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
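/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the canonical gfn_to_pfn_cache access loop described by the kerneldoc
 * above.  The gpc->lock and gpc->khva field accesses mirror how existing
 * users (e.g. the Xen shared-info code) consume the cache; this is a sketch,
 * not a definitive implementation.
 */
#if 0
static int example_use_gpc(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			   gpa_t gpa)
{
	int ret;

	kvm_gpc_init(gpc, kvm);
	ret = kvm_gpc_activate(gpc, gpa, sizeof(u64));
	if (ret)
		return ret;

	read_lock(&gpc->lock);
	while (!kvm_gpc_check(gpc, sizeof(u64))) {
		read_unlock(&gpc->lock);

		ret = kvm_gpc_refresh(gpc, sizeof(u64));
		if (ret)
			goto out;

		read_lock(&gpc->lock);
	}
	*(u64 *)gpc->khva = 0;	/* access the mapped page under the lock */
	read_unlock(&gpc->lock);
out:
	kvm_gpc_deactivate(gpc);
	return ret;
}
#endif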
/**
 * kvm_gpc_deactivate - deactivate and unlink a gfn_to_pfn_cache.
 *
 * @gpc:	   struct gfn_to_pfn_cache object.
 *
 * This removes a cache from the VM's list to be processed on MMU notifier
 * invocation.
 */
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);

static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
{
	return gpc->active && !kvm_is_error_gpa(gpc->gpa);
}

static inline bool kvm_gpc_is_hva_active(struct gfn_to_pfn_cache *gpc)
{
	return gpc->active && kvm_is_error_gpa(gpc->gpa);
}

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);

#ifndef CONFIG_S390
void __kvm_vcpu_kick(struct kvm_vcpu *vcpu, bool wait);

static inline void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	__kvm_vcpu_kick(vcpu, false);
}
#endif

int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
				   const struct kvm_memory_slot *memslot);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

void kvm_mmu_invalidate_begin(struct kvm *kvm);
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end);
void kvm_mmu_invalidate_end(struct kvm *kvm);
bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_unlocked_ioctl(struct file *filp,
				  unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg);
long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
			      unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
#endif

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#else
static inline void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
#endif

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
/*
 * kvm_arch_{enable,disable}_virtualization() are called on one CPU, under
 * kvm_usage_lock, immediately after/before 0=>1 and 1=>0 transitions of
 * kvm_usage_count, i.e. at the beginning of the generic hardware enabling
 * sequence, and at the end of the generic hardware disabling sequence.
 */
void kvm_arch_enable_virtualization(void);
void kvm_arch_disable_virtualization(void);
/*
 * kvm_arch_{enable,disable}_virtualization_cpu() are called on "every" CPU to
 * do the actual twiddling of hardware bits.  The hooks are called on all
 * online CPUs when KVM enables/disables virtualization, and on a single CPU
 * when that CPU is onlined/offlined (including for Resume/Suspend).
 */
int kvm_arch_enable_virtualization_cpu(void);
void kvm_arch_disable_virtualization_cpu(void);
#endif
bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_preempted_in_kernel(struct kvm_vcpu *vcpu);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
void kvm_arch_create_vm_debugfs(struct kvm *kvm);
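/*
 * Illustrative sketch (editorial addition, not part of the upstream header):
 * the shape an architecture's per-CPU virtualization hooks typically take.
 * The example_hw_*() helpers are hypothetical stand-ins for arch-specific
 * register twiddling.
 */
#if 0
int kvm_arch_enable_virtualization_cpu(void)
{
	if (!example_hw_virt_supported())
		return -EIO;

	example_hw_virt_on();	/* e.g. set VMXE on x86 */
	return 0;
}

void kvm_arch_disable_virtualization_cpu(void)
{
	example_hw_virt_off();
}
#endif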
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc_obj(struct kvm, GFP_KERNEL_ACCOUNT);
}
#endif

static inline void __kvm_arch_free_vm(struct kvm *kvm)
{
	kvfree(kvm);
}

#ifndef __KVM_HAVE_ARCH_VM_FREE
static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	__kvm_arch_free_vm(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#else
int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
						   gfn_t gfn, u64 nr_pages)
{
	return -EOPNOTSUPP;
}
#else
int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

/*
 * Wake a vCPU if necessary, but don't do any stats/metadata updates.  Returns
 * true if the vCPU was blocking and was awakened, false otherwise.
 */
static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
{
	return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
}

static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
{
	return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
}
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);

void __kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void),
				   void (*mediated_pmi_handler)(void));

static inline void kvm_register_perf_callbacks(void)
{
	__kvm_register_perf_callbacks(NULL, NULL);
}

void kvm_unregister_perf_callbacks(void);
#else
static inline void kvm_register_perf_callbacks(void) {}
static inline void kvm_unregister_perf_callbacks(void) {}
#endif /* CONFIG_GUEST_PERF_EVENTS */

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * Returns a pointer to the memslot if it contains gfn.
 * Otherwise returns NULL.
 */
static inline struct kvm_memory_slot *
try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	if (!slot)
		return NULL;

	if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
		return slot;
	else
		return NULL;
}

/*
 * Returns a pointer to the memslot that contains gfn.  Otherwise returns NULL.
 *
 * With "approx" set, a memslot is returned even when the address falls in a
 * hole; in that case, one of the memslots bordering the hole is returned.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;
	struct rb_node *node;
	int idx = slots->node_idx;

	slot = NULL;
	for (node = slots->gfn_tree.rb_node; node; ) {
		slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
		if (gfn >= slot->base_gfn) {
			if (gfn < slot->base_gfn + slot->npages)
				return slot;
			node = node->rb_right;
		} else
			node = node->rb_left;
	}

	return approx ? slot : NULL;
}

static inline struct kvm_memory_slot *
____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
	struct kvm_memory_slot *slot;

	slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
	slot = try_get_memslot(slot, gfn);
	if (slot)
		return slot;

	slot = search_memslots(slots, gfn, approx);
	if (slot) {
		atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
		return slot;
	}

	return NULL;
}

/*
 * __gfn_to_memslot() and its descendants are here to allow arch code to inline
 * the lookups in hot paths.  gfn_to_memslot() itself isn't here as an inline
 * because that would bloat other code too much.
 */
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return ____gfn_to_memslot(slots, gfn, false);
}

static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To prevent
	 * a malicious guest from building a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline bool kvm_is_gpa_in_memslot(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return !kvm_is_error_hva(hva);
}

static inline void kvm_gpc_mark_dirty_in_slot(struct gfn_to_pfn_cache *gpc)
{
	lockdep_assert_held(&gpc->lock);

	if (!gpc->memslot)
		return;

	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpa_to_gfn(gpc->gpa));
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	const struct _kvm_stats_desc *desc;
	enum kvm_stat_kind kind;
};

struct _kvm_stats_desc {
	struct kvm_stats_desc desc;
	char name[KVM_STATS_NAME_SIZE];
};

#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz)		\
	.flags = type | unit | base |					\
		 BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) |	\
		 BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) |	\
		 BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK),	\
	.exponent = exp,						\
	.size = sz,							\
	.bucket_size = bsz

#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, generic.stat) \
		},							\
		.name = #stat,						\
	}
#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz)	\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
		},							\
		.name = #stat,						\
	}
#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vm_stat, stat)	\
		},							\
		.name = #stat,						\
	}
#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz)		\
	{								\
		{							\
			STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
			.offset = offsetof(struct kvm_vcpu_stat, stat)	\
		},							\
		.name = #stat,						\
	}
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz)		\
	SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)

#define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent)	\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE,		\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent)		\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT,			\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent)		\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK,			\
		   unit, base, exponent, 1, 0)
#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST,		\
		   unit, base, exponent, sz, bsz)
#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz)	\
	STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST,		\
		   unit, base, exponent, sz, 0)

/* Cumulative counter, read/write */
#define STATS_DESC_COUNTER(SCOPE, name)					\
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_NONE,		\
		KVM_STATS_BASE_POW10, 0)
/* Instantaneous counter, read only */
#define STATS_DESC_ICOUNTER(SCOPE, name)				\
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_NONE,		\
		KVM_STATS_BASE_POW10, 0)
/* Peak counter, read/write */
#define STATS_DESC_PCOUNTER(SCOPE, name)				\
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_NONE,		\
		KVM_STATS_BASE_POW10, 0)

/* Instantaneous boolean value, read only */
#define STATS_DESC_IBOOLEAN(SCOPE, name)				\
	STATS_DESC_INSTANT(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,		\
		KVM_STATS_BASE_POW10, 0)
/* Peak (sticky) boolean value, read/write */
#define STATS_DESC_PBOOLEAN(SCOPE, name)				\
	STATS_DESC_PEAK(SCOPE, name, KVM_STATS_UNIT_BOOLEAN,		\
		KVM_STATS_BASE_POW10, 0)

/* Cumulative time in nanoseconds */
#define STATS_DESC_TIME_NSEC(SCOPE, name)				\
	STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9)
/* Linear histogram for time in nanoseconds */
#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz)		\
	STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9, sz, bsz)
/* Logarithmic histogram for time in nanoseconds */
#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz)			\
	STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS,	\
		KVM_STATS_BASE_POW10, -9, sz)

#define KVM_GENERIC_VM_STATS()						\
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush),		\
	STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests)

#define KVM_GENERIC_VCPU_STATS()					\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll),		\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_attempted_poll),		\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid),		\
	STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup),			\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns),	\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns),		\
	STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns),		\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist,	\
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist,	\
			HALT_POLL_HIST_COUNT),				\
	STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)

ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
		       const struct _kvm_stats_desc *desc,
		       void *stats, size_t size_stats,
		       char __user *user_buffer, size_t size, loff_t *offset);

/**
 * kvm_stats_linear_hist_update() - Update bucket value for linear histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the linear histogram's bucket
 * @bucket_size: the size (width) of a bucket
 */
static inline void kvm_stats_linear_hist_update(u64 *data, size_t size,
						u64 value, size_t bucket_size)
{
	size_t index = div64_u64(value, bucket_size);

	index = min(index, size - 1);
	++data[index];
}

/**
 * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram
 * statistics data.
 *
 * @data: start address of the stats data
 * @size: the number of buckets in the stats data
 * @value: the new value used to update the logarithmic histogram's bucket
 */
static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
{
	size_t index = fls64(value);

	index = min(index, size - 1);
	++data[index];
}

#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize)		\
	kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize)
#define KVM_STATS_LOG_HIST_UPDATE(array, value)				\
	kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value)


extern const struct kvm_stats_header kvm_vm_stats_header;
extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];

static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_invalidate_in_progress))
		return 1;
	/*
	 * Ensure the read of mmu_invalidate_in_progress happens before
	 * the read of mmu_invalidate_seq.  This interacts with the
	 * smp_wmb() in mmu_notifier_invalidate_range_end to make sure
	 * that the caller either sees the old (non-zero) value of
	 * mmu_invalidate_in_progress or the new (incremented) value of
	 * mmu_invalidate_seq.
	 *
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so can't rely on
	 * kvm->mmu_lock to keep things ordered.
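	 *
	 * The expected caller pattern is the one documented for page fault
	 * handlers: snapshot the sequence count, fault in the page without
	 * holding mmu_lock, then retry if an invalidation ran in between.
	 * An illustrative sketch (lookup_pfn() is a placeholder, not a real
	 * helper):
	 *
	 *	mmu_seq = kvm->mmu_invalidate_seq;
	 *	smp_rmb();
	 *	pfn = lookup_pfn(...);		(may sleep)
	 *	write_lock(&kvm->mmu_lock);
	 *	if (mmu_invalidate_retry(kvm, mmu_seq))
	 *		goto out_unlock;	(raced, restart the fault)
	 *	(... install the new mapping ...)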
	 */
	smp_rmb();
	if (kvm->mmu_invalidate_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_invalidate_retry_gfn(struct kvm *kvm,
					   unsigned long mmu_seq,
					   gfn_t gfn)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_invalidate_in_progress is non-zero, then the range maintained
	 * by kvm_mmu_notifier_invalidate_range_start contains all addresses
	 * that might be being invalidated.  Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_invalidate_in_progress)) {
		/*
		 * Dropping mmu_lock after bumping mmu_invalidate_in_progress
		 * but before updating the range is a KVM bug.
		 */
		if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA ||
				 kvm->mmu_invalidate_range_end == INVALID_GPA))
			return 1;

		if (gfn >= kvm->mmu_invalidate_range_start &&
		    gfn < kvm->mmu_invalidate_range_end)
			return 1;
	}

	if (kvm->mmu_invalidate_seq != mmu_seq)
		return 1;
	return 0;
}

/*
 * This lockless version of the range-based retry check *must* be paired with a
 * call to the locked version after acquiring mmu_lock, i.e. this is safe to
 * use only as a pre-check to avoid contending mmu_lock.  This version *will*
 * get false negatives and false positives.
 */
static inline bool mmu_invalidate_retry_gfn_unsafe(struct kvm *kvm,
						   unsigned long mmu_seq,
						   gfn_t gfn)
{
	/*
	 * Use READ_ONCE() to ensure the in-progress flag and sequence counter
	 * are always read from memory, e.g. so that checking for retry in a
	 * loop won't result in an infinite retry loop.  Don't force loads for
	 * start+end, as the key to avoiding infinite retry loops is observing
	 * the 1=>0 transition of in-progress, i.e. getting false negatives
	 * due to stale start+end values is acceptable.
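	 *
	 * Pairing sketch (illustrative; RET_PF_RETRY and fault->gfn stand in
	 * for whatever retry code and fault state the caller uses):
	 *
	 *	if (mmu_invalidate_retry_gfn_unsafe(kvm, mmu_seq, fault->gfn))
	 *		return RET_PF_RETRY;
	 *	write_lock(&kvm->mmu_lock);
	 *	if (mmu_invalidate_retry_gfn(kvm, mmu_seq, fault->gfn))
	 *		goto out_retry;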
	 */
	if (unlikely(READ_ONCE(kvm->mmu_invalidate_in_progress)) &&
	    gfn >= kvm->mmu_invalidate_range_start &&
	    gfn < kvm->mmu_invalidate_range_end)
		return true;

	return READ_ONCE(kvm->mmu_invalidate_seq) != mmu_seq;
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_init_irq_routing(struct kvm *kvm);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

static inline int kvm_init_irq_routing(struct kvm *kvm)
{
	return 0;
}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
bool kvm_notify_irqfd_resampler(struct kvm *kvm,
				unsigned int irqchip,
				unsigned int pin);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm,
					      unsigned int irqchip,
					      unsigned int pin)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_IRQCHIP */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void __kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static __always_inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Requests that don't require vCPU action should never be logged in
	 * vcpu->requests.  The vCPU won't clear the request, so it will stay
	 * logged indefinitely and prevent the vCPU from entering the guest.
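	 *
	 * For reference, the usual producer/consumer pairing looks like the
	 * sketch below (illustrative; the consumer side runs in the vCPU's
	 * run loop, and kvm_check_request() clears the bit as it reads it):
	 *
	 *	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	 *
	 *	if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
	 *		(... handle the request ...)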
	 */
	BUILD_BUG_ON(!__builtin_constant_p(req) ||
		     (req & KVM_REQUEST_NO_ACTION));

	__kvm_make_request(req, vcpu);
}

#ifndef CONFIG_S390
static inline void kvm_make_request_and_kick(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	__kvm_vcpu_kick(vcpu, req & KVM_REQUEST_WAIT);
}
#endif

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
	return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
	clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (kvm_test_request(req, vcpu)) {
		kvm_clear_request(req, vcpu);

		/*
		 * Ensure the rest of the request is visible to kvm_check_request's
		 * caller.  Paired with the smp_wmb in kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
extern bool enable_virt_at_load;
extern bool kvm_rebooting;
#endif

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
	const struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;

	/*
	 * create is called holding kvm->lock; any operations not suitable
	 * for doing while holding the lock should be deferred to init (see
	 * below).
	 */
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * init is called after create if create is successful, and is called
	 * outside of holding kvm->lock.
	 */
	void (*init)(struct kvm_device *dev);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	/*
	 * Release is an alternative method to free the device.  It is
	 * called when the device file descriptor is closed.  Once
	 * release is called, the destroy method will not be called
	 * anymore as the device is removed from the device list of
	 * the VM.  kvm->lock is held.
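	 *
	 * Since release, like destroy, is responsible for freeing dev, a
	 * minimal implementation looks like the sketch below (illustrative
	 * only, the "example" names are made up):
	 *
	 *	static void example_release(struct kvm_device *dev)
	 *	{
	 *		kfree(dev->private);
	 *		kfree(dev);
	 *	}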
	 */
	void (*release)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
	int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
	return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
		!(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
struct kvm_kernel_irqfd;

bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
				   struct kvm_kernel_irq_routing_entry *old,
				   struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll?
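 *
 * E.g. the generic halt-polling code uses this to account polls that were
 * cut short by an invalid wakeup (illustrative sketch of the consumer side):
 *
 *	if (!vcpu_valid_wakeup(vcpu))
 *		++vcpu->stat.generic.halt_poll_invalid;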
 */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
	return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

#ifdef CONFIG_VIRT_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_INTR;
	vcpu->stat.signal_exits++;
}

static inline int kvm_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
{
	int r = xfer_to_guest_mode_handle_work();

	if (r) {
		WARN_ON_ONCE(r != -EINTR);
		kvm_handle_signal_exit(vcpu);
	}
	return r;
}
#endif /* CONFIG_VIRT_XFER_TO_GUEST_WORK */

/*
 * If more than one page is being (un)accounted, @virt must be the address of
 * the first page of a block of pages that were allocated together (i.e.
 * accounted together).
 *
 * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
 * is thread-safe.
 */
static inline void kvm_account_pgtable_pages(void *virt, int nr)
{
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vCPU out to userspace to avoid the dirty ring becoming full.
 * This value can be tuned higher if e.g. PML is enabled on the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						 gpa_t gpa, gpa_t size,
						 bool is_write, bool is_exec,
						 bool is_private)
{
	vcpu->run->exit_reason = KVM_EXIT_MEMORY_FAULT;
	vcpu->run->memory_fault.gpa = gpa;
	vcpu->run->memory_fault.size = size;

	/* RWX flags are not (yet) defined or communicated to userspace.
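	 * Userspace can therefore only key off KVM_MEMORY_EXIT_FLAG_PRIVATE,
	 * e.g. (illustrative VMM-side sketch; convert_range() is a made-up
	 * placeholder for flipping the range's memory attributes):
	 *
	 *	if (run->exit_reason == KVM_EXIT_MEMORY_FAULT)
	 *		convert_range(run->memory_fault.gpa,
	 *			      run->memory_fault.size,
	 *			      run->memory_fault.flags &
	 *			      KVM_MEMORY_EXIT_FLAG_PRIVATE);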
	 */
	vcpu->run->memory_fault.flags = 0;
	if (is_private)
		vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
}

static inline bool kvm_memslot_is_gmem_only(const struct kvm_memory_slot *slot)
{
	if (!IS_ENABLED(CONFIG_KVM_GUEST_MEMFD))
		return false;

	return slot->flags & KVM_MEMSLOT_GMEM_ONLY;
}

#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{
	return xa_to_value(xa_load(&kvm->mem_attr_array, gfn));
}

bool kvm_range_has_memory_attributes(struct kvm *kvm, gfn_t start, gfn_t end,
				     unsigned long mask, unsigned long attrs);
bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
					struct kvm_gfn_range *range);
bool kvm_arch_post_set_memory_attributes(struct kvm *kvm,
					 struct kvm_gfn_range *range);

static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
	return kvm_get_memory_attributes(kvm, gfn) & KVM_MEMORY_ATTRIBUTE_PRIVATE;
}
#else
static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
{
	return false;
}
#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */

#ifdef CONFIG_KVM_GUEST_MEMFD
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
		     gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
		     int *max_order);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   kvm_pfn_t *pfn, struct page **page,
				   int *max_order)
{
	KVM_BUG_ON(1, kvm);
	return -EIO;
}
#endif /* CONFIG_KVM_GUEST_MEMFD */

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
int kvm_arch_gmem_prepare(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn, int max_order);
#endif

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_POPULATE
/**
 * kvm_gmem_populate() - Populate/prepare a GPA range with guest data
 *
 * @kvm: KVM instance
 * @gfn: starting GFN to be populated
 * @src: userspace-provided buffer containing data to copy into GFN range
 *       (passed to @post_populate, and incremented on each iteration
 *       if not NULL). Must be page-aligned.
 * @npages: number of pages to copy from the userspace buffer
 * @post_populate: callback to issue for each gmem page that backs the GPA
 *                 range
 * @opaque: opaque data to pass to @post_populate callback
 *
 * This is primarily intended for cases where a gmem-backed GPA range needs
 * to be initialized with userspace-provided data prior to being mapped into
 * the guest as a private page.  This should be called with the slots->lock
 * held so that caller-enforced invariants regarding the expected memory
 * attributes of the GPA range do not race with KVM_SET_MEMORY_ATTRIBUTES.
 *
 * Returns the number of pages that were populated.
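 *
 * An illustrative @post_populate implementation, matching the
 * kvm_gmem_populate_cb signature declared below ("example" names are
 * hypothetical, e.g. for an arch that must encrypt each page as it is
 * installed):
 *
 *	static int example_post_populate(struct kvm *kvm, gfn_t gfn,
 *					 kvm_pfn_t pfn, struct page *page,
 *					 void *opaque)
 *	{
 *		return example_encrypt_page(kvm, gfn, pfn, opaque);
 *	}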
 */
typedef int (*kvm_gmem_populate_cb)(struct kvm *kvm, gfn_t gfn, kvm_pfn_t pfn,
				    struct page *page, void *opaque);

long kvm_gmem_populate(struct kvm *kvm, gfn_t gfn, void __user *src, long npages,
		       kvm_gmem_populate_cb post_populate, void *opaque);
#endif

#ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
void kvm_arch_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
#endif

#ifdef CONFIG_KVM_GENERIC_PRE_FAULT_MEMORY
long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
				    struct kvm_pre_fault_memory *range);
#endif

#ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
int kvm_enable_virtualization(void);
void kvm_disable_virtualization(void);
#else
static inline int kvm_enable_virtualization(void) { return 0; }
static inline void kvm_disable_virtualization(void) { }
#endif

#endif