Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'hyperv-next-signed-20260421' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull Hyper-V updates from Wei Liu:

- Fix cross-compilation for hv tools (Aditya Garg)

- Fix vmemmap_shift exceeding MAX_FOLIO_ORDER in mshv_vtl (Naman Jain)

- Limit channel interrupt scan to relid high water mark (Michael Kelley)

- Export hv_vmbus_exists() and use it in pci-hyperv (Dexuan Cui)

- Fix cleanup and shutdown issues for MSHV (Jork Loeser)

- Introduce more tracing support for MSHV (Stanislav Kinsburskii)

* tag 'hyperv-next-signed-20260421' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
x86/hyperv: Skip LP/VP creation on kexec
x86/hyperv: move stimer cleanup to hv_machine_shutdown()
Drivers: hv: vmbus: fix hyperv_cpuhp_online variable shadowing
mshv: Add tracepoint for GPA intercept handling
mshv_vtl: Fix vmemmap_shift exceeding MAX_FOLIO_ORDER
tools: hv: Fix cross-compilation
Drivers: hv: vmbus: Export hv_vmbus_exists() and use it in pci-hyperv
mshv: Introduce tracing support
Drivers: hv: vmbus: Limit channel interrupt scan to relid high water mark

+783 -51
+13 -2
arch/x86/kernel/cpu/mshyperv.c
··· 237 237 #ifdef CONFIG_KEXEC_CORE 238 238 static void hv_machine_shutdown(void) 239 239 { 240 - if (kexec_in_progress && hv_kexec_handler) 241 - hv_kexec_handler(); 240 + if (kexec_in_progress) { 241 + hv_stimer_global_cleanup(); 242 + 243 + if (hv_kexec_handler) 244 + hv_kexec_handler(); 245 + } 242 246 243 247 /* 244 248 * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor ··· 431 427 } 432 428 433 429 #ifdef CONFIG_X86_64 430 + /* If AP LPs exist, we are in a kexec'd kernel and VPs already exist */ 431 + if (num_present_cpus() == 1 || hv_lp_exists(1)) 432 + return; 433 + 434 434 for_each_present_cpu(i) { 435 435 if (i == 0) 436 436 continue; 437 437 ret = hv_call_add_logical_proc(numa_cpu_node(i), i, cpu_physical_id(i)); 438 438 BUG_ON(ret); 439 439 } 440 + 441 + ret = hv_call_notify_all_processors_started(); 442 + WARN_ON(ret); 440 443 441 444 for_each_present_cpu(i) { 442 445 if (i == 0)
+1
drivers/hv/Makefile
··· 16 16 mshv_root-y := mshv_root_main.o mshv_synic.o mshv_eventfd.o mshv_irq.o \ 17 17 mshv_root_hv_call.o mshv_portid_table.o mshv_regions.o 18 18 mshv_root-$(CONFIG_DEBUG_FS) += mshv_debugfs.o 19 + mshv_root-$(CONFIG_TRACEPOINTS) += mshv_trace.o 19 20 mshv_vtl-y := mshv_vtl_main.o 20 21 21 22 # Code that must be built-in
+12 -4
drivers/hv/channel_mgmt.c
··· 384 384 385 385 void vmbus_channel_map_relid(struct vmbus_channel *channel) 386 386 { 387 - if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS)) 387 + u32 new_relid = channel->offermsg.child_relid; 388 + 389 + if (WARN_ON(new_relid >= MAX_CHANNEL_RELIDS)) 388 390 return; 391 + 392 + /* 393 + * This function is always called in the tasklet for the connect CPU. 394 + * So updating the relid hiwater mark does not need to be atomic. 395 + */ 396 + if (new_relid > READ_ONCE(vmbus_connection.relid_hiwater)) 397 + WRITE_ONCE(vmbus_connection.relid_hiwater, new_relid); 398 + 389 399 /* 390 400 * The mapping of the channel's relid is visible from the CPUs that 391 401 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will ··· 421 411 * of the VMBus driver and vmbus_chan_sched() can not run before 422 412 * vmbus_bus_resume() has completed execution (cf. resume_noirq). 423 413 */ 424 - virt_store_mb( 425 - vmbus_connection.channels[channel->offermsg.child_relid], 426 - channel); 414 + virt_store_mb(vmbus_connection.channels[new_relid], channel); 427 415 } 428 416 429 417 void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
+47
drivers/hv/hv_proc.c
··· 239 239 return ret; 240 240 } 241 241 EXPORT_SYMBOL_GPL(hv_call_create_vp); 242 + 243 + int hv_call_notify_all_processors_started(void) 244 + { 245 + struct hv_input_notify_partition_event *input; 246 + u64 status; 247 + unsigned long irq_flags; 248 + int ret = 0; 249 + 250 + local_irq_save(irq_flags); 251 + input = *this_cpu_ptr(hyperv_pcpu_input_arg); 252 + memset(input, 0, sizeof(*input)); 253 + input->event = HV_PARTITION_ALL_LOGICAL_PROCESSORS_STARTED; 254 + status = hv_do_hypercall(HVCALL_NOTIFY_PARTITION_EVENT, 255 + input, NULL); 256 + local_irq_restore(irq_flags); 257 + 258 + if (!hv_result_success(status)) { 259 + hv_status_err(status, "\n"); 260 + ret = hv_result_to_errno(status); 261 + } 262 + return ret; 263 + } 264 + 265 + bool hv_lp_exists(u32 lp_index) 266 + { 267 + struct hv_input_get_logical_processor_run_time *input; 268 + struct hv_output_get_logical_processor_run_time *output; 269 + unsigned long flags; 270 + u64 status; 271 + 272 + local_irq_save(flags); 273 + input = *this_cpu_ptr(hyperv_pcpu_input_arg); 274 + output = *this_cpu_ptr(hyperv_pcpu_output_arg); 275 + 276 + input->lp_index = lp_index; 277 + status = hv_do_hypercall(HVCALL_GET_LOGICAL_PROCESSOR_RUN_TIME, 278 + input, output); 279 + local_irq_restore(flags); 280 + 281 + if (!hv_result_success(status) && 282 + hv_result(status) != HV_STATUS_INVALID_LP_INDEX) { 283 + hv_status_err(status, "\n"); 284 + BUG(); 285 + } 286 + 287 + return hv_result_success(status); 288 + }
+2 -1
drivers/hv/hyperv_vmbus.h
··· 276 276 struct list_head chn_list; 277 277 struct mutex channel_mutex; 278 278 279 - /* Array of channels */ 279 + /* Array of channel pointers, indexed by relid */ 280 280 struct vmbus_channel **channels; 281 + u32 relid_hiwater; 281 282 282 283 /* 283 284 * An offer message is handled first on the work_queue, and then
+14
drivers/hv/mshv_eventfd.c
··· 733 733 ret = mshv_register_doorbell(pt->pt_id, ioeventfd_mmio_write, 734 734 (void *)pt, p->iovntfd_addr, 735 735 p->iovntfd_datamatch, doorbell_flags); 736 + 737 + trace_mshv_assign_ioeventfd(pt->pt_id, p->iovntfd_addr, 738 + p->iovntfd_length, 739 + p->iovntfd_datamatch, 740 + p->iovntfd_wildcard, 741 + p->iovntfd_eventfd, 742 + ret); 743 + 736 744 if (ret < 0) 737 745 goto unlock_fail; 738 746 ··· 787 779 if (!p->iovntfd_wildcard && 788 780 p->iovntfd_datamatch != args->datamatch) 789 781 continue; 782 + 783 + trace_mshv_deassign_ioeventfd(pt->pt_id, p->iovntfd_addr, 784 + p->iovntfd_length, 785 + p->iovntfd_datamatch, 786 + p->iovntfd_wildcard, 787 + p->iovntfd_eventfd); 790 788 791 789 hlist_del_rcu(&p->iovntfd_hnode); 792 790 synchronize_rcu();
+4
drivers/hv/mshv_irq.c
··· 71 71 mutex_unlock(&partition->pt_irq_lock); 72 72 73 73 synchronize_srcu_expedited(&partition->pt_irq_srcu); 74 + 75 + trace_mshv_update_routing_table(partition->pt_id, 76 + old, new, numents); 77 + 74 78 new = old; 75 79 76 80 out:
+1
drivers/hv/mshv_root.h
··· 17 17 #include <linux/build_bug.h> 18 18 #include <linux/mmu_notifier.h> 19 19 #include <uapi/linux/mshv.h> 20 + #include "mshv_trace.h" 20 21 21 22 /* 22 23 * Hypervisor must be between these version numbers (inclusive)
+17 -5
drivers/hv/mshv_root_hv_call.c
··· 45 45 struct hv_output_withdraw_memory *output_page; 46 46 struct page *page; 47 47 u16 completed; 48 - unsigned long remaining = count; 49 - u64 status; 48 + u64 status, withdrawn = 0; 50 49 int i; 51 50 unsigned long flags; 52 51 ··· 54 55 return -ENOMEM; 55 56 output_page = page_address(page); 56 57 57 - while (remaining) { 58 + while (withdrawn < count) { 58 59 local_irq_save(flags); 59 60 60 61 input_page = *this_cpu_ptr(hyperv_pcpu_input_arg); ··· 62 63 memset(input_page, 0, sizeof(*input_page)); 63 64 input_page->partition_id = partition_id; 64 65 status = hv_do_rep_hypercall(HVCALL_WITHDRAW_MEMORY, 65 - min(remaining, HV_WITHDRAW_BATCH_SIZE), 66 + min(count - withdrawn, HV_WITHDRAW_BATCH_SIZE), 66 67 0, input_page, output_page); 67 68 68 69 local_irq_restore(flags); ··· 78 79 break; 79 80 } 80 81 81 - remaining -= completed; 82 + withdrawn += completed; 82 83 } 83 84 free_page((unsigned long)output_page); 85 + 86 + trace_mshv_hvcall_withdraw_memory(partition_id, withdrawn, status); 84 87 85 88 return hv_result_to_errno(status); 86 89 } ··· 127 126 ret = hv_deposit_memory(hv_current_partition_id, status); 128 127 } while (!ret); 129 128 129 + trace_mshv_hvcall_create_partition(flags, ret ? 
ret : *partition_id); 130 + 130 131 return ret; 131 132 } 132 133 ··· 156 153 ret = hv_deposit_memory(partition_id, status); 157 154 } while (!ret); 158 155 156 + trace_mshv_hvcall_initialize_partition(partition_id, status); 157 + 159 158 return ret; 160 159 } 161 160 ··· 170 165 status = hv_do_fast_hypercall8(HVCALL_FINALIZE_PARTITION, 171 166 *(u64 *)&input); 172 167 168 + trace_mshv_hvcall_finalize_partition(partition_id, status); 169 + 173 170 return hv_result_to_errno(status); 174 171 } 175 172 ··· 182 175 183 176 input.partition_id = partition_id; 184 177 status = hv_do_fast_hypercall8(HVCALL_DELETE_PARTITION, *(u64 *)&input); 178 + 179 + trace_mshv_hvcall_delete_partition(partition_id, status); 185 180 186 181 return hv_result_to_errno(status); 187 182 } ··· 581 572 582 573 ret = hv_deposit_memory(partition_id, status); 583 574 } while (!ret); 575 + 576 + trace_mshv_hvcall_map_vp_state_page(partition_id, vp_index, 577 + type, status); 584 578 585 579 return ret; 586 580 }
+72 -12
drivers/hv/mshv_root_main.c
··· 429 429 status = hv_do_hypercall(HVCALL_DISPATCH_VP, input, output); 430 430 vp->run.flags.root_sched_dispatched = 0; 431 431 432 + trace_mshv_hvcall_dispatch_vp(vp->vp_partition->pt_id, 433 + vp->vp_index, flags, 434 + output->dispatch_state, 435 + output->dispatch_event, 436 + #if defined(CONFIG_X86_64) 437 + vp->vp_register_page->interrupt_vectors.as_uint64, 438 + #else 439 + 0, 440 + #endif 441 + status); 442 + 432 443 *res = *output; 433 444 preempt_enable(); 434 445 ··· 461 450 462 451 ret = mshv_set_vp_registers(vp->vp_index, vp->vp_partition->pt_id, 463 452 1, &explicit_suspend); 453 + 454 + trace_mshv_vp_clear_explicit_suspend(vp->vp_partition->pt_id, 455 + vp->vp_index, ret); 464 456 465 457 if (ret) 466 458 vp_err(vp, "Failed to unsuspend\n"); ··· 507 493 if (ret) 508 494 return -EINTR; 509 495 496 + trace_mshv_vp_wait_for_hv_kick(vp->vp_partition->pt_id, 497 + vp->vp_index, 498 + vp->run.kicked_by_hv, 499 + mshv_vp_dispatch_thread_blocked(vp), 500 + mshv_vp_interrupt_pending(vp)); 501 + 510 502 vp->run.flags.root_sched_blocked = 0; 511 503 vp->run.kicked_by_hv = 0; 512 504 ··· 541 521 542 522 if (__xfer_to_guest_mode_work_pending()) { 543 523 ret = xfer_to_guest_mode_handle_work(); 524 + 525 + trace_mshv_xfer_to_guest_mode_work(vp->vp_partition->pt_id, 526 + vp->vp_index, 527 + read_thread_flags(), 528 + ret); 529 + 544 530 if (ret) 545 531 break; 546 532 } ··· 674 648 675 649 region = mshv_partition_region_by_gfn_get(p, gfn); 676 650 if (!region) 677 - return false; 651 + goto out; 678 652 679 653 if (access_type == HV_INTERCEPT_ACCESS_WRITE && 680 654 !(region->hv_map_flags & HV_MAP_GPA_WRITABLE)) ··· 690 664 691 665 put_region: 692 666 mshv_region_put(region); 693 - 667 + out: 668 + trace_mshv_handle_gpa_intercept(p->pt_id, vp->vp_index, gfn, 669 + access_type, ret); 694 670 return ret; 695 671 } 696 672 ··· 709 681 { 710 682 long rc; 711 683 684 + trace_mshv_run_vp_entry(vp->vp_partition->pt_id, vp->vp_index); 685 + 712 686 do { 713 687 if 
(hv_scheduler_type == HV_SCHEDULER_TYPE_ROOT) 714 688 rc = mshv_run_vp_with_root_scheduler(vp); 715 689 else 716 690 rc = mshv_run_vp_with_hyp_scheduler(vp); 717 691 } while (rc == 0 && mshv_vp_handle_intercept(vp)); 692 + 693 + trace_mshv_run_vp_exit(vp->vp_partition->pt_id, vp->vp_index, 694 + vp->vp_intercept_msg_page->header.message_type, 695 + rc); 718 696 719 697 if (rc) 720 698 return rc; ··· 983 949 { 984 950 struct mshv_vp *vp = filp->private_data; 985 951 952 + trace_mshv_vp_release(vp->vp_partition->pt_id, vp->vp_index); 953 + 986 954 /* Rest of VP cleanup happens in destroy_partition() */ 987 955 mshv_partition_put(vp->vp_partition); 988 956 return 0; ··· 1157 1121 partition->pt_vp_count++; 1158 1122 partition->pt_vp_array[args.vp_index] = vp; 1159 1123 1160 - return ret; 1124 + goto out; 1161 1125 1162 1126 remove_debugfs_vp: 1163 1127 mshv_debugfs_vp_remove(vp); ··· 1183 1147 intercept_msg_page, input_vtl_zero); 1184 1148 destroy_vp: 1185 1149 hv_call_delete_vp(partition->pt_id, args.vp_index); 1150 + out: 1151 + trace_mshv_create_vp(partition->pt_id, args.vp_index, ret); 1186 1152 return ret; 1187 1153 } 1188 1154 ··· 1383 1345 region->nr_pages); 1384 1346 break; 1385 1347 } 1348 + 1349 + trace_mshv_map_user_memory(partition->pt_id, region->start_uaddr, 1350 + region->start_gfn, region->nr_pages, 1351 + region->hv_map_flags, ret); 1386 1352 1387 1353 if (ret) 1388 1354 goto errout; ··· 1683 1641 if (ret) 1684 1642 vp_err(vp, "failed to suspend\n"); 1685 1643 1644 + trace_mshv_disable_vp_dispatch(vp->vp_partition->pt_id, 1645 + vp->vp_index, ret); 1646 + 1686 1647 return ret; 1687 1648 } 1688 1649 ··· 1734 1689 vp->run.kicked_by_hv = 0; 1735 1690 vp_signal_count = atomic64_read(&vp->run.vp_signaled_count); 1736 1691 } 1692 + 1693 + trace_mshv_drain_vp_signals(vp->vp_partition->pt_id, vp->vp_index); 1737 1694 } 1738 1695 1739 1696 static void drain_all_vps(const struct mshv_partition *partition) ··· 1788 1741 "Attempt to destroy partition but refcount 
> 0\n"); 1789 1742 return; 1790 1743 } 1744 + 1745 + trace_mshv_destroy_partition(partition->pt_id); 1791 1746 1792 1747 if (partition->pt_initialized) { 1793 1748 /* ··· 1896 1847 mshv_partition_release(struct inode *inode, struct file *filp) 1897 1848 { 1898 1849 struct mshv_partition *partition = filp->private_data; 1850 + 1851 + trace_mshv_partition_release(partition->pt_id); 1899 1852 1900 1853 mshv_eventfd_release(partition); 1901 1854 ··· 2028 1977 struct hv_partition_creation_properties creation_properties; 2029 1978 union hv_partition_isolation_properties isolation_properties; 2030 1979 struct mshv_partition *partition; 1980 + u64 pt_id = -1; 2031 1981 long ret; 2032 1982 2033 1983 ret = mshv_ioctl_process_pt_flags(user_arg, &creation_flags, ··· 2068 2016 ret = hv_call_create_partition(creation_flags, 2069 2017 creation_properties, 2070 2018 isolation_properties, 2071 - &partition->pt_id); 2019 + &pt_id); 2072 2020 if (ret) 2073 2021 goto cleanup_irq_srcu; 2022 + 2023 + partition->pt_id = pt_id; 2074 2024 2075 2025 ret = add_partition(partition); 2076 2026 if (ret) 2077 2027 goto delete_partition; 2078 2028 2079 2029 ret = mshv_init_async_handler(partition); 2080 - if (!ret) { 2081 - ret = FD_ADD(O_CLOEXEC, anon_inode_getfile("mshv_partition", 2082 - &mshv_partition_fops, 2083 - partition, O_RDWR)); 2084 - if (ret >= 0) 2085 - return ret; 2086 - } 2030 + if (ret) 2031 + goto remove_partition; 2032 + 2033 + ret = FD_ADD(O_CLOEXEC, anon_inode_getfile("mshv_partition", 2034 + &mshv_partition_fops, 2035 + partition, O_RDWR)); 2036 + if (ret < 0) 2037 + goto remove_partition; 2038 + 2039 + goto out; 2040 + 2041 + remove_partition: 2087 2042 remove_partition(partition); 2088 2043 delete_partition: 2089 2044 hv_call_delete_partition(partition->pt_id); ··· 2098 2039 cleanup_srcu_struct(&partition->pt_irq_srcu); 2099 2040 free_partition: 2100 2041 kfree(partition); 2101 - 2042 + out: 2043 + trace_mshv_create_partition(pt_id, ret); 2102 2044 return ret; 2103 2045 } 
2104 2046
+9
drivers/hv/mshv_trace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (c) 2026, Microsoft Corporation. 4 + * 5 + * Tracepoint definitions for mshv driver. 6 + */ 7 + 8 + #define CREATE_TRACE_POINTS 9 + #include "mshv_trace.h"
+544
drivers/hv/mshv_trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright (c) 2026, Microsoft Corporation. 4 + * 5 + * Tracepoint declarations for mshv driver. 6 + */ 7 + 8 + #undef TRACE_SYSTEM 9 + #define TRACE_SYSTEM mshv 10 + 11 + #if !defined(__MSHV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) 12 + #define _MSHV_TRACE_H_ 13 + 14 + #include <linux/tracepoint.h> 15 + #include <hyperv/hvhdk.h> 16 + 17 + #undef TRACE_INCLUDE_PATH 18 + #define TRACE_INCLUDE_PATH ../../drivers/hv 19 + 20 + #undef TRACE_INCLUDE_FILE 21 + #define TRACE_INCLUDE_FILE mshv_trace 22 + 23 + TRACE_EVENT(mshv_create_partition, 24 + TP_PROTO(u64 partition_id, int vm_fd), 25 + TP_ARGS(partition_id, vm_fd), 26 + TP_STRUCT__entry( 27 + __field(u64, partition_id) 28 + __field(int, vm_fd) 29 + ), 30 + TP_fast_assign( 31 + __entry->partition_id = partition_id; 32 + __entry->vm_fd = vm_fd; 33 + ), 34 + TP_printk("partition_id=%llu vm_fd=%d", 35 + __entry->partition_id, 36 + __entry->vm_fd 37 + ) 38 + ); 39 + 40 + TRACE_EVENT(mshv_hvcall_create_partition, 41 + TP_PROTO(u64 flags, s64 partition_id), 42 + TP_ARGS(flags, partition_id), 43 + TP_STRUCT__entry( 44 + __field(u64, flags) 45 + __field(s64, partition_id) 46 + ), 47 + TP_fast_assign( 48 + __entry->flags = flags; 49 + __entry->partition_id = partition_id; 50 + ), 51 + TP_printk("flags=%#llx partition_id=%lld", 52 + __entry->flags, 53 + __entry->partition_id 54 + ) 55 + ); 56 + 57 + TRACE_EVENT(mshv_hvcall_initialize_partition, 58 + TP_PROTO(u64 partition_id, u64 status), 59 + TP_ARGS(partition_id, status), 60 + TP_STRUCT__entry( 61 + __field(u64, partition_id) 62 + __field(u64, status) 63 + ), 64 + TP_fast_assign( 65 + __entry->partition_id = partition_id; 66 + __entry->status = status; 67 + ), 68 + TP_printk("partition_id=%llu status=%#llx", 69 + __entry->partition_id, 70 + __entry->status 71 + ) 72 + ); 73 + 74 + TRACE_EVENT(mshv_partition_release, 75 + TP_PROTO(u64 partition_id), 76 + TP_ARGS(partition_id), 77 + TP_STRUCT__entry( 78 + 
__field(u64, partition_id) 79 + ), 80 + TP_fast_assign( 81 + __entry->partition_id = partition_id; 82 + ), 83 + TP_printk("partition_id=%llu", 84 + __entry->partition_id 85 + ) 86 + ); 87 + 88 + TRACE_EVENT(mshv_destroy_partition, 89 + TP_PROTO(u64 partition_id), 90 + TP_ARGS(partition_id), 91 + TP_STRUCT__entry( 92 + __field(u64, partition_id) 93 + ), 94 + TP_fast_assign( 95 + __entry->partition_id = partition_id; 96 + ), 97 + TP_printk("partition_id=%llu", 98 + __entry->partition_id 99 + ) 100 + ); 101 + 102 + TRACE_EVENT(mshv_hvcall_finalize_partition, 103 + TP_PROTO(u64 partition_id, u64 status), 104 + TP_ARGS(partition_id, status), 105 + TP_STRUCT__entry( 106 + __field(u64, partition_id) 107 + __field(u64, status) 108 + ), 109 + TP_fast_assign( 110 + __entry->partition_id = partition_id; 111 + __entry->status = status; 112 + ), 113 + TP_printk("partition_id=%llu status=%#llx ", 114 + __entry->partition_id, 115 + __entry->status 116 + ) 117 + ); 118 + 119 + TRACE_EVENT(mshv_hvcall_withdraw_memory, 120 + TP_PROTO(u64 partition_id, u64 withdrawn, u64 status), 121 + TP_ARGS(partition_id, withdrawn, status), 122 + TP_STRUCT__entry( 123 + __field(u64, partition_id) 124 + __field(u64, withdrawn) 125 + __field(u64, status) 126 + ), 127 + TP_fast_assign( 128 + __entry->partition_id = partition_id; 129 + __entry->withdrawn = withdrawn; 130 + __entry->status = status; 131 + ), 132 + TP_printk("partition_id=%llu withdrawn=%llu status=%#llx", 133 + __entry->partition_id, 134 + __entry->withdrawn, 135 + __entry->status 136 + ) 137 + ); 138 + 139 + TRACE_EVENT(mshv_hvcall_delete_partition, 140 + TP_PROTO(u64 partition_id, u64 status), 141 + TP_ARGS(partition_id, status), 142 + TP_STRUCT__entry( 143 + __field(u64, partition_id) 144 + __field(u64, status) 145 + ), 146 + TP_fast_assign( 147 + __entry->partition_id = partition_id; 148 + __entry->status = status; 149 + ), 150 + TP_printk("partition_id=%llu status=%#llx", 151 + __entry->partition_id, 152 + __entry->status 153 + ) 
154 + ); 155 + 156 + TRACE_EVENT(mshv_create_vp, 157 + TP_PROTO(u64 partition_id, u32 vp_index, long vp_fd), 158 + TP_ARGS(partition_id, vp_index, vp_fd), 159 + TP_STRUCT__entry( 160 + __field(u64, partition_id) 161 + __field(u32, vp_index) 162 + __field(long, vp_fd) 163 + ), 164 + TP_fast_assign( 165 + __entry->partition_id = partition_id; 166 + __entry->vp_index = vp_index; 167 + __entry->vp_fd = vp_fd; 168 + ), 169 + TP_printk("partition_id=%llu vp_index=%u vp_fd=%ld", 170 + __entry->partition_id, 171 + __entry->vp_index, 172 + __entry->vp_fd 173 + ) 174 + ); 175 + 176 + TRACE_EVENT(mshv_hvcall_map_vp_state_page, 177 + TP_PROTO(u64 partition_id, u32 vp_index, u32 page_type, u64 status), 178 + TP_ARGS(partition_id, vp_index, page_type, status), 179 + TP_STRUCT__entry( 180 + __field(u64, partition_id) 181 + __field(u32, vp_index) 182 + __field(u32, page_type) 183 + __field(u64, status) 184 + ), 185 + TP_fast_assign( 186 + __entry->partition_id = partition_id; 187 + __entry->vp_index = vp_index; 188 + __entry->page_type = page_type; 189 + __entry->status = status; 190 + ), 191 + TP_printk("partition_id=%llu vp_index=%u page_type=%u status=%#llx", 192 + __entry->partition_id, 193 + __entry->vp_index, 194 + __entry->page_type, 195 + __entry->status 196 + ) 197 + ); 198 + 199 + TRACE_EVENT(mshv_drain_vp_signals, 200 + TP_PROTO(u64 partition_id, u32 vp_index), 201 + TP_ARGS(partition_id, vp_index), 202 + TP_STRUCT__entry( 203 + __field(u64, partition_id) 204 + __field(u32, vp_index) 205 + ), 206 + TP_fast_assign( 207 + __entry->partition_id = partition_id; 208 + __entry->vp_index = vp_index; 209 + ), 210 + TP_printk("partition_id=%llu vp_index=%u", 211 + __entry->partition_id, 212 + __entry->vp_index 213 + ) 214 + ); 215 + 216 + TRACE_EVENT(mshv_disable_vp_dispatch, 217 + TP_PROTO(u64 partition_id, u32 vp_index, int ret), 218 + TP_ARGS(partition_id, vp_index, ret), 219 + TP_STRUCT__entry( 220 + __field(u64, partition_id) 221 + __field(u32, vp_index) 222 + __field(int, 
ret) 223 + ), 224 + TP_fast_assign( 225 + __entry->partition_id = partition_id; 226 + __entry->vp_index = vp_index; 227 + __entry->ret = ret; 228 + ), 229 + TP_printk("partition_id=%llu vp_index=%u ret=%d", 230 + __entry->partition_id, 231 + __entry->vp_index, 232 + __entry->ret 233 + ) 234 + ); 235 + 236 + TRACE_EVENT(mshv_vp_release, 237 + TP_PROTO(u64 partition_id, u32 vp_index), 238 + TP_ARGS(partition_id, vp_index), 239 + TP_STRUCT__entry( 240 + __field(u64, partition_id) 241 + __field(u32, vp_index) 242 + ), 243 + TP_fast_assign( 244 + __entry->partition_id = partition_id; 245 + __entry->vp_index = vp_index; 246 + ), 247 + TP_printk("partition_id=%llu vp_index=%u", 248 + __entry->partition_id, 249 + __entry->vp_index 250 + ) 251 + ); 252 + 253 + TRACE_EVENT(mshv_run_vp_entry, 254 + TP_PROTO(u64 partition_id, u32 vp_index), 255 + TP_ARGS(partition_id, vp_index), 256 + TP_STRUCT__entry( 257 + __field(u64, partition_id) 258 + __field(u32, vp_index) 259 + ), 260 + TP_fast_assign( 261 + __entry->partition_id = partition_id; 262 + __entry->vp_index = vp_index; 263 + ), 264 + TP_printk("partition_id=%llu vp_index=%u", 265 + __entry->partition_id, 266 + __entry->vp_index 267 + ) 268 + ); 269 + 270 + TRACE_EVENT(mshv_run_vp_exit, 271 + TP_PROTO(u64 partition_id, u32 vp_index, u64 hv_message_type, long ret), 272 + TP_ARGS(partition_id, vp_index, hv_message_type, ret), 273 + TP_STRUCT__entry( 274 + __field(u64, partition_id) 275 + __field(u32, vp_index) 276 + __field(u64, hv_message_type) 277 + __field(long, ret) 278 + ), 279 + TP_fast_assign( 280 + __entry->partition_id = partition_id; 281 + __entry->vp_index = vp_index; 282 + __entry->hv_message_type = hv_message_type; 283 + __entry->ret = ret; 284 + ), 285 + TP_printk("partition_id=%llu vp_index=%u hv_message_type=%#llx ret=%ld", 286 + __entry->partition_id, 287 + __entry->vp_index, 288 + __entry->hv_message_type, 289 + __entry->ret 290 + ) 291 + ); 292 + 293 + TRACE_EVENT(mshv_vp_clear_explicit_suspend, 294 + 
TP_PROTO(u64 partition_id, u32 vp_index, int ret), 295 + TP_ARGS(partition_id, vp_index, ret), 296 + TP_STRUCT__entry( 297 + __field(u64, partition_id) 298 + __field(u32, vp_index) 299 + __field(int, ret) 300 + ), 301 + TP_fast_assign( 302 + __entry->partition_id = partition_id; 303 + __entry->vp_index = vp_index; 304 + __entry->ret = ret; 305 + ), 306 + TP_printk("partition_id=%llu vp_index=%u ret=%d", 307 + __entry->partition_id, 308 + __entry->vp_index, 309 + __entry->ret 310 + ) 311 + ); 312 + 313 + TRACE_EVENT(mshv_xfer_to_guest_mode_work, 314 + TP_PROTO(u64 partition_id, u32 vp_index, unsigned long thread_info_flag, long ret), 315 + TP_ARGS(partition_id, vp_index, thread_info_flag, ret), 316 + TP_STRUCT__entry( 317 + __field(u64, partition_id) 318 + __field(u32, vp_index) 319 + __field(unsigned long, thread_info_flag) 320 + __field(long, ret) 321 + ), 322 + TP_fast_assign( 323 + __entry->partition_id = partition_id; 324 + __entry->vp_index = vp_index; 325 + __entry->thread_info_flag = thread_info_flag; 326 + __entry->ret = ret; 327 + ), 328 + TP_printk("partition_id=%llu vp_index=%u thread_info_flag=%#lx ret=%ld", 329 + __entry->partition_id, 330 + __entry->vp_index, 331 + __entry->thread_info_flag, 332 + __entry->ret 333 + ) 334 + ); 335 + 336 + TRACE_EVENT(mshv_hvcall_dispatch_vp, 337 + TP_PROTO(u64 partition_id, u32 vp_index, u32 flags, 338 + u32 dispatch_state, u32 dispatch_event, u64 irq_vectors, u64 status), 339 + TP_ARGS(partition_id, vp_index, flags, dispatch_state, dispatch_event, irq_vectors, 340 + status), 341 + TP_STRUCT__entry( 342 + __field(u64, partition_id) 343 + __field(u32, vp_index) 344 + __field(u32, flags) 345 + __field(u32, dispatch_state) 346 + __field(u32, dispatch_event) 347 + __field(u64, irq_vectors) 348 + __field(u64, status) 349 + ), 350 + TP_fast_assign( 351 + __entry->partition_id = partition_id; 352 + __entry->vp_index = vp_index; 353 + __entry->flags = flags; 354 + __entry->dispatch_state = dispatch_state; 355 + 
__entry->dispatch_event = dispatch_event; 356 + __entry->irq_vectors = irq_vectors; 357 + __entry->status = status; 358 + ), 359 + TP_printk("partition_id=%llu vp_index=%u flags=%#x dispatch_state=%#x dispatch_event=%#x irq_vectors=%#016llx status=%#llx", 360 + __entry->partition_id, 361 + __entry->vp_index, 362 + __entry->flags, 363 + __entry->dispatch_state, 364 + __entry->dispatch_event, 365 + __entry->irq_vectors, 366 + __entry->status 367 + ) 368 + ); 369 + 370 + TRACE_EVENT(mshv_update_routing_table, 371 + TP_PROTO(u64 partition_id, void *old, void *new, u32 numents), 372 + TP_ARGS(partition_id, old, new, numents), 373 + TP_STRUCT__entry( 374 + __field(u64, partition_id) 375 + __field(struct mshv_girq_routing_table *, old) 376 + __field(struct mshv_girq_routing_table *, new) 377 + __field(u32, numents) 378 + ), 379 + TP_fast_assign( 380 + __entry->partition_id = partition_id; 381 + __entry->old = old; 382 + __entry->new = new; 383 + __entry->numents = numents; 384 + ), 385 + TP_printk("partition_id=%llu old=%p new=%p numents=%u", 386 + __entry->partition_id, 387 + __entry->old, 388 + __entry->new, 389 + __entry->numents 390 + ) 391 + ); 392 + 393 + TRACE_EVENT(mshv_map_user_memory, 394 + TP_PROTO(u64 partition_id, u64 start_uaddr, u64 start_gfn, u64 nr_pages, u32 map_flags, 395 + long ret), 396 + TP_ARGS(partition_id, start_uaddr, start_gfn, nr_pages, map_flags, ret), 397 + TP_STRUCT__entry( 398 + __field(u64, partition_id) 399 + __field(u64, start_uaddr) 400 + __field(u64, start_gfn) 401 + __field(u64, nr_pages) 402 + __field(u32, map_flags) 403 + __field(long, ret) 404 + ), 405 + TP_fast_assign( 406 + __entry->partition_id = partition_id; 407 + __entry->start_uaddr = start_uaddr; 408 + __entry->start_gfn = start_gfn; 409 + __entry->nr_pages = nr_pages; 410 + __entry->map_flags = map_flags; 411 + __entry->ret = ret; 412 + ), 413 + TP_printk("partition_id=%llu start_uaddr=%#llx start_gfn=%#llx nr_pages=%llu map_flags=%#x ret=%ld", 414 + __entry->partition_id, 
415 + __entry->start_uaddr, 416 + __entry->start_gfn, 417 + __entry->nr_pages, 418 + __entry->map_flags, 419 + __entry->ret 420 + ) 421 + ); 422 + 423 + TRACE_EVENT(mshv_assign_ioeventfd, 424 + TP_PROTO(u64 partition_id, u64 addr, u64 length, u64 datamatch, bool wildcard, 425 + void *eventfd, int ret), 426 + TP_ARGS(partition_id, addr, length, datamatch, wildcard, eventfd, ret), 427 + TP_STRUCT__entry( 428 + __field(u64, partition_id) 429 + __field(u64, addr) 430 + __field(u64, length) 431 + __field(u64, datamatch) 432 + __field(bool, wildcard) 433 + __field(struct eventfd_ctx *, eventfd) 434 + __field(int, ret) 435 + ), 436 + TP_fast_assign( 437 + __entry->partition_id = partition_id; 438 + __entry->addr = addr; 439 + __entry->length = length; 440 + __entry->datamatch = datamatch; 441 + __entry->wildcard = wildcard; 442 + __entry->eventfd = eventfd; 443 + __entry->ret = ret; 444 + ), 445 + TP_printk("partition_id=%llu addr=%#016llx length=%#llx datamatch=%#llx wildcard=%d eventfd=%p ret=%d", 446 + __entry->partition_id, 447 + __entry->addr, 448 + __entry->length, 449 + __entry->datamatch, 450 + __entry->wildcard, 451 + __entry->eventfd, 452 + __entry->ret 453 + ) 454 + ); 455 + 456 + TRACE_EVENT(mshv_deassign_ioeventfd, 457 + TP_PROTO(u64 partition_id, u64 addr, u64 length, u64 datamatch, bool wildcard, 458 + void *eventfd), 459 + TP_ARGS(partition_id, addr, length, datamatch, wildcard, eventfd), 460 + TP_STRUCT__entry( 461 + __field(u64, partition_id) 462 + __field(u64, addr) 463 + __field(u64, length) 464 + __field(u64, datamatch) 465 + __field(bool, wildcard) 466 + __field(struct eventfd_ctx *, eventfd) 467 + ), 468 + TP_fast_assign( 469 + __entry->partition_id = partition_id; 470 + __entry->addr = addr; 471 + __entry->length = length; 472 + __entry->datamatch = datamatch; 473 + __entry->wildcard = wildcard; 474 + __entry->eventfd = eventfd; 475 + ), 476 + TP_printk("partition_id=%llu addr=%#016llx length=%#llx datamatch=%#llx wildcard=%d eventfd=%p", 477 + 
__entry->partition_id, 478 + __entry->addr, 479 + __entry->length, 480 + __entry->datamatch, 481 + __entry->wildcard, 482 + __entry->eventfd 483 + ) 484 + ); 485 + 486 + TRACE_EVENT(mshv_vp_wait_for_hv_kick, 487 + TP_PROTO(u64 partition_id, u32 vp_index, bool kicked_by_hv, bool blocked, 488 + bool irq_pending), 489 + TP_ARGS(partition_id, vp_index, kicked_by_hv, blocked, irq_pending), 490 + TP_STRUCT__entry( 491 + __field(u64, partition_id) 492 + __field(u32, vp_index) 493 + __field(bool, kicked_by_hv) 494 + __field(bool, blocked) 495 + __field(bool, irq_pending) 496 + ), 497 + TP_fast_assign( 498 + __entry->partition_id = partition_id; 499 + __entry->vp_index = vp_index; 500 + __entry->kicked_by_hv = kicked_by_hv; 501 + __entry->blocked = blocked; 502 + __entry->irq_pending = irq_pending; 503 + ), 504 + TP_printk("partition_id=%llu vp_index=%u kicked_by_hv=%d blocked=%d irq_pending=%d", 505 + __entry->partition_id, 506 + __entry->vp_index, 507 + __entry->kicked_by_hv, 508 + __entry->blocked, 509 + __entry->irq_pending 510 + ) 511 + ); 512 + 513 + TRACE_EVENT(mshv_handle_gpa_intercept, 514 + TP_PROTO(u64 partition_id, u32 vp_index, u64 gfn, u8 access_type, bool handled), 515 + TP_ARGS(partition_id, vp_index, gfn, access_type, handled), 516 + TP_STRUCT__entry( 517 + __field(u64, partition_id) 518 + __field(u32, vp_index) 519 + __field(u64, gfn) 520 + __field(u8, access_type) 521 + __field(bool, handled) 522 + ), 523 + TP_fast_assign( 524 + __entry->partition_id = partition_id; 525 + __entry->vp_index = vp_index; 526 + __entry->gfn = gfn; 527 + __entry->access_type = access_type == HV_INTERCEPT_ACCESS_READ ? 'R' : 528 + (access_type == HV_INTERCEPT_ACCESS_WRITE ? 'W' : 529 + (access_type == HV_INTERCEPT_ACCESS_EXECUTE ? 
'X' : '?')); 530 + __entry->handled = handled; 531 + ), 532 + TP_printk("partition_id=%llu vp_index=%u gfn=0x%llx access_type=%c handled=%d", 533 + __entry->partition_id, 534 + __entry->vp_index, 535 + __entry->gfn, 536 + __entry->access_type, 537 + __entry->handled 538 + ) 539 + ); 540 + 541 + #endif /* _MSHV_TRACE_H_ */ 542 + 543 + /* This part must be outside protection */ 544 + #include <trace/define_trace.h>
+9 -3
drivers/hv/mshv_vtl_main.c
··· 386 386 387 387 if (copy_from_user(&vtl0_mem, arg, sizeof(vtl0_mem))) 388 388 return -EFAULT; 389 - /* vtl0_mem.last_pfn is excluded in the pagemap range for VTL0 as per design */ 390 389 if (vtl0_mem.last_pfn <= vtl0_mem.start_pfn) { 391 390 dev_err(vtl->module_dev, "range start pfn (%llx) > end pfn (%llx)\n", 392 391 vtl0_mem.start_pfn, vtl0_mem.last_pfn); ··· 396 397 if (!pgmap) 397 398 return -ENOMEM; 398 399 400 + /* 401 + * vtl0_mem.last_pfn is excluded in the pagemap range for VTL0 as per design. 402 + * last_pfn is not reserved or wasted, and reflects 'start_pfn + size' of pagemap range. 403 + */ 399 404 pgmap->ranges[0].start = PFN_PHYS(vtl0_mem.start_pfn); 400 405 pgmap->ranges[0].end = PFN_PHYS(vtl0_mem.last_pfn) - 1; 401 406 pgmap->nr_range = 1; ··· 408 405 /* 409 406 * Determine the highest page order that can be used for the given memory range. 410 407 * This works best when the range is aligned; i.e. both the start and the length. 408 + * Clamp to MAX_FOLIO_ORDER to avoid a WARN in memremap_pages() when the range 409 + * alignment exceeds the maximum supported folio order for this kernel config. 411 410 */ 412 - pgmap->vmemmap_shift = count_trailing_zeros(vtl0_mem.start_pfn | vtl0_mem.last_pfn); 411 + pgmap->vmemmap_shift = min(count_trailing_zeros(vtl0_mem.start_pfn | vtl0_mem.last_pfn), 412 + MAX_FOLIO_ORDER); 413 413 dev_dbg(vtl->module_dev, 414 414 "Add VTL0 memory: start: 0x%llx, end_pfn: 0x%llx, page order: %lu\n", 415 415 vtl0_mem.start_pfn, vtl0_mem.last_pfn, pgmap->vmemmap_shift); ··· 421 415 if (IS_ERR(addr)) { 422 416 dev_err(vtl->module_dev, "devm_memremap_pages error: %ld\n", PTR_ERR(addr)); 423 417 kfree(pgmap); 424 - return -EFAULT; 418 + return PTR_ERR(addr); 425 419 } 426 420 427 421 /* Don't free pgmap, since it has to stick around until the memory
+9 -20
drivers/hv/vmbus_drv.c
··· 100 100 } 101 101 EXPORT_SYMBOL_GPL(hv_get_vmbus_root_device); 102 102 103 - static int vmbus_exists(void) 103 + bool hv_vmbus_exists(void) 104 104 { 105 - if (vmbus_root_device == NULL) 106 - return -ENODEV; 107 - 108 - return 0; 105 + return vmbus_root_device != NULL; 109 106 } 107 + EXPORT_SYMBOL_GPL(hv_vmbus_exists); 110 108 111 109 static u8 channel_monitor_group(const struct vmbus_channel *channel) 112 110 { ··· 1255 1257 return; 1256 1258 event = (union hv_synic_event_flags *)event_page_addr + VMBUS_MESSAGE_SINT; 1257 1259 1258 - maxbits = HV_EVENT_FLAGS_COUNT; 1260 + maxbits = READ_ONCE(vmbus_connection.relid_hiwater) + 1; 1259 1261 recv_int_page = event->flags; 1260 1262 1261 1263 if (unlikely(!recv_int_page)) 1262 1264 return; 1263 1265 1264 - /* 1265 - * Suggested-by: Michael Kelley <mhklinux@outlook.com> 1266 - * One possible optimization would be to keep track of the largest relID that's in use, 1267 - * and only scan up to that relID. 1268 - */ 1269 1266 for_each_set_bit(relid, recv_int_page, maxbits) { 1270 1267 void (*callback_fn)(void *context); 1271 1268 struct vmbus_channel *channel; ··· 1420 1427 { 1421 1428 int ret, cpu; 1422 1429 struct work_struct __percpu *works; 1423 - int hyperv_cpuhp_online; 1424 1430 1425 1431 ret = hv_synic_alloc(); 1426 1432 if (ret < 0) ··· 1571 1579 { 1572 1580 int ret; 1573 1581 1574 - pr_info("registering driver %s\n", hv_driver->name); 1582 + if (!hv_vmbus_exists()) 1583 + return -ENODEV; 1575 1584 1576 - ret = vmbus_exists(); 1577 - if (ret < 0) 1578 - return ret; 1585 + pr_info("registering driver %s\n", hv_driver->name); 1579 1586 1580 1587 hv_driver->driver.name = hv_driver->name; 1581 1588 hv_driver->driver.owner = owner; ··· 1600 1609 */ 1601 1610 void vmbus_driver_unregister(struct hv_driver *hv_driver) 1602 1611 { 1603 - pr_info("unregistering driver %s\n", hv_driver->name); 1604 - 1605 - if (!vmbus_exists()) { 1612 + if (hv_vmbus_exists()) { 1613 + pr_info("unregistering driver %s\n", 
hv_driver->name); 1606 1614 driver_unregister(&hv_driver->driver); 1607 1615 vmbus_free_dynids(hv_driver); 1608 1616 } ··· 2887 2897 2888 2898 static void hv_kexec_handler(void) 2889 2899 { 2890 - hv_stimer_global_cleanup(); 2891 2900 vmbus_initiate_unload(false); 2892 2901 /* Make sure conn_state is set as hv_synic_cleanup checks for it */ 2893 2902 mb();
+1 -1
drivers/pci/controller/pci-hyperv.c
··· 4172 4172 if (!hv_is_hyperv_initialized()) 4173 4173 return -ENODEV; 4174 4174 4175 - if (hv_root_partition() && !hv_nested) 4175 + if (!hv_vmbus_exists()) 4176 4176 return -ENODEV; 4177 4177 4178 4178 ret = hv_pci_irqchip_init();
+10
include/asm-generic/mshyperv.h
··· 347 347 int hv_deposit_memory_node(int node, u64 partition_id, u64 status); 348 348 int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages); 349 349 int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id); 350 + int hv_call_notify_all_processors_started(void); 351 + bool hv_lp_exists(u32 lp_index); 350 352 int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags); 351 353 352 354 #else /* CONFIG_MSHV_ROOT */ ··· 367 365 static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id) 368 366 { 369 367 return -EOPNOTSUPP; 368 + } 369 + static inline int hv_call_notify_all_processors_started(void) 370 + { 371 + return -EOPNOTSUPP; 372 + } 373 + static inline bool hv_lp_exists(u32 lp_index) 374 + { 375 + return false; 370 376 } 371 377 static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags) 372 378 {
+1
include/hyperv/hvgdk_mini.h
··· 435 435 /* HV_CALL_CODE */ 436 436 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002 437 437 #define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003 438 + #define HVCALL_GET_LOGICAL_PROCESSOR_RUN_TIME 0x0004 438 439 #define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008 439 440 #define HVCALL_SEND_IPI 0x000b 440 441 #define HVCALL_ENABLE_VP_VTL 0x000f
+12
include/hyperv/hvhdk_mini.h
··· 362 362 363 363 enum hv_partition_event { 364 364 HV_PARTITION_EVENT_ROOT_CRASHDUMP = 2, 365 + HV_PARTITION_ALL_LOGICAL_PROCESSORS_STARTED = 4, 365 366 }; 366 367 367 368 struct hv_input_notify_partition_event { 368 369 u32 event; /* enum hv_partition_event */ 369 370 union hv_partition_event_input input; 371 + } __packed; 372 + 373 + struct hv_input_get_logical_processor_run_time { 374 + u32 lp_index; 375 + } __packed; 376 + 377 + struct hv_output_get_logical_processor_run_time { 378 + u64 global_time; 379 + u64 local_run_time; 380 + u64 rsvdz0; 381 + u64 hypervisor_time; 370 382 } __packed; 371 383 372 384 struct hv_lp_startup_status {
+2
include/linux/hyperv.h
··· 1304 1304 1305 1305 struct device *hv_get_vmbus_root_device(void); 1306 1306 1307 + bool hv_vmbus_exists(void); 1308 + 1307 1309 struct hv_ring_buffer_debug_info { 1308 1310 u32 current_interrupt_mask; 1309 1311 u32 current_read_index;
+1 -1
include/uapi/linux/mshv.h
··· 357 357 358 358 struct mshv_vtl_ram_disposition { 359 359 __u64 start_pfn; 360 - __u64 last_pfn; 360 + __u64 last_pfn; /* last_pfn is excluded from the range [start_pfn, last_pfn) */ 361 361 }; 362 362 363 363 struct mshv_vtl_set_poll_file {
+2 -2
tools/hv/Makefile
··· 2 2 # Makefile for Hyper-V tools 3 3 include ../scripts/Makefile.include 4 4 5 - ARCH := $(shell uname -m 2>/dev/null) 5 + ARCH ?= $(shell uname -m 2>/dev/null) 6 6 sbindir ?= /usr/sbin 7 7 libexecdir ?= /usr/libexec 8 8 sharedstatedir ?= /var/lib ··· 20 20 override CFLAGS += -Wno-address-of-packed-member 21 21 22 22 ALL_TARGETS := hv_kvp_daemon hv_vss_daemon 23 - ifneq ($(ARCH), aarch64) 23 + ifneq ($(filter x86_64 x86,$(ARCH)),) 24 24 ALL_TARGETS += hv_fcopy_uio_daemon 25 25 endif 26 26 ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))