Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'hyperv-fixes-signed-20260121' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux

Pull hyperv fixes from Wei Liu:

- Fix ARM64 port of the MSHV driver (Anirudh Rayabharam)

- Fix huge page handling in the MSHV driver (Stanislav Kinsburskii)

- Minor fixes to driver code (Julia Lawall, Michael Kelley)

* tag 'hyperv-fixes-signed-20260121' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  mshv: handle gpa intercepts for arm64
  mshv: add definitions for arm64 gpa intercepts
  mshv: Add __user attribute to argument passed to access_ok()
  mshv: Store the result of vfs_poll in a variable of type __poll_t
  mshv: Align huge page stride with guest mapping
  Drivers: hv: Always do Hyper-V panic notification in hv_kmsg_dump()
  Drivers: hv: vmbus: fix typo in function name reference

+127 -46
+7 -5
drivers/hv/hv_common.c
···
 
        /*
         * Write dump contents to the page. No need to synchronize; panic should
-        * be single-threaded.
+        * be single-threaded. Ignore failures from kmsg_dump_get_buffer() since
+        * panic notification should be done even if there is no message data.
+        * Don't assume bytes_written is set in case of failure, so initialize it.
         */
        kmsg_dump_rewind(&iter);
-       kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
+       bytes_written = 0;
+       (void)kmsg_dump_get_buffer(&iter, false, hv_panic_page, HV_HYP_PAGE_SIZE,
                              &bytes_written);
-       if (!bytes_written)
-               return;
+
        /*
         * P3 to contain the physical address of the panic page & P4 to
         * contain the size of the panic data in that page. Rest of the
···
        hv_set_msr(HV_MSR_CRASH_P0, 0);
        hv_set_msr(HV_MSR_CRASH_P1, 0);
        hv_set_msr(HV_MSR_CRASH_P2, 0);
-       hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
+       hv_set_msr(HV_MSR_CRASH_P3, bytes_written ? virt_to_phys(hv_panic_page) : 0);
        hv_set_msr(HV_MSR_CRASH_P4, bytes_written);
 
        /*
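
The fix above follows a general defensive pattern: kmsg_dump_get_buffer() can fail without writing to its output parameter, so the caller initializes bytes_written itself and no longer gates the hypervisor notification on message data being present. A minimal userspace sketch of the same pattern; get_buffer() is a hypothetical stand-in for kmsg_dump_get_buffer(), not a kernel API:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

/* Stand-in for kmsg_dump_get_buffer(): may fail without setting *written. */
static bool get_buffer(char *buf, size_t len, size_t *written)
{
        (void)buf; (void)len;
        return false;
}

int main(void)
{
        char page[4096];
        size_t written = 0;     /* initialized by the caller, not the callee */

        (void)get_buffer(page, sizeof(page), &written);
        /* Notify unconditionally; report a real address only if data exists. */
        printf("addr=%p len=%zu\n", written ? (void *)page : NULL, written);
        return 0;
}

When the buffer stays empty, the notification still fires but reports a null address and zero length, mirroring the zeroed HV_MSR_CRASH_P3/P4 values in the patch.
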
+1 -1
drivers/hv/hyperv_vmbus.h
···
                return;
 
        /*
-        * The cmxchg() above does an implicit memory barrier to
+        * The cmpxchg() above does an implicit memory barrier to
         * ensure the write to MessageType (ie set to
         * HVMSG_NONE) happens before we read the
         * MessagePending and EOMing. Otherwise, the EOMing
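
The corrected comment leans on the guarantee that a successful cmpxchg() implies a full memory barrier, ordering the MessageType write before the later reads of MessagePending. A userspace analogue using C11 atomics; msg_type and the constant values are illustrative, not the VMBus message layout:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
        atomic_int msg_type = 1;        /* pretend 1 is a pending HVMSG_* type */
        int expected = 1;

        /*
         * A successful seq_cst compare-exchange acts as a full barrier: the
         * write of 0 (HVMSG_NONE here) is ordered before any reads after it.
         */
        if (atomic_compare_exchange_strong(&msg_type, &expected, 0))
                printf("message cleared; safe to read pending/EOM state\n");
        return 0;
}
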
+1 -1
drivers/hv/mshv_eventfd.c
···
 {
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        struct mshv_irqfd *irqfd, *tmp;
-       unsigned int events;
+       __poll_t events;
        int ret;
        int idx;
 
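
The type change matters to sparse rather than to the generated code: vfs_poll() returns __poll_t, a "bitwise" type that may not silently mix with plain unsigned int. A compilable sketch of the mechanism, with my_poll_t, MY_EPOLLIN, and my_vfs_poll() as hypothetical stand-ins for __poll_t, EPOLLIN, and vfs_poll(); running sparse over it flags the commented-out declaration:

#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise my_poll_t;       /* stand-in for __poll_t */
#define MY_EPOLLIN ((__force my_poll_t)0x1)     /* defined like EPOLLIN */

static my_poll_t my_vfs_poll(void)              /* stand-in for vfs_poll() */
{
        return MY_EPOLLIN;
}

int main(void)
{
        /* unsigned int events = my_vfs_poll();    sparse: restricted type */
        my_poll_t events = my_vfs_poll();       /* clean under sparse */
        return events == MY_EPOLLIN ? 0 : 1;
}
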
+62 -31
drivers/hv/mshv_regions.c
···
 #define MSHV_MAP_FAULT_IN_PAGES        PTRS_PER_PMD
 
 /**
+ * mshv_chunk_stride - Compute stride for mapping guest memory
+ * @page      : The page to check for huge page backing
+ * @gfn       : Guest frame number for the mapping
+ * @page_count: Total number of pages in the mapping
+ *
+ * Determines the appropriate stride (in pages) for mapping guest memory.
+ * Uses huge page stride if the backing page is huge and the guest mapping
+ * is properly aligned; otherwise falls back to single page stride.
+ *
+ * Return: Stride in pages, or -EINVAL if page order is unsupported.
+ */
+static int mshv_chunk_stride(struct page *page,
+                             u64 gfn, u64 page_count)
+{
+        unsigned int page_order;
+
+        /*
+         * Use single page stride by default. For huge page stride, the
+         * page must be compound and point to the head of the compound
+         * page, and both gfn and page_count must be huge-page aligned.
+         */
+        if (!PageCompound(page) || !PageHead(page) ||
+            !IS_ALIGNED(gfn, PTRS_PER_PMD) ||
+            !IS_ALIGNED(page_count, PTRS_PER_PMD))
+                return 1;
+
+        page_order = folio_order(page_folio(page));
+        /* The hypervisor only supports 2M huge page */
+        if (page_order != PMD_ORDER)
+                return -EINVAL;
+
+        return 1 << page_order;
+}
+
+/**
  * mshv_region_process_chunk - Processes a contiguous chunk of memory pages
  * in a region.
  * @region : Pointer to the memory region structure.
···
                        int (*handler)(struct mshv_mem_region *region,
                                       u32 flags,
                                       u64 page_offset,
-                                      u64 page_count))
+                                      u64 page_count,
+                                      bool huge_page))
 {
-       u64 count, stride;
-       unsigned int page_order;
+       u64 gfn = region->start_gfn + page_offset;
+       u64 count;
        struct page *page;
-       int ret;
+       int stride, ret;
 
        page = region->pages[page_offset];
        if (!page)
                return -EINVAL;
 
-       page_order = folio_order(page_folio(page));
-       /* The hypervisor only supports 4K and 2M page sizes */
-       if (page_order && page_order != PMD_ORDER)
-               return -EINVAL;
+       stride = mshv_chunk_stride(page, gfn, page_count);
+       if (stride < 0)
+               return stride;
 
-       stride = 1 << page_order;
-
-       /* Start at stride since the first page is validated */
+       /* Start at stride since the first stride is validated */
        for (count = stride; count < page_count; count += stride) {
                page = region->pages[page_offset + count];
 
···
                if (!page)
                        break;
 
-               /* Break if page size changes */
-               if (page_order != folio_order(page_folio(page)))
+               /* Break if stride size changes */
+               if (stride != mshv_chunk_stride(page, gfn + count,
+                                               page_count - count))
                        break;
        }
 
-       ret = handler(region, flags, page_offset, count);
+       ret = handler(region, flags, page_offset, count, stride > 1);
        if (ret)
                return ret;
 
···
                          int (*handler)(struct mshv_mem_region *region,
                                         u32 flags,
                                         u64 page_offset,
-                                        u64 page_count))
+                                        u64 page_count,
+                                        bool huge_page))
 {
        long ret;
 
···
 
 static int mshv_region_chunk_share(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
···
 
 static int mshv_region_chunk_unshare(struct mshv_mem_region *region,
                                      u32 flags,
-                                     u64 page_offset, u64 page_count)
+                                     u64 page_offset, u64 page_count,
+                                     bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MODIFY_SPA_PAGE_HOST_ACCESS_LARGE_PAGE;
 
        return hv_call_modify_spa_host_access(region->partition->pt_id,
···
 
 static int mshv_region_chunk_remap(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_MAP_GPA_LARGE_PAGE;
 
        return hv_call_map_gpa_pages(region->partition->pt_id,
···
 
 static int mshv_region_chunk_unmap(struct mshv_mem_region *region,
                                    u32 flags,
-                                   u64 page_offset, u64 page_count)
+                                   u64 page_offset, u64 page_count,
+                                   bool huge_page)
 {
-       struct page *page = region->pages[page_offset];
-
-       if (PageHuge(page) || PageTransCompound(page))
+       if (huge_page)
                flags |= HV_UNMAP_GPA_LARGE_PAGE;
 
        return hv_call_unmap_gpa_pages(region->partition->pt_id,
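
The alignment rules are easiest to see with concrete numbers: with 4K base pages, PTRS_PER_PMD is 512, so a 2M stride is legal only when the guest frame number and the remaining page count are both multiples of 512, even if the backing page is huge. A small userspace sketch of just that decision; chunk_stride() and is_huge_head are hypothetical simplifications of mshv_chunk_stride() and the PageCompound()/PageHead() checks:

#include <stdio.h>
#include <stdint.h>

#define PTRS_PER_PMD 512        /* 4K base pages: one 2M huge page = 512 pages */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* is_huge_head models the PageCompound()/PageHead() test on the first page */
static int chunk_stride(int is_huge_head, uint64_t gfn, uint64_t page_count)
{
        if (!is_huge_head ||
            !IS_ALIGNED(gfn, PTRS_PER_PMD) ||
            !IS_ALIGNED(page_count, PTRS_PER_PMD))
                return 1;               /* fall back to single-page stride */
        return PTRS_PER_PMD;            /* one 2M mapping per 512 pages */
}

int main(void)
{
        /* Huge-backed but guest-misaligned: must map 4K at a time. */
        printf("stride=%d\n", chunk_stride(1, 100, 1024));      /* stride=1 */
        /* Huge-backed, gfn and count both 2M-aligned: 512-page stride. */
        printf("stride=%d\n", chunk_stride(1, 512, 1024));      /* stride=512 */
        return 0;
}

A huge-backed host page mapped at a misaligned guest address therefore falls back to individual 4K mappings, which is exactly the case the old folio_order()-only check got wrong.
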
+9 -8
drivers/hv/mshv_root_main.c
···
        return NULL;
 }
 
-#ifdef CONFIG_X86_64
 static struct mshv_mem_region *
 mshv_partition_region_by_gfn_get(struct mshv_partition *p, u64 gfn)
 {
···
 {
        struct mshv_partition *p = vp->vp_partition;
        struct mshv_mem_region *region;
-       struct hv_x64_memory_intercept_message *msg;
        bool ret;
        u64 gfn;
-
-       msg = (struct hv_x64_memory_intercept_message *)
+#if defined(CONFIG_X86_64)
+       struct hv_x64_memory_intercept_message *msg =
+               (struct hv_x64_memory_intercept_message *)
                        vp->vp_intercept_msg_page->u.payload;
+#elif defined(CONFIG_ARM64)
+       struct hv_arm64_memory_intercept_message *msg =
+               (struct hv_arm64_memory_intercept_message *)
+                       vp->vp_intercept_msg_page->u.payload;
+#endif
 
        gfn = HVPFN_DOWN(msg->guest_physical_address);
 
···
 
        return ret;
 }
-#else /* CONFIG_X86_64 */
-static bool mshv_handle_gpa_intercept(struct mshv_vp *vp) { return false; }
-#endif /* CONFIG_X86_64 */
 
 static bool mshv_vp_handle_intercept(struct mshv_vp *vp)
 {
···
        long ret;
 
        if (mem.flags & BIT(MSHV_SET_MEM_BIT_UNMAP) ||
-           !access_ok((const void *)mem.userspace_addr, mem.size))
+           !access_ok((const void __user *)mem.userspace_addr, mem.size))
                return -EINVAL;
 
        mmap_read_lock(current->mm);
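
The access_ok() change is another sparse fix: mem.userspace_addr arrives as a plain integer, and casting it to const void * drops the __user address-space tag that access_ok() expects on its argument. A standalone sketch of the annotation, mirroring how the kernel defines __user for sparse; check() and validate() are hypothetical, only the attribute mechanics are real:

#ifdef __CHECKER__
#define __user __attribute__((noderef, address_space(__user)))
#else
#define __user
#endif

/* Hypothetical consumer playing the role of access_ok(). */
static int check(const void __user *ptr, unsigned long size)
{
        (void)ptr; (void)size;
        return 1;
}

static int validate(unsigned long uaddr, unsigned long size)
{
        /* return check((const void *)uaddr, size);   sparse warns here */
        return check((const void __user *)uaddr, size); /* tagged cast: clean */
}

int main(void)
{
        return !validate(0x1000, 16);
}
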
+47
include/hyperv/hvhdk.h
···
        u8 instruction_bytes[16];
 } __packed;
 
+#if IS_ENABLED(CONFIG_ARM64)
+union hv_arm64_vp_execution_state {
+       u16 as_uint16;
+       struct {
+               u16 cpl:2; /* Exception Level (EL) */
+               u16 debug_active:1;
+               u16 interruption_pending:1;
+               u16 vtl:4;
+               u16 virtualization_fault_active:1;
+               u16 reserved:7;
+       } __packed;
+};
+
+struct hv_arm64_intercept_message_header {
+       u32 vp_index;
+       u8 instruction_length;
+       u8 intercept_access_type;
+       union hv_arm64_vp_execution_state execution_state;
+       u64 pc;
+       u64 cpsr;
+} __packed;
+
+union hv_arm64_memory_access_info {
+       u8 as_uint8;
+       struct {
+               u8 gva_valid:1;
+               u8 gva_gpa_valid:1;
+               u8 hypercall_output_pending:1;
+               u8 reserved:5;
+       } __packed;
+};
+
+struct hv_arm64_memory_intercept_message {
+       struct hv_arm64_intercept_message_header header;
+       u32 cache_type; /* enum hv_cache_type */
+       u8 instruction_byte_count;
+       union hv_arm64_memory_access_info memory_access_info;
+       u16 reserved1;
+       u8 instruction_bytes[4];
+       u32 reserved2;
+       u64 guest_virtual_address;
+       u64 guest_physical_address;
+       u64 syndrome;
+} __packed;
+
+#endif /* CONFIG_ARM64 */
+
 /*
  * Dispatch state for the VP communicated by the hypervisor to the
  * VP-dispatching thread in the root on return from HVCALL_DISPATCH_VP.
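
Given this layout, the GPA intercept handler in mshv_root_main.c only needs the frame number, which it derives with HVPFN_DOWN() over guest_physical_address. A tiny userspace sketch of that arithmetic, assuming the Hyper-V hypervisor page size of 4K (HV_HYP_PAGE_SHIFT of 12, matching the kernel's Hyper-V headers); the sample GPA is made up:

#include <stdio.h>
#include <stdint.h>

#define HV_HYP_PAGE_SHIFT 12
#define HVPFN_DOWN(gpa) ((gpa) >> HV_HYP_PAGE_SHIFT)

int main(void)
{
        uint64_t gpa = 0x40001234;  /* guest_physical_address from the message */

        /* Split the GPA into guest frame number and in-page offset. */
        printf("gfn=0x%llx offset=0x%llx\n",
               (unsigned long long)HVPFN_DOWN(gpa),
               (unsigned long long)(gpa & ((1ULL << HV_HYP_PAGE_SHIFT) - 1)));
        return 0;
}
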