Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2025-11-16-10-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"7 hotfixes. 5 are cc:stable, 4 are against mm/

All are singletons - please see the respective changelogs for details"

* tag 'mm-hotfixes-stable-2025-11-16-10-40' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm, swap: fix potential UAF issue for VMA readahead
selftests/user_events: fix type cast for write_index packed member in perf_test
lib/test_kho: check if KHO is enabled
mm/huge_memory: fix folio split check for anon folios in swapcache
MAINTAINERS: update David Hildenbrand's email address
crash: fix crashkernel resource shrink
mm: fix MAX_FOLIO_ORDER on powerpc configs with hugetlb

+55 -22
+1
.mailmap
···
206 206      David Brownell <david-b@pacbell.net>
207 207      David Collins <quic_collinsd@quicinc.com> <collinsd@codeaurora.org>
208 208      David Heidelberg <david@ixit.cz> <d.okias@gmail.com>
    209  +   David Hildenbrand <david@kernel.org> <david@redhat.com>
209 210      David Rheinsberg <david@readahead.eu> <dh.herrmann@gmail.com>
210 211      David Rheinsberg <david@readahead.eu> <dh.herrmann@googlemail.com>
211 212      David Rheinsberg <david@readahead.eu> <david.rheinsberg@gmail.com>
+14 -14
MAINTAINERS
···
11528 11528      HUGETLB SUBSYSTEM
11529 11529      M: Muchun Song <muchun.song@linux.dev>
11530 11530      M: Oscar Salvador <osalvador@suse.de>
11531        -   R: David Hildenbrand <david@redhat.com>
      11531  +   R: David Hildenbrand <david@kernel.org>
11532 11532      L: linux-mm@kvack.org
11533 11533      S: Maintained
11534 11534      F: Documentation/ABI/testing/sysfs-kernel-mm-hugepages
···
13735 13735      M: Christian Borntraeger <borntraeger@linux.ibm.com>
13736 13736      M: Janosch Frank <frankja@linux.ibm.com>
13737 13737      M: Claudio Imbrenda <imbrenda@linux.ibm.com>
13738        -   R: David Hildenbrand <david@redhat.com>
      13738  +   R: David Hildenbrand <david@kernel.org>
13739 13739      L: kvm@vger.kernel.org
13740 13740      S: Supported
13741 13741      T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
···
16222 16222      F: drivers/devfreq/tegra30-devfreq.c
16223 16223
16224 16224      MEMORY HOT(UN)PLUG
16225        -   M: David Hildenbrand <david@redhat.com>
      16225  +   M: David Hildenbrand <david@kernel.org>
16226 16226      M: Oscar Salvador <osalvador@suse.de>
16227 16227      L: linux-mm@kvack.org
16228 16228      S: Maintained
···
16247 16247
16248 16248      MEMORY MANAGEMENT - CORE
16249 16249      M: Andrew Morton <akpm@linux-foundation.org>
16250        -   M: David Hildenbrand <david@redhat.com>
      16250  +   M: David Hildenbrand <david@kernel.org>
16251 16251      R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
16252 16252      R: Liam R. Howlett <Liam.Howlett@oracle.com>
16253 16253      R: Vlastimil Babka <vbabka@suse.cz>
···
16303 16303
16304 16304      MEMORY MANAGEMENT - GUP (GET USER PAGES)
16305 16305      M: Andrew Morton <akpm@linux-foundation.org>
16306        -   M: David Hildenbrand <david@redhat.com>
      16306  +   M: David Hildenbrand <david@kernel.org>
16307 16307      R: Jason Gunthorpe <jgg@nvidia.com>
16308 16308      R: John Hubbard <jhubbard@nvidia.com>
16309 16309      R: Peter Xu <peterx@redhat.com>
···
16319 16319
16320 16320      MEMORY MANAGEMENT - KSM (Kernel Samepage Merging)
16321 16321      M: Andrew Morton <akpm@linux-foundation.org>
16322        -   M: David Hildenbrand <david@redhat.com>
      16322  +   M: David Hildenbrand <david@kernel.org>
16323 16323      R: Xu Xin <xu.xin16@zte.com.cn>
16324 16324      R: Chengming Zhou <chengming.zhou@linux.dev>
16325 16325      L: linux-mm@kvack.org
···
16335 16335
16336 16336      MEMORY MANAGEMENT - MEMORY POLICY AND MIGRATION
16337 16337      M: Andrew Morton <akpm@linux-foundation.org>
16338        -   M: David Hildenbrand <david@redhat.com>
      16338  +   M: David Hildenbrand <david@kernel.org>
16339 16339      R: Zi Yan <ziy@nvidia.com>
16340 16340      R: Matthew Brost <matthew.brost@intel.com>
16341 16341      R: Joshua Hahn <joshua.hahnjy@gmail.com>
···
16375 16375
16376 16376      MEMORY MANAGEMENT - MISC
16377 16377      M: Andrew Morton <akpm@linux-foundation.org>
16378        -   M: David Hildenbrand <david@redhat.com>
      16378  +   M: David Hildenbrand <david@kernel.org>
16379 16379      R: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
16380 16380      R: Liam R. Howlett <Liam.Howlett@oracle.com>
16381 16381      R: Vlastimil Babka <vbabka@suse.cz>
···
16463 16463      MEMORY MANAGEMENT - RECLAIM
16464 16464      M: Andrew Morton <akpm@linux-foundation.org>
16465 16465      M: Johannes Weiner <hannes@cmpxchg.org>
16466        -   R: David Hildenbrand <david@redhat.com>
      16466  +   R: David Hildenbrand <david@kernel.org>
16467 16467      R: Michal Hocko <mhocko@kernel.org>
16468 16468      R: Qi Zheng <zhengqi.arch@bytedance.com>
16469 16469      R: Shakeel Butt <shakeel.butt@linux.dev>
···
16476 16476
16477 16477      MEMORY MANAGEMENT - RMAP (REVERSE MAPPING)
16478 16478      M: Andrew Morton <akpm@linux-foundation.org>
16479        -   M: David Hildenbrand <david@redhat.com>
      16479  +   M: David Hildenbrand <david@kernel.org>
16480 16480      M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
16481 16481      R: Rik van Riel <riel@surriel.com>
16482 16482      R: Liam R. Howlett <Liam.Howlett@oracle.com>
···
16521 16521
16522 16522      MEMORY MANAGEMENT - THP (TRANSPARENT HUGE PAGE)
16523 16523      M: Andrew Morton <akpm@linux-foundation.org>
16524        -   M: David Hildenbrand <david@redhat.com>
      16524  +   M: David Hildenbrand <david@kernel.org>
16525 16525      M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
16526 16526      R: Zi Yan <ziy@nvidia.com>
16527 16527      R: Baolin Wang <baolin.wang@linux.alibaba.com>
···
16623 16623      M: Andrew Morton <akpm@linux-foundation.org>
16624 16624      M: Liam R. Howlett <Liam.Howlett@oracle.com>
16625 16625      M: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
16626        -   M: David Hildenbrand <david@redhat.com>
      16626  +   M: David Hildenbrand <david@kernel.org>
16627 16627      R: Vlastimil Babka <vbabka@suse.cz>
16628 16628      R: Jann Horn <jannh@google.com>
16629 16629      L: linux-mm@kvack.org
···
27090 27090
27091 27091      VIRTIO BALLOON
27092 27092      M: "Michael S. Tsirkin" <mst@redhat.com>
27093        -   M: David Hildenbrand <david@redhat.com>
      27093  +   M: David Hildenbrand <david@kernel.org>
27094 27094      L: virtualization@lists.linux.dev
27095 27095      S: Maintained
27096 27096      F: drivers/virtio/virtio_balloon.c
···
27245 27245      F: include/uapi/linux/virtio_iommu.h
27246 27246
27247 27247      VIRTIO MEM DRIVER
27248        -   M: David Hildenbrand <david@redhat.com>
      27248  +   M: David Hildenbrand <david@kernel.org>
27249 27249      L: virtualization@lists.linux.dev
27250 27250      S: Maintained
27251 27251      W: https://virtio-mem.gitlab.io/
+1
arch/powerpc/Kconfig
···
137 137      select ARCH_HAS_DMA_OPS if PPC64
138 138      select ARCH_HAS_FORTIFY_SOURCE
139 139      select ARCH_HAS_GCOV_PROFILE_ALL
    140  +   select ARCH_HAS_GIGANTIC_PAGE if ARCH_SUPPORTS_HUGETLBFS
140 141      select ARCH_HAS_KCOV
141 142      select ARCH_HAS_KERNEL_FPU_SUPPORT if PPC64 && PPC_FPU
142 143      select ARCH_HAS_MEMBARRIER_CALLBACKS
-1
arch/powerpc/platforms/Kconfig.cputype
···
423 423      config PPC_RADIX_MMU
424 424        bool "Radix MMU Support"
425 425        depends on PPC_BOOK3S_64
426      -     select ARCH_HAS_GIGANTIC_PAGE
427 426        default y
428 427        help
429 428          Enable support for the Power ISA 3.0 Radix style MMU. Currently this
+10 -3
include/linux/mm.h
···
2074 2074      	return folio_large_nr_pages(folio);
2075 2075      }
2076 2076
2077       -   #if !defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE)
     2077  +   #if !defined(CONFIG_HAVE_GIGANTIC_FOLIOS)
2078 2078      /*
2079 2079       * We don't expect any folios that exceed buddy sizes (and consequently
2080 2080       * memory sections).
···
2087 2087       * pages are guaranteed to be contiguous.
2088 2088       */
2089 2089      #define MAX_FOLIO_ORDER	PFN_SECTION_SHIFT
2090       -   #else
     2090  +   #elif defined(CONFIG_HUGETLB_PAGE)
2091 2091      /*
2092 2092       * There is no real limit on the folio size. We limit them to the maximum we
2093       -    * currently expect (e.g., hugetlb, dax).
     2093  +    * currently expect (see CONFIG_HAVE_GIGANTIC_FOLIOS): with hugetlb, we expect
     2094  +    * no folios larger than 16 GiB on 64bit and 1 GiB on 32bit.
     2095  +    */
     2096  +   #define MAX_FOLIO_ORDER	get_order(IS_ENABLED(CONFIG_64BIT) ? SZ_16G : SZ_1G)
     2097  +   #else
     2098  +   /*
     2099  +    * Without hugetlb, gigantic folios that are bigger than a single PUD are
     2100  +    * currently impossible.
2094 2102       */
2095 2103      #define MAX_FOLIO_ORDER	PUD_ORDER
2096 2104      #endif
+1 -1
kernel/crash_core.c
···
373 373      		old_res->start = 0;
374 374      		old_res->end = 0;
375 375      	} else {
376      -   		crashk_res.end = ram_res->start - 1;
    376  +   		old_res->end = ram_res->start - 1;
377 377      	}
378 378
379 379      	crash_free_reserved_phys_range(ram_res->start, ram_res->end);
+3
lib/test_kho.c
···
301 301      	phys_addr_t fdt_phys;
302 302      	int err;
303 303
    304  +   	if (!kho_is_enabled())
    305  +   		return 0;
    306  +
304 307      	err = kho_retrieve_subtree(KHO_TEST_FDT, &fdt_phys);
305 308      	if (!err)
306 309      		return kho_test_restore(fdt_phys);
+7
mm/Kconfig
···
908 908      config PGTABLE_HAS_HUGE_LEAVES
909 909      	def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
910 910
    911  +   #
    912  +   # We can end up creating gigantic folio.
    913  +   #
    914  +   config HAVE_GIGANTIC_FOLIOS
    915  +   	def_bool (HUGETLB_PAGE && ARCH_HAS_GIGANTIC_PAGE) || \
    916  +   		 (ZONE_DEVICE && HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
    917  +
911 918      # TODO: Allow to be enabled without THP
912 919      config ARCH_SUPPORTS_HUGE_PFNMAP
913 920      	def_bool n
+4 -2
mm/huge_memory.c
···
3522 3522      		/* order-1 is not supported for anonymous THP. */
3523 3523      		VM_WARN_ONCE(warns && new_order == 1,
3524 3524      			     "Cannot split to order-1 folio");
3525       -   		return new_order != 1;
     3525  +   		if (new_order == 1)
     3526  +   			return false;
3526 3527      	} else if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3527 3528      		   !mapping_large_folio_support(folio->mapping)) {
3528 3529      		/*
···
3554 3553      	if (folio_test_anon(folio)) {
3555 3554      		VM_WARN_ONCE(warns && new_order == 1,
3556 3555      			     "Cannot split to order-1 folio");
3557       -   		return new_order != 1;
     3556  +   		if (new_order == 1)
     3557  +   			return false;
3558 3558      	} else if (new_order) {
3559 3559      		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
3560 3560      		    !mapping_large_folio_support(folio->mapping)) {
+13
mm/swap_state.c
···
748 748
749 749      	blk_start_plug(&plug);
750 750      	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
    751  +   		struct swap_info_struct *si = NULL;
    752  +
751 753      		if (!pte++) {
752 754      			pte = pte_offset_map(vmf->pmd, addr);
753 755      			if (!pte)
···
763 761      			continue;
764 762      		pte_unmap(pte);
765 763      		pte = NULL;
    764  +   		/*
    765  +   		 * Readahead entry may come from a device that we are not
    766  +   		 * holding a reference to, try to grab a reference, or skip.
    767  +   		 */
    768  +   		if (swp_type(entry) != swp_type(targ_entry)) {
    769  +   			si = get_swap_device(entry);
    770  +   			if (!si)
    771  +   				continue;
    772  +   		}
766 773      		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
767 774      						&page_allocated, false);
    775  +   		if (si)
    776  +   			put_swap_device(si);
768 777      		if (!folio)
769 778      			continue;
770 779      		if (page_allocated) {
+1 -1
tools/testing/selftests/user_events/perf_test.c
···
236 236      	ASSERT_EQ(1 << reg.enable_bit, self->check);
237 237
238 238      	/* Ensure write shows up at correct offset */
239      -   	ASSERT_NE(-1, write(self->data_fd, &reg.write_index,
    239  +   	ASSERT_NE(-1, write(self->data_fd, (void *)&reg.write_index,
240 240      			    sizeof(reg.write_index)));
241 241      	val = (void *)(((char *)perf_page) + perf_page->data_offset);
242 242      	ASSERT_EQ(PERF_RECORD_SAMPLE, *val);