Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2026-02-13-07-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM fixes from Andrew Morton:
"Three MM hotfixes, all three are cc:stable"

* tag 'mm-hotfixes-stable-2026-02-13-07-14' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
procfs: fix possible double mmput() in do_procmap_query()
mm/page_alloc: skip debug_check_no_{obj,locks}_freed with FPI_TRYLOCK
mm/hugetlb: restore failed global reservations to subpool

+22 -7
+2 -1
fs/proc/task_mmu.c
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -780,7 +780,7 @@
 	} else {
 		if (karg.build_id_size < build_id_sz) {
 			err = -ENAMETOOLONG;
-			goto out;
+			goto out_file;
 		}
 		karg.build_id_size = build_id_sz;
 	}
@@ -808,6 +808,7 @@
 out:
 	query_vma_teardown(&lock_ctx);
 	mmput(mm);
+out_file:
 	if (vm_file)
 		fput(vm_file);
 	kfree(name_buf);
+9
mm/hugetlb.c
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6723,6 +6723,15 @@
 		 */
 		hugetlb_acct_memory(h, -gbl_resv);
 	}
+	/* Restore used_hpages for pages that failed global reservation */
+	if (gbl_reserve && spool) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&spool->lock, flags);
+		if (spool->max_hpages != -1)
+			spool->used_hpages -= gbl_reserve;
+		unlock_or_release_subpool(spool, flags);
+	}
 out_uncharge_cgroup:
 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
 					    chg * pages_per_huge_page(h), h_cg);
+11 -6
mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1339,8 +1339,8 @@
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
-__always_inline bool free_pages_prepare(struct page *page,
-			unsigned int order)
+__always_inline bool __free_pages_prepare(struct page *page,
+			unsigned int order, fpi_t fpi_flags)
 {
 	int bad = 0;
 	bool skip_kasan_poison = should_skip_kasan_poison(page);
@@ -1433,7 +1433,7 @@
 	page_table_check_free(page, order);
 	pgalloc_tag_sub(page, 1 << order);
 
-	if (!PageHighMem(page)) {
+	if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) {
 		debug_check_no_locks_freed(page_address(page),
 					   PAGE_SIZE << order);
 		debug_check_no_obj_freed(page_address(page),
@@ -1470,6 +1470,11 @@
 	debug_pagealloc_unmap_pages(page, 1 << order);
 
 	return true;
+}
+
+bool free_pages_prepare(struct page *page, unsigned int order)
+{
+	return __free_pages_prepare(page, order, FPI_NONE);
 }
 
 /*
@@ -1610,7 +1605,7 @@
 	unsigned long pfn = page_to_pfn(page);
 	struct zone *zone = page_zone(page);
 
-	if (free_pages_prepare(page, order))
+	if (__free_pages_prepare(page, order, fpi_flags))
 		free_one_page(zone, page, pfn, order, fpi_flags);
 }
 
@@ -2974,7 +2969,7 @@
 		return;
 	}
 
-	if (!free_pages_prepare(page, order))
+	if (!__free_pages_prepare(page, order, fpi_flags))
 		return;
 
 	/*
@@ -3036,7 +3031,7 @@
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int order = folio_order(folio);
 
-		if (!free_pages_prepare(&folio->page, order))
+		if (!__free_pages_prepare(&folio->page, order, FPI_NONE))
 			continue;
 		/*
 		 * Free orders not handled on the PCP directly to the