Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-stable-2025-12-11-11-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

- "powerpc/pseries/cmm: two smaller fixes" (David Hildenbrand)
fixes a couple of minor issues in the powerpc CMM balloon driver

- "Improve folio split related functions" (Zi Yan)
cleanups and minor fixes in the folio splitting code

* tag 'mm-stable-2025-12-11-11-39' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/damon/tests/core-kunit: avoid damos_test_commit stack warning
mm: vmscan: correct nr_requested tracing in scan_folios
MAINTAINERS: add idr core-api doc file to XARRAY
mm/hugetlb: fix incorrect error return from hugetlb_reserve_pages()
mm: fix CONFIG_STACK_GROWSUP typo in mm.h
mm/huge_memory: fix folio split stats counting
mm/huge_memory: make min_order_for_split() always return an order
mm/huge_memory: replace can_split_folio() with direct refcount calculation
mm/huge_memory: change folio_split_supported() to folio_check_splittable()
mm/sparse: fix sparse_vmemmap_init_nid_early definition without CONFIG_SPARSEMEM
powerpc/pseries/cmm: adjust BALLOON_MIGRATE when migrating pages
powerpc/pseries/cmm: call balloon_devinfo_init() also without CONFIG_BALLOON_COMPACTION

+132 -98
+1
MAINTAINERS
···
 L:	linux-fsdevel@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Supported
+F:	Documentation/core-api/idr.rst
 F:	Documentation/core-api/xarray.rst
 F:	include/linux/idr.h
 F:	include/linux/xarray.h
+2 -1
arch/powerpc/platforms/pseries/cmm.c
···
 
 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	balloon_page_insert(b_dev_info, newpage);
+	__count_vm_event(BALLOON_MIGRATE);
 	b_dev_info->isolated_pages--;
 	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 
···
 
 static void cmm_balloon_compaction_init(void)
 {
-	balloon_devinfo_init(&b_dev_info);
 	b_dev_info.migratepage = cmm_migratepage;
 }
 #else /* CONFIG_BALLOON_COMPACTION */
···
 	if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
 		return -EOPNOTSUPP;
 
+	balloon_devinfo_init(&b_dev_info);
 	cmm_balloon_compaction_init();
 
 	rc = register_oom_notifier(&cmm_oom_nb);
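The second hunk is an init-ordering change: balloon_devinfo_init() moves out of the CONFIG_BALLOON_COMPACTION-only helper and into the unconditional init path, so the balloon bookkeeping is set up even when compaction support is compiled out, while the first hunk adds the missing BALLOON_MIGRATE event count. Below is a minimal standalone sketch of that pattern, not kernel code; the names (balloon_state, HAVE_COMPACTION) are illustrative only.

/*
 * Core state is always initialized; only the optional compaction hook
 * stays behind the feature #ifdef.
 */
#include <stdio.h>

struct balloon_state {
	int isolated_pages;
	int migrate_hook_set;
};

static struct balloon_state b_state;

static void balloon_state_init(struct balloon_state *b)
{
	/* Always required: used by the balloon core, compaction or not. */
	b->isolated_pages = 0;
	b->migrate_hook_set = 0;
}

#ifdef HAVE_COMPACTION
static void compaction_init(struct balloon_state *b)
{
	/* Only the feature-specific part stays behind the #ifdef. */
	b->migrate_hook_set = 1;
}
#else
static void compaction_init(struct balloon_state *b) { }
#endif

int main(void)
{
	balloon_state_init(&b_state);	/* unconditional, as in the fix */
	compaction_init(&b_state);	/* optional feature hook */
	printf("hook set: %d\n", b_state.migrate_hook_set);
	return 0;
}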
+6 -7
include/linux/huge_mm.h
···
 	SPLIT_TYPE_NON_UNIFORM,
 };
 
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
 int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
 		unsigned int new_order);
 int folio_split_unmapped(struct folio *folio, unsigned int new_order);
-int min_order_for_split(struct folio *folio);
+unsigned int min_order_for_split(struct folio *folio);
 int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
-		enum split_type split_type, bool warns);
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+		enum split_type split_type);
 int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
 		struct list_head *list);
···
 static inline int try_folio_split_to_order(struct folio *folio,
 		struct page *page, unsigned int new_order)
 {
-	if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+	if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
 		return split_huge_page_to_order(&folio->page, new_order);
 	return folio_split(folio, new_order, page, NULL);
 }
···
 	return -EINVAL;
 }
 
-static inline int min_order_for_split(struct folio *folio)
+static inline unsigned int min_order_for_split(struct folio *folio)
 {
 	VM_WARN_ON_ONCE_FOLIO(1, folio);
-	return -EINVAL;
+	return 0;
 }
 
 static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
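The header change converts the splittability check from a bool predicate into a 0/-errno function and makes min_order_for_split() infallible. Below is a minimal standalone sketch of the new calling convention, not kernel code; check_splittable() and do_split() are stand-ins for folio_check_splittable() and its callers.

/*
 * The old helper answered "is this supported?" with a bool; the new one
 * answers "why not?" with 0 or a negative errno, so callers can forward
 * the exact reason (-EINVAL vs -EBUSY) instead of inventing one.
 */
#include <errno.h>
#include <stdio.h>

static int check_splittable(unsigned int new_order, int truncated)
{
	if (truncated)
		return -EBUSY;	/* transient: caller may retry later */
	if (new_order == 1)
		return -EINVAL;	/* permanent: order-1 anon is unsupported */
	return 0;
}

static int do_split(unsigned int new_order, int truncated)
{
	int ret = check_splittable(new_order, truncated);

	if (ret)
		return ret;	/* propagate the precise error code */
	/* ... perform the split ... */
	return 0;
}

int main(void)
{
	printf("order 1:   %d\n", do_split(1, 0));	/* -EINVAL */
	printf("truncated: %d\n", do_split(0, 1));	/* -EBUSY */
	printf("ok:        %d\n", do_split(0, 0));	/* 0 */
	return 0;
}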
+1 -1
include/linux/mm.h
···
 #define VM_NOHUGEPAGE	INIT_VM_FLAG(NOHUGEPAGE)
 #define VM_MERGEABLE	INIT_VM_FLAG(MERGEABLE)
 #define VM_STACK	INIT_VM_FLAG(STACK)
-#ifdef CONFIG_STACK_GROWS_UP
+#ifdef CONFIG_STACK_GROWSUP
 #define VM_STACK_EARLY	INIT_VM_FLAG(STACK_EARLY)
 #else
 #define VM_STACK_EARLY	VM_NONE
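The fix matters because the Kconfig symbol is STACK_GROWSUP, so the misspelled #ifdef silently selected the #else branch and VM_STACK_EARLY collapsed to VM_NONE. Below is a tiny standalone illustration of that failure mode, not kernel code.

/*
 * An #ifdef on a never-defined macro simply takes the #else branch:
 * the build succeeds, but the flag is always 0.
 */
#include <stdio.h>

#define CONFIG_STACK_GROWSUP 1	/* the real symbol (stacks grow upward) */

#ifdef CONFIG_STACK_GROWS_UP	/* typo: never defined anywhere */
#define VM_STACK_EARLY 0x100
#else
#define VM_STACK_EARLY 0x000	/* silently chosen before the fix */
#endif

int main(void)
{
	printf("VM_STACK_EARLY = %#x\n", VM_STACK_EARLY);
	return 0;
}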
+1 -1
include/linux/mmzone.h
···
 #else
 #define sparse_init()	do {} while (0)
 #define sparse_index_init(_sec, _nid)	do {} while (0)
-#define sparse_vmemmap_init_nid_early(_nid, _use)	do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid)	do {} while (0)
 #define sparse_vmemmap_init_nid_late(_nid)	do {} while (0)
 #define pfn_in_present_section pfn_valid
 #define subsection_map_init(_pfn, _nr_pages) do {} while (0)
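Without CONFIG_SPARSEMEM the helper is stubbed out as a function-like macro, so its parameter list has to match what callers actually pass; the stale _use argument would make single-argument call sites fail to expand. Below is a standalone illustration, assuming (as the one-parameter stub implies) that the real helper takes just a nid.

/*
 * A macro stub must match the arity of the callers, or !CONFIG_SPARSEMEM
 * builds break with "macro requires 2 arguments, but only 1 given".
 */
#include <stdio.h>

/* Stub after the fix: same arity as the callers. */
#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)

int main(void)
{
	sparse_vmemmap_init_nid_early(0);	/* expands cleanly */
	printf("stub arity matches callers\n");
	return 0;
}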
+7 -2
mm/damon/tests/core-kunit.h
···
 	}
 }
 
-static void damos_test_commit(struct kunit *test)
+static void damos_test_commit_pageout(struct kunit *test)
 {
 	damos_test_commit_for(test,
 			&(struct damos){
···
 					DAMOS_WMARK_FREE_MEM_RATE,
 					800, 50, 30},
 			});
+}
+
+static void damos_test_commit_migrate_hot(struct kunit *test)
+{
 	damos_test_commit_for(test,
 			&(struct damos){
 				.pattern = (struct damos_access_pattern){
···
 	KUNIT_CASE(damos_test_commit_quota),
 	KUNIT_CASE(damos_test_commit_dests),
 	KUNIT_CASE(damos_test_commit_filter),
-	KUNIT_CASE(damos_test_commit),
+	KUNIT_CASE(damos_test_commit_pageout),
+	KUNIT_CASE(damos_test_commit_migrate_hot),
 	KUNIT_CASE(damon_test_commit_target_regions),
 	KUNIT_CASE(damos_test_filter_out),
 	KUNIT_CASE(damon_test_feed_loop_next_input),
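Splitting the oversized test into damos_test_commit_pageout() and damos_test_commit_migrate_hot() works because each anonymous compound literal lives in the calling function's stack frame; two big literals in one function roughly double the frame and trip the stack-size warning. Below is a standalone sketch of that effect, not the DAMON test code; struct big and its size are made up.

/*
 * Moving the second literal into its own function gives it a separate,
 * smaller frame instead of one large frame holding both.
 */
#include <stdio.h>

struct big { char payload[512]; };

static void check_one(const struct big *b)
{
	printf("first byte: %d\n", b->payload[0]);
}

/* Before the split, both literals would sit in one frame (~1 KiB). */
static void test_case_a(void)
{
	check_one(&(struct big){ .payload = { 1 } });
}

/* After the split, the second literal gets its own frame. */
static void test_case_b(void)
{
	check_one(&(struct big){ .payload = { 2 } });
}

int main(void)
{
	test_case_a();
	test_case_b();
	return 0;
}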
+91 -76
mm/huge_memory.c
···
 	}
 }
 
-/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
-{
-	int extra_pins;
-
-	/* Additional pins from page cache */
-	if (folio_test_anon(folio))
-		extra_pins = folio_test_swapcache(folio) ?
-				folio_nr_pages(folio) : 0;
-	else
-		extra_pins = folio_nr_pages(folio);
-	if (pextra_pins)
-		*pextra_pins = extra_pins;
-	return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
-					caller_pins;
-}
-
 static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
 {
 	for (; nr_pages; page++, nr_pages--)
···
 	return 0;
 }
 
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
-		enum split_type split_type, bool warns)
+/**
+ * folio_check_splittable() - check if a folio can be split to a given order
+ * @folio: folio to be split
+ * @new_order: the smallest order of the after split folios (since buddy
+ *             allocator like split generates folios with orders from @folio's
+ *             order - 1 to new_order).
+ * @split_type: uniform or non-uniform split
+ *
+ * folio_check_splittable() checks if @folio can be split to @new_order using
+ * @split_type method. The truncated folio check must come first.
+ *
+ * Context: folio must be locked.
+ *
+ * Return: 0 - @folio can be split to @new_order, otherwise an error number is
+ * returned.
+ */
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+		enum split_type split_type)
 {
+	VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+	/*
+	 * Folios that just got truncated cannot get split. Signal to the
+	 * caller that there was a race.
+	 *
+	 * TODO: this will also currently refuse folios without a mapping in the
+	 * swapcache (shmem or to-be-anon folios).
+	 */
+	if (!folio->mapping && !folio_test_anon(folio))
+		return -EBUSY;
+
 	if (folio_test_anon(folio)) {
 		/* order-1 is not supported for anonymous THP. */
-		VM_WARN_ONCE(warns && new_order == 1,
-				"Cannot split to order-1 folio");
 		if (new_order == 1)
-			return false;
+			return -EINVAL;
 	} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
 		if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 		    !mapping_large_folio_support(folio->mapping)) {
···
 			 * case, the mapping does not actually support large
 			 * folios properly.
 			 */
-			VM_WARN_ONCE(warns,
-				"Cannot split file folio to non-0 order");
-			return false;
+			return -EINVAL;
 		}
 	}
···
 	 * here.
 	 */
 	if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
-		VM_WARN_ONCE(warns,
-				"Cannot split swapcache folio to non-0 order");
-		return false;
+		return -EINVAL;
 	}
 
-	return true;
+	if (is_huge_zero_folio(folio))
+		return -EINVAL;
+
+	if (folio_test_writeback(folio))
+		return -EBUSY;
+
+	return 0;
+}
+
+/* Number of folio references from the pagecache or the swapcache. */
+static unsigned int folio_cache_ref_count(const struct folio *folio)
+{
+	if (folio_test_anon(folio) && !folio_test_swapcache(folio))
+		return 0;
+	return folio_nr_pages(folio);
 }
 
 static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
 		struct page *split_at, struct xa_state *xas,
 		struct address_space *mapping, bool do_lru,
 		struct list_head *list, enum split_type split_type,
-		pgoff_t end, int *nr_shmem_dropped, int extra_pins)
+		pgoff_t end, int *nr_shmem_dropped)
 {
 	struct folio *end_folio = folio_next(folio);
 	struct folio *new_folio, *next;
···
 	VM_WARN_ON_ONCE(!mapping && end);
 	/* Prevent deferred_split_scan() touching ->_refcount */
 	ds_queue = folio_split_queue_lock(folio);
-	if (folio_ref_freeze(folio, 1 + extra_pins)) {
+	if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
 		struct swap_cluster_info *ci = NULL;
 		struct lruvec *lruvec;
-		int expected_refs;
 
 		if (old_order > 1) {
 			if (!list_empty(&folio->_deferred_list)) {
···
 
 			zone_device_private_split_cb(folio, new_folio);
 
-			expected_refs = folio_expected_ref_count(new_folio) + 1;
-			folio_ref_unfreeze(new_folio, expected_refs);
+			folio_ref_unfreeze(new_folio,
+					   folio_cache_ref_count(new_folio) + 1);
 
 			if (do_lru)
 				lru_add_split_folio(folio, new_folio, lruvec, list);
···
 		 * Otherwise, a parallel folio_try_get() can grab @folio
 		 * and its caller can see stale page cache entries.
 		 */
-		expected_refs = folio_expected_ref_count(folio) + 1;
-		folio_ref_unfreeze(folio, expected_refs);
+		folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
 
 		if (do_lru)
 			unlock_page_lruvec(lruvec);
···
 	struct folio *new_folio, *next;
 	int nr_shmem_dropped = 0;
 	int remap_flags = 0;
-	int extra_pins, ret;
+	int ret;
 	pgoff_t end = 0;
-	bool is_hzp;
 
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
 
-	if (folio != page_folio(split_at) || folio != page_folio(lock_at))
-		return -EINVAL;
-
-	/*
-	 * Folios that just got truncated cannot get split. Signal to the
-	 * caller that there was a race.
-	 *
-	 * TODO: this will also currently refuse shmem folios that are in the
-	 * swapcache.
-	 */
-	if (!is_anon && !folio->mapping)
-		return -EBUSY;
-
-	if (new_order >= old_order)
-		return -EINVAL;
-
-	if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
-		return -EINVAL;
-
-	is_hzp = is_huge_zero_folio(folio);
-	if (is_hzp) {
-		pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
-		return -EBUSY;
+	if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	if (folio_test_writeback(folio))
-		return -EBUSY;
+	if (new_order >= old_order) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = folio_check_splittable(folio, new_order, split_type);
+	if (ret) {
+		VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
+		goto out;
+	}
 
 	if (is_anon) {
 		/*
···
 	 * Racy check if we can split the page, before unmap_folio() will
 	 * split PMDs
 	 */
-	if (!can_split_folio(folio, 1, &extra_pins)) {
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
 		ret = -EAGAIN;
 		goto out_unlock;
 	}
···
 	}
 
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
-					true, list, split_type, end, &nr_shmem_dropped,
-					extra_pins);
+					true, list, split_type, end, &nr_shmem_dropped);
 fail:
 	if (mapping)
 		xas_unlock(&xas);
···
  */
 int folio_split_unmapped(struct folio *folio, unsigned int new_order)
 {
-	int extra_pins, ret = 0;
+	int ret = 0;
 
 	VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
 	VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
 
-	if (!can_split_folio(folio, 1, &extra_pins))
+	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
 		return -EAGAIN;
 
 	local_irq_disable();
 	ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
 					NULL, false, NULL, SPLIT_TYPE_UNIFORM,
-					0, NULL, extra_pins);
+					0, NULL);
 	local_irq_enable();
 	return ret;
 }
···
 				SPLIT_TYPE_NON_UNIFORM);
 }
 
-int min_order_for_split(struct folio *folio)
+/**
+ * min_order_for_split() - get the minimum order @folio can be split to
+ * @folio: folio to split
+ *
+ * min_order_for_split() tells the minimum order @folio can be split to.
+ * If a file-backed folio is truncated, 0 will be returned. Any subsequent
+ * split attempt should get -EBUSY from split checking code.
+ *
+ * Return: @folio's minimum order for split
+ */
+unsigned int min_order_for_split(struct folio *folio)
 {
 	if (folio_test_anon(folio))
 		return 0;
 
-	if (!folio->mapping) {
-		if (folio_test_pmd_mappable(folio))
-			count_vm_event(THP_SPLIT_PAGE_FAILED);
-		return -EBUSY;
-	}
+	/*
+	 * If the folio got truncated, we don't know the previous mapping and
+	 * consequently the old min order. But it doesn't matter, as any split
+	 * attempt will immediately fail with -EBUSY as the folio cannot get
+	 * split until freed.
+	 */
+	if (!folio->mapping)
+		return 0;
 
 	return mapping_min_folio_order(folio->mapping);
 }
···
 		 * can be split or not. So skip the check here.
 		 */
 		if (!folio_test_private(folio) &&
-		    !can_split_folio(folio, 0, NULL))
+		    folio_expected_ref_count(folio) != folio_ref_count(folio))
 			goto next;
 
 		if (!folio_trylock(folio))
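The recurring theme in this file is replacing can_split_folio()'s extra_pins bookkeeping with a direct comparison: once a folio is unmapped, the only references left should be the caller's pin plus one per page held by the page cache or swap cache, and folio_ref_freeze() is given exactly that count. Below is a standalone model of the arithmetic, not kernel code; folio_model and cache_ref_count() mirror, but are not, the kernel helpers.

/*
 * Any surplus reference means someone else holds a pin, so the split
 * must bail with -EAGAIN instead of freezing the refcount.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct folio_model {
	unsigned int nr_pages;
	bool anon;
	bool swapcache;
	unsigned int refcount;	/* current reference count */
};

static unsigned int cache_ref_count(const struct folio_model *f)
{
	if (f->anon && !f->swapcache)
		return 0;		/* plain anon: no cache references */
	return f->nr_pages;		/* one reference per page in the cache */
}

static int try_freeze_for_split(const struct folio_model *f)
{
	unsigned int expected = cache_ref_count(f) + 1;	/* + caller's pin */

	if (f->refcount != expected)
		return -EAGAIN;		/* extra pins: cannot freeze safely */
	return 0;			/* would freeze the refcount to 0 here */
}

int main(void)
{
	struct folio_model file = { .nr_pages = 16, .refcount = 17 };
	struct folio_model pinned = { .nr_pages = 16, .refcount = 18 };

	printf("file, no extra pins: %d\n", try_freeze_for_split(&file));
	printf("file, one extra pin: %d\n", try_freeze_for_split(&pinned));
	return 0;
}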
+18 -7
mm/hugetlb.c
···
 	struct resv_map *resv_map;
 	struct hugetlb_cgroup *h_cg = NULL;
 	long gbl_reserve, regions_needed = 0;
+	int err;
 
 	/* This should never happen */
 	if (from > to) {
···
 	} else {
 		/* Private mapping. */
 		resv_map = resv_map_alloc();
-		if (!resv_map)
+		if (!resv_map) {
+			err = -ENOMEM;
 			goto out_err;
+		}
 
 		chg = to - from;
 
···
 		set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
 	}
 
-	if (chg < 0)
+	if (chg < 0) {
+		/* region_chg() above can return -ENOMEM */
+		err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
 		goto out_err;
+	}
 
-	if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
-				chg * pages_per_huge_page(h), &h_cg) < 0)
+	err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+				chg * pages_per_huge_page(h), &h_cg);
+	if (err < 0)
 		goto out_err;
 
 	if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
···
 	 * reservations already in place (gbl_reserve).
 	 */
 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
-	if (gbl_reserve < 0)
+	if (gbl_reserve < 0) {
+		err = gbl_reserve;
 		goto out_uncharge_cgroup;
+	}
 
 	/*
 	 * Check enough hugepages are available for the reservation.
 	 * Hand the pages back to the subpool if there are not
 	 */
-	if (hugetlb_acct_memory(h, gbl_reserve) < 0)
+	err = hugetlb_acct_memory(h, gbl_reserve);
+	if (err < 0)
 		goto out_put_pages;
 
 	/*
···
 
 	if (unlikely(add < 0)) {
 		hugetlb_acct_memory(h, -gbl_reserve);
+		err = add;
 		goto out_put_pages;
 	} else if (unlikely(chg > add)) {
 		/*
···
 		kref_put(&resv_map->refs, resv_map_release);
 		set_vma_desc_resv_map(desc, NULL);
 	}
-	return chg < 0 ? chg : add < 0 ? add : -EINVAL;
+	return err;
 }
 
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
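The hugetlb fix replaces the old end-of-function guess (chg < 0 ? chg : add < 0 ? add : -EINVAL) with an err variable recorded at each failure site, so hugetlb_reserve_pages() returns the error that actually occurred. Below is a standalone sketch of that goto-cleanup idiom, not the hugetlb code; reserve() and its helpers are illustrative.

/*
 * Each failure site records err before jumping to the cleanup label,
 * and the label returns exactly that value.
 */
#include <errno.h>
#include <stdio.h>

static int charge_cgroup(long pages)	{ return pages > 1024 ? -ENOMEM : 0; }
static long subpool_get(long pages)	{ return pages; }
static int acct_memory(long pages)	{ return 0; }

static int reserve(long from, long to)
{
	long chg, gbl;
	int err;

	chg = to - from;
	if (chg < 0) {
		err = -EINVAL;		/* recorded at the failure site */
		goto out_err;
	}

	err = charge_cgroup(chg);
	if (err < 0)
		goto out_err;

	gbl = subpool_get(chg);
	if (gbl < 0) {
		err = (int)gbl;
		goto out_err;
	}

	err = acct_memory(gbl);
	if (err < 0)
		goto out_err;

	return 0;

out_err:
	/* cleanup would go here; return the error that was actually hit */
	return err;
}

int main(void)
{
	printf("ok:        %d\n", reserve(0, 100));
	printf("backwards: %d\n", reserve(100, 0));	/* -EINVAL */
	printf("too big:   %d\n", reserve(0, 4096));	/* -ENOMEM */
	return 0;
}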
+5 -3
mm/vmscan.c
···
 			goto keep_locked;
 		if (folio_test_large(folio)) {
 			/* cannot split folio, skip it */
-			if (!can_split_folio(folio, 1, NULL))
+			if (folio_expected_ref_count(folio) !=
+			    folio_ref_count(folio) - 1)
 				goto activate_locked;
 			/*
 			 * Split partially mapped folios right away.
···
 	int scanned = 0;
 	int isolated = 0;
 	int skipped = 0;
-	int remaining = min(nr_to_scan, MAX_LRU_BATCH);
+	int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
+	int remaining = scan_batch;
 	struct lru_gen_folio *lrugen = &lruvec->lrugen;
 	struct mem_cgroup *memcg = lruvec_memcg(lruvec);
···
 	count_memcg_events(memcg, item, isolated);
 	count_memcg_events(memcg, PGREFILL, sorted);
 	__count_vm_events(PGSCAN_ANON + type, isolated);
-	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
+	trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
 				    scanned, skipped, isolated,
 				    type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
 	if (type == LRU_GEN_FILE)
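The vmscan change makes the tracepoint report the batch that was actually requested, min(nr_to_scan, MAX_LRU_BATCH), rather than the compile-time maximum. Below is a standalone sketch, not kernel code; trace_isolate() stands in for the mm_vmscan_lru_isolate tracepoint.

/*
 * Reporting the clamped request keeps the nr_requested field meaningful
 * when the caller asked for fewer folios than the batch limit.
 */
#include <stdio.h>

#define MAX_LRU_BATCH 64

static void trace_isolate(int nr_requested, int nr_scanned, int nr_taken)
{
	printf("requested=%d scanned=%d taken=%d\n",
	       nr_requested, nr_scanned, nr_taken);
}

static void scan_folios(int nr_to_scan)
{
	int scan_batch = nr_to_scan < MAX_LRU_BATCH ? nr_to_scan : MAX_LRU_BATCH;
	int scanned = scan_batch;		/* pretend we scanned the batch */
	int isolated = scan_batch / 2;		/* and isolated half of it */

	/* Before the fix this passed MAX_LRU_BATCH unconditionally. */
	trace_isolate(scan_batch, scanned, isolated);
}

int main(void)
{
	scan_folios(8);		/* requested=8, not 64 */
	scan_folios(200);	/* clamped to 64 */
	return 0;
}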