Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2025-01-04-18-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
"25 hotfixes. 16 are cc:stable. 18 are MM and 7 are non-MM.

The usual bunch of singletons and two doubletons - please see the
relevant changelogs for details"

* tag 'mm-hotfixes-stable-2025-01-04-18-02' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (25 commits)
MAINTAINERS: change Arınç ÜNAL's name and email address
scripts/sorttable: fix orc_sort_cmp() to maintain symmetry and transitivity
mm/util: make memdup_user_nul() similar to memdup_user()
mm, madvise: fix potential workingset node list_lru leaks
mm/damon/core: fix ignored quota goals and filters of newly committed schemes
mm/damon/core: fix new damon_target objects leaks on damon_commit_targets()
mm/list_lru: fix false warning of negative counter
vmstat: disable vmstat_work on vmstat_cpu_down_prep()
mm: shmem: fix the update of 'shmem_falloc->nr_unswapped'
mm: shmem: fix incorrect index alignment for within_size policy
percpu: remove intermediate variable in PERCPU_PTR()
mm: zswap: fix race between [de]compression and CPU hotunplug
ocfs2: fix slab-use-after-free due to dangling pointer dqi_priv
fs/proc/task_mmu: fix pagemap flags with PMD THP entries on 32bit
kcov: mark in_softirq_really() as __always_inline
docs: mm: fix the incorrect 'FileHugeMapped' field
mailmap: modify the entry for Mathieu Othacehe
mm/kmemleak: fix sleeping function called from invalid context at print message
mm: hugetlb: independent PMD page table shared count
maple_tree: reload mas before the second call for mas_empty_area
...

29 files changed: +211 -68
+1 -1
.mailmap
···
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@puri.sm>
Martin Kepplinger <martink@posteo.de> <martin.kepplinger@theobroma-systems.com>
Martyna Szapar-Mudlaw <martyna.szapar-mudlaw@linux.intel.com> <martyna.szapar-mudlaw@intel.com>
- Mathieu Othacehe <m.othacehe@gmail.com> <othacehe@gnu.org>
+ Mathieu Othacehe <othacehe@gnu.org> <m.othacehe@gmail.com>
Mat Martineau <martineau@kernel.org> <mathew.j.martineau@linux.intel.com>
Mat Martineau <martineau@kernel.org> <mathewm@codeaurora.org>
Matthew Wilcox <willy@infradead.org> <matthew.r.wilcox@intel.com>
+1 -1
Documentation/admin-guide/mm/transhuge.rst
···
The number of file transparent huge pages mapped to userspace is available
by reading ShmemPmdMapped and ShmemHugePages fields in ``/proc/meminfo``.
To identify what applications are mapping file transparent huge pages, it
- is necessary to read ``/proc/PID/smaps`` and count the FileHugeMapped fields
+ is necessary to read ``/proc/PID/smaps`` and count the FilePmdMapped fields
for each mapping.

Note that reading the smaps file is expensive and reading it
+3 -3
MAINTAINERS
···
F:	include/soc/mediatek/smi.h

MEDIATEK SWITCH DRIVER
- M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+ M:	Chester A. Unal <chester.a.unal@arinc9.com>
M:	Daniel Golle <daniel@makrotopia.org>
M:	DENG Qingfang <dqfext@gmail.com>
M:	Sean Wang <sean.wang@mediatek.com>
···
F:	drivers/pinctrl/mediatek/

PIN CONTROLLER - MEDIATEK MIPS
- M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+ M:	Chester A. Unal <chester.a.unal@arinc9.com>
M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
L:	linux-mediatek@lists.infradead.org (moderated for non-subscribers)
L:	linux-mips@vger.kernel.org
···
F:	arch/mips/ralink

RALINK MT7621 MIPS ARCHITECTURE
- M:	Arınç ÜNAL <arinc.unal@arinc9.com>
+ M:	Chester A. Unal <chester.a.unal@arinc9.com>
M:	Sergio Paracuellos <sergio.paracuellos@gmail.com>
L:	linux-mips@vger.kernel.org
S:	Maintained
+1 -1
fs/ocfs2/quota_global.c
···
	int status = 0;

	trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
-	if (!sb_has_quota_loaded(sb, type)) {
+	if (!sb_has_quota_active(sb, type)) {
		status = -ESRCH;
		goto out;
	}
+1
fs/ocfs2/quota_local.c
···
	brelse(oinfo->dqi_libh);
	brelse(oinfo->dqi_lqi_bh);
	kfree(oinfo);
+	info->dqi_priv = NULL;
	return status;
}
+1 -1
fs/proc/task_mmu.c
···
	}

	for (; addr != end; addr += PAGE_SIZE, idx++) {
-		unsigned long cur_flags = flags;
+		u64 cur_flags = flags;
		pagemap_entry_t pme;

		if (folio && (flags & PM_PRESENT) &&
+14
include/linux/memfd.h
···
#ifdef CONFIG_MEMFD_CREATE
extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg);
struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
+ unsigned int *memfd_file_seals_ptr(struct file *file);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
···
{
	return ERR_PTR(-EINVAL);
}
+
+ static inline unsigned int *memfd_file_seals_ptr(struct file *file)
+ {
+	return NULL;
+ }
#endif
+
+ /* Retrieve memfd seals associated with the file, if any. */
+ static inline unsigned int memfd_file_seals(struct file *file)
+ {
+	unsigned int *sealsp = memfd_file_seals_ptr(file);
+
+	return sealsp ? *sealsp : 0;
+ }

#endif /* __LINUX_MEMFD_H */
+40 -17
include/linux/mm.h
···
	if (!pmd_ptlock_init(ptdesc))
		return false;
	__folio_set_pgtable(folio);
+	ptdesc_pmd_pts_init(ptdesc);
	lruvec_stat_add_folio(folio, NR_PAGETABLE);
	return true;
}
···
static inline void mem_dump_obj(void *object) {}
#endif

+ static inline bool is_write_sealed(int seals)
+ {
+	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
+ }
+
+ /**
+  * is_readonly_sealed - Checks whether write-sealed but mapped read-only,
+  *                      in which case writes should be disallowing moving
+  *                      forwards.
+  * @seals: the seals to check
+  * @vm_flags: the VMA flags to check
+  *
+  * Returns whether readonly sealed, in which case writess should be disallowed
+  * going forward.
+  */
+ static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
+ {
+	/*
+	 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
+	 * MAP_SHARED and read-only, take care to not allow mprotect to
+	 * revert protections on such mappings. Do this only for shared
+	 * mappings. For private mappings, don't need to mask
+	 * VM_MAYWRITE as we still want them to be COW-writable.
+	 */
+	if (is_write_sealed(seals) &&
+	    ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
+		return true;
+
+	return false;
+ }
+
/**
 * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
 *                    handle them.
···
 */
static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
-	if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
-		/*
-		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
-		 * write seals are active.
-		 */
-		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-			return -EPERM;
+	if (!is_write_sealed(seals))
+		return 0;
-
-		/*
-		 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
-		 * MAP_SHARED and read-only, take care to not allow mprotect to
-		 * revert protections on such mappings. Do this only for shared
-		 * mappings. For private mappings, don't need to mask
-		 * VM_MAYWRITE as we still want them to be COW-writable.
-		 */
-		if (vma->vm_flags & VM_SHARED)
-			vm_flags_clear(vma, VM_MAYWRITE);
-	}
+	/*
+	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
+	 * write seals are active.
+	 */
+	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
+		return -EPERM;

	return 0;
}
+30
include/linux/mm_types.h
···
 * @pt_index:          Used for s390 gmap.
 * @pt_mm:             Used for x86 pgds.
 * @pt_frag_refcount:  For fragmented page table tracking. Powerpc only.
+ * @pt_share_count:    Used for HugeTLB PMD page table share count.
 * @_pt_pad_2:         Padding to ensure proper alignment.
 * @ptl:               Lock for the page table.
 * @__page_type:       Same as page->page_type. Unused for page tables.
···
		pgoff_t pt_index;
		struct mm_struct *pt_mm;
		atomic_t pt_frag_refcount;
+ #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+		atomic_t pt_share_count;
+ #endif
	};

	union {
···
#define page_ptdesc(p) (_Generic((p), \
	const struct page *: (const struct ptdesc *)(p), \
	struct page *: (struct ptdesc *)(p)))
+
+ #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
+ static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+ {
+	atomic_set(&ptdesc->pt_share_count, 0);
+ }
+
+ static inline void ptdesc_pmd_pts_inc(struct ptdesc *ptdesc)
+ {
+	atomic_inc(&ptdesc->pt_share_count);
+ }
+
+ static inline void ptdesc_pmd_pts_dec(struct ptdesc *ptdesc)
+ {
+	atomic_dec(&ptdesc->pt_share_count);
+ }
+
+ static inline int ptdesc_pmd_pts_count(struct ptdesc *ptdesc)
+ {
+	return atomic_read(&ptdesc->pt_share_count);
+ }
+ #else
+ static inline void ptdesc_pmd_pts_init(struct ptdesc *ptdesc)
+ {
+ }
+ #endif

/*
 * Used for sizing the vmemmap region on some architectures
+1 -4
include/linux/percpu-defs.h
···
} while (0)

#define PERCPU_PTR(__p) \
- ({ \
-	unsigned long __pcpu_ptr = (__force unsigned long)(__p); \
-	(typeof(*(__p)) __force __kernel *)(__pcpu_ptr); \
- })
+	(typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p))

#ifdef CONFIG_SMP
+1 -1
kernel/kcov.c
···
 * Unlike in_serving_softirq(), this function returns false when called during
 * a hardirq or an NMI that happened in the softirq context.
 */
- static inline bool in_softirq_really(void)
+ static __always_inline bool in_softirq_really(void)
{
	return in_serving_softirq() && !in_hardirq() && !in_nmi();
}
+1
lib/maple_tree.c
···
		ret = 1;
	}
	if (ret < 0 && range_lo > min) {
+		mas_reset(mas);
		ret = mas_empty_area(mas, min, range_hi, 1);
		if (ret == 0)
			ret = 1;
+9 -1
mm/damon/core.c
···
					      NUMA_NO_NODE);
		if (!new_scheme)
			return -ENOMEM;
+		err = damos_commit(new_scheme, src_scheme);
+		if (err) {
+			damon_destroy_scheme(new_scheme);
+			return err;
+		}
		damon_add_scheme(dst, new_scheme);
	}
	return 0;
···
			return -ENOMEM;
		err = damon_commit_target(new_target, false,
				src_target, damon_target_has_pid(src));
-		if (err)
+		if (err) {
+			damon_destroy_target(new_target);
			return err;
+		}
+		damon_add_target(dst, new_target);
	}
	return 0;
}
-9
mm/filemap.c
···
 *    ->private_lock		(zap_pte_range->block_dirty_folio)
 */

- static void mapping_set_update(struct xa_state *xas,
-		struct address_space *mapping)
- {
-	if (dax_mapping(mapping) || shmem_mapping(mapping))
-		return;
-	xas_set_update(xas, workingset_update_node);
-	xas_set_lru(xas, &shadow_nodes);
- }
-
static void page_cache_delete(struct address_space *mapping,
				struct folio *folio, void *shadow)
{
+7 -9
mm/hugetlb.c
···
		spte = hugetlb_walk(svma, saddr,
				    vma_mmu_pagesize(svma));
		if (spte) {
-			get_page(virt_to_page(spte));
+			ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
			break;
		}
	}
···
			  (pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
-		put_page(virt_to_page(spte));
+		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
···
/*
 * unmap huge page backed by shared pte.
 *
- * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
 * Called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
···
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
+	unsigned long sz = huge_page_size(hstate_vma(vma));
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
-	BUG_ON(page_count(virt_to_page(ptep)) == 0);
-	if (page_count(virt_to_page(ptep)) == 1)
+	if (sz != PMD_SIZE)
+		return 0;
+	if (!ptdesc_pmd_pts_count(virt_to_ptdesc(ptep)))
		return 0;

	pud_clear(pud);
-	put_page(virt_to_page(ptep));
+	ptdesc_pmd_pts_dec(virt_to_ptdesc(ptep));
	mm_dec_nr_pmds(mm);
	return 1;
}
+6
mm/internal.h
···
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
extern struct list_lru shadow_nodes;
+ #define mapping_set_update(xas, mapping) do { \
+	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
+		xas_set_update(xas, workingset_update_node); \
+		xas_set_lru(xas, &shadow_nodes); \
+	} \
+ } while (0)

/* mremap.c */
unsigned long move_page_tables(struct vm_area_struct *vma,
+3
mm/khugepaged.c
···
#include <linux/rcupdate_wait.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
+ #include <linux/dax.h>
#include <linux/ksm.h>

#include <asm/tlb.h>
···
	result = alloc_charge_folio(&new_folio, mm, cc);
	if (result != SCAN_SUCCEED)
		goto out;
+
+	mapping_set_update(&xas, mapping);

	__folio_set_locked(new_folio);
	if (is_shmem)
+1 -1
mm/kmemleak.c
···

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
-		warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
+		warn_or_seq_printf(seq, " %pS\n", ptr);
	}
+1 -1
mm/list_lru.c
···
	spin_lock(&l->lock);
	nr_items = READ_ONCE(l->nr_items);
	if (likely(nr_items != LONG_MIN)) {
-		WARN_ON(nr_items < 0);
		rcu_read_unlock();
		return l;
	}
···

	list_splice_init(&src->list, &dst->list);
	if (src->nr_items) {
+		WARN_ON(src->nr_items < 0);
		dst->nr_items += src->nr_items;
		set_shrinker_bit(dst_memcg, nid, lru_shrinker_id(lru));
	}
+1 -1
mm/memfd.c
···
	return error;
}

- static unsigned int *memfd_file_seals_ptr(struct file *file)
+ unsigned int *memfd_file_seals_ptr(struct file *file)
{
	if (shmem_file(file))
		return &SHMEM_I(file_inode(file))->seals;
+5 -1
mm/mmap.c
···
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/ksm.h>
+ #include <linux/memfd.h>

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
···

	if (file) {
		struct inode *inode = file_inode(file);
+		unsigned int seals = memfd_file_seals(file);
		unsigned long flags_mask;

		if (!file_mmap_ok(file, inode, pgoff, len))
···
			vm_flags |= VM_SHARED | VM_MAYSHARE;
			if (!(file->f_mode & FMODE_WRITE))
				vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
+			else if (is_readonly_sealed(seals, vm_flags))
+				vm_flags &= ~VM_MAYWRITE;
			fallthrough;
		case MAP_PRIVATE:
			if (!(file->f_mode & FMODE_READ))
···

	if (get_area) {
		addr = get_area(file, addr, len, pgoff, flags);
-	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && !file
		   && !addr /* no hint */
		   && IS_ALIGNED(len, PMD_SIZE)) {
		/* Ensures that larger anonymous mappings are THP aligned. */
+5 -1
mm/readahead.c
···
				1UL << order);
	if (index == expected) {
		ra->start += ra->size;
-		ra->size = get_next_ra_size(ra, max_pages);
+		/*
+		 * In the case of MADV_HUGEPAGE, the actual size might exceed
+		 * the readahead window.
+		 */
+		ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
		ra->async_size = ra->size;
		goto readit;
	}
+4 -3
mm/shmem.c
···
		    !shmem_falloc->waitq &&
		    index >= shmem_falloc->start &&
		    index < shmem_falloc->next)
-			shmem_falloc->nr_unswapped++;
+			shmem_falloc->nr_unswapped += nr_pages;
		else
			shmem_falloc = NULL;
		spin_unlock(&inode->i_lock);
···
	unsigned long mask = READ_ONCE(huge_shmem_orders_always);
	unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size);
	unsigned long vm_flags = vma ? vma->vm_flags : 0;
+	pgoff_t aligned_index;
	bool global_huge;
	loff_t i_size;
	int order;
···
		/* Allow mTHP that will be fully within i_size. */
		order = highest_order(within_size_orders);
		while (within_size_orders) {
-			index = round_up(index + 1, order);
+			aligned_index = round_up(index + 1, 1 << order);
			i_size = round_up(i_size_read(inode), PAGE_SIZE);
-			if (i_size >> PAGE_SHIFT >= index) {
+			if (i_size >> PAGE_SHIFT >= aligned_index) {
				mask |= within_size_orders;
				break;
			}
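
The within_size fix above hinges on aligning to the mTHP size in pages (1 << order) rather than to the order value itself. A small userspace arithmetic sketch, illustrative only, with a round_up() macro mirroring the kernel's power-of-two rounding and made-up index/order values, shows the difference:

/* Illustrative only -- not kernel code. */
#include <stdio.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long index = 5, order = 4;	/* order-4 mTHP covers 16 pages */

	/* old code: aligned to the order value (4), giving 8 */
	printf("round_up(index + 1, order)      = %lu\n",
	       round_up(index + 1, order));
	/* fixed code: aligned to the mTHP size in pages (16), giving 16 */
	printf("round_up(index + 1, 1 << order) = %lu\n",
	       round_up(index + 1, 1UL << order));
	return 0;
}

With the fix, the i_size check compares against the true end of the aligned mTHP range, so orders that would spill past i_size are no longer allowed by mistake.
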
+1 -6
mm/util.c
···
{
	char *p;

-	/*
-	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
-	 * cause pagefault, which makes it pointless to use GFP_NOFS
-	 * or GFP_ATOMIC.
-	 */
-	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
+	p = kmem_buckets_alloc_track_caller(user_buckets, len + 1, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);
+8 -1
mm/vmscan.c
···
	if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
		nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
			zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
-
+	/*
+	 * If there are no reclaimable file-backed or anonymous pages,
+	 * ensure zones with sufficient free pages are not skipped.
+	 * This prevents zones like DMA32 from being ignored in reclaim
+	 * scenarios where they can still help alleviate memory pressure.
+	 */
+	if (nr == 0)
+		nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
	return nr;
}
+2 -1
mm/vmstat.c
···
	if (!node_state(cpu_to_node(cpu), N_CPU)) {
		node_set_state(cpu_to_node(cpu), N_CPU);
	}
+	enable_delayed_work(&per_cpu(vmstat_work, cpu));

	return 0;
}

static int vmstat_cpu_down_prep(unsigned int cpu)
{
-	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+	disable_delayed_work_sync(&per_cpu(vmstat_work, cpu));
	return 0;
}
+16 -3
mm/zswap.c
···
	return 0;
}

+ /* Prevent CPU hotplug from freeing up the per-CPU acomp_ctx resources */
+ static struct crypto_acomp_ctx *acomp_ctx_get_cpu(struct crypto_acomp_ctx __percpu *acomp_ctx)
+ {
+	cpus_read_lock();
+	return raw_cpu_ptr(acomp_ctx);
+ }
+
+ static void acomp_ctx_put_cpu(void)
+ {
+	cpus_read_unlock();
+ }
+
static bool zswap_compress(struct page *page, struct zswap_entry *entry,
			   struct zswap_pool *pool)
{
···
	gfp_t gfp;
	u8 *dst;

-	acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
-
+	acomp_ctx = acomp_ctx_get_cpu(pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	dst = acomp_ctx->buffer;
···
		zswap_reject_alloc_fail++;

	mutex_unlock(&acomp_ctx->mutex);
+	acomp_ctx_put_cpu();
	return comp_ret == 0 && alloc_ret == 0;
}
···
	struct crypto_acomp_ctx *acomp_ctx;
	u8 *src;

-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+	acomp_ctx = acomp_ctx_get_cpu(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
···

	if (src != acomp_ctx->buffer)
		zpool_unmap_handle(zpool, entry->handle);
+	acomp_ctx_put_cpu();
}

/*********************************
+4 -1
scripts/sorttable.h
···

static int orc_sort_cmp(const void *_a, const void *_b)
{
-	struct orc_entry *orc_a;
+	struct orc_entry *orc_a, *orc_b;
	const int *a = g_orc_ip_table + *(int *)_a;
	const int *b = g_orc_ip_table + *(int *)_b;
	unsigned long a_val = orc_ip(a);
···
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = g_orc_table + (a - g_orc_ip_table);
+	orc_b = g_orc_table + (b - g_orc_ip_table);
+	if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
+		return 0;
	return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}
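
For context on the sorttable fix: qsort() requires a comparator that is symmetric and transitive, and the old final branch returned -1 whenever orc_a was ORC_TYPE_UNDEFINED without looking at orc_b, so two UNDEFINED entries compared as "less than" in both directions. The toy program below (editor's sketch, not part of the patch; the struct and constant are stand-ins, not the kernel's definitions) isolates just that branch:

/* Illustrative only. */
#include <stdio.h>

#define ORC_TYPE_UNDEFINED 0

struct orc_entry { int type; };

static int cmp_old(const struct orc_entry *a, const struct orc_entry *b)
{
	(void)b;	/* old code never consulted b in this branch */
	return a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

static int cmp_new(const struct orc_entry *a, const struct orc_entry *b)
{
	if (a->type == ORC_TYPE_UNDEFINED && b->type == ORC_TYPE_UNDEFINED)
		return 0;	/* equal keys must compare equal */
	return a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

int main(void)
{
	struct orc_entry a = { ORC_TYPE_UNDEFINED }, b = { ORC_TYPE_UNDEFINED };

	/* old: both orderings claim "less than" -- not a valid total order */
	printf("old: %d %d\n", cmp_old(&a, &b), cmp_old(&b, &a));
	/* new: 0 in both directions, so qsort() sees a consistent order */
	printf("new: %d %d\n", cmp_new(&a, &b), cmp_new(&b, &a));
	return 0;
}
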
+43
tools/testing/selftests/memfd/memfd_test.c
···
	return p;
}

+ static void *mfd_assert_mmap_read_shared(int fd)
+ {
+	void *p;
+
+	p = mmap(NULL,
+		 mfd_def_size,
+		 PROT_READ,
+		 MAP_SHARED,
+		 fd,
+		 0);
+	if (p == MAP_FAILED) {
+		printf("mmap() failed: %m\n");
+		abort();
+	}
+
+	return p;
+ }
+
static void *mfd_assert_mmap_private(int fd)
{
	void *p;
···
	close(fd);
}

+ static void test_seal_write_map_read_shared(void)
+ {
+	int fd;
+	void *p;
+
+	printf("%s SEAL-WRITE-MAP-READ\n", memfd_str);
+
+	fd = mfd_assert_new("kern_memfd_seal_write_map_read",
+			    mfd_def_size,
+			    MFD_CLOEXEC | MFD_ALLOW_SEALING);
+
+	mfd_assert_add_seals(fd, F_SEAL_WRITE);
+	mfd_assert_has_seals(fd, F_SEAL_WRITE);
+
+	p = mfd_assert_mmap_read_shared(fd);
+
+	mfd_assert_read(fd);
+	mfd_assert_read_shared(fd);
+	mfd_fail_write(fd);
+
+	munmap(p, mfd_def_size);
+	close(fd);
+ }
+
/*
 * Test SEAL_SHRINK
 * Test whether SEAL_SHRINK actually prevents shrinking
···

	test_seal_write();
	test_seal_future_write();
+	test_seal_write_map_read_shared();
	test_seal_shrink();
	test_seal_grow();
	test_seal_resize();
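
The selftest above exercises the behaviour reinstated by the mm.h and mm/mmap.c changes: a write-sealed memfd may still be mapped MAP_SHARED read-only, while writable shared mappings keep failing, and the read-only mapping can no longer be flipped writable via mprotect() because VM_MAYWRITE is masked. A minimal standalone sketch of the same check from userspace (editor's illustration, not part of the selftest; requires glibc's memfd_create() and a Linux host):

/* Illustrative only. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = getpagesize();
	int fd = memfd_create("sealed", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	ftruncate(fd, len);
	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);

	/* Allowed on kernels carrying this series: read-only shared mapping. */
	void *ro = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	printf("read-only MAP_SHARED: %s\n",
	       ro == MAP_FAILED ? strerror(errno) : "ok");

	/* Always rejected once F_SEAL_WRITE is set. */
	void *rw = mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0);
	printf("writable  MAP_SHARED: %s\n",
	       rw == MAP_FAILED ? strerror(errno) : "ok");
	return 0;
}
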