···
 	__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
 }
 
+void __kasan_vrealloc(const void *start, unsigned long old_size,
+		      unsigned long new_size);
+
+static __always_inline void kasan_vrealloc(const void *start,
+					   unsigned long old_size,
+					   unsigned long new_size)
+{
+	if (kasan_enabled())
+		__kasan_vrealloc(start, old_size, new_size);
+}
+
 #else /* CONFIG_KASAN_VMALLOC */
 
 static inline void kasan_populate_early_vm_area_shadow(void *start,
···
 kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
 			  kasan_vmalloc_flags_t flags)
 { }
+
+static inline void kasan_vrealloc(const void *start, unsigned long old_size,
+				  unsigned long new_size) { }
 
 #endif /* CONFIG_KASAN_VMALLOC */
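For orientation: the header only declares the hook, but the shape of __kasan_vrealloc() can be inferred from the two mm/vmalloc.c call sites it replaces later in this series (re-poison the tail on shrink, unpoison it on grow). A minimal sketch under that assumption; the real body lives in mm/kasan and may differ:

/*
 * Hedged sketch only, reconstructed from the kasan_poison_vmalloc() /
 * kasan_unpoison_vmalloc() calls that kasan_vrealloc() replaces in
 * vrealloc_noprof() below; not the actual mm/kasan implementation.
 */
void __kasan_vrealloc(const void *start, unsigned long old_size,
		      unsigned long new_size)
{
	if (new_size < old_size)
		/* Shrink: re-poison the now-unused tail. */
		kasan_poison_vmalloc(start + new_size, old_size - new_size);
	else if (new_size > old_size)
		/* Grow within the allocation: unpoison the new tail. */
		kasan_unpoison_vmalloc(start + old_size, new_size - old_size,
				       KASAN_VMALLOC_PROT_NORMAL |
				       KASAN_VMALLOC_VM_ALLOC |
				       KASAN_VMALLOC_KEEP_TAG);
}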
+6
include/linux/memfd.h
···
  * to by vm_flags_ptr.
  */
 int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
+struct file *memfd_alloc_file(const char *name, unsigned int flags);
 #else
 static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
 {
···
 					 vm_flags_t *vm_flags_ptr)
 {
 	return 0;
+}
+
+static inline struct file *memfd_alloc_file(const char *name, unsigned int flags)
+{
+	return ERR_PTR(-EINVAL);
 }
 #endif
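A usage sketch for the new helper; the caller name and flag here are illustrative, assuming the flags mirror memfd_create()'s MFD_* values. The stub keeps callers honest by returning ERR_PTR(-EINVAL) when memfd support is compiled out, so IS_ERR() handling is mandatory:

/* Hypothetical caller, not from the patch. */
static int example_use_memfd(void)
{
	struct file *filp = memfd_alloc_file("example-buf", MFD_CLOEXEC);

	if (IS_ERR(filp))
		return PTR_ERR(filp);	/* -EINVAL without memfd support */

	/* ... use filp as an unlinked, shmem-backed file ... */
	fput(filp);
	return 0;
}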
+6 -3
include/linux/memremap.h
···
 }
 
 #ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page, unsigned int order);
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+			   unsigned int order);
 void *memremap_pages(struct dev_pagemap *pgmap, int nid);
 void memunmap_pages(struct dev_pagemap *pgmap);
 void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
···
 
 unsigned long memremap_compat_align(void);
 
-static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+static inline void zone_device_folio_init(struct folio *folio,
+					  struct dev_pagemap *pgmap,
+					  unsigned int order)
 {
-	zone_device_page_init(&folio->page, order);
+	zone_device_page_init(&folio->page, pgmap, order);
 	if (order)
 		folio_set_large_rmappable(folio);
 }
+11 -1
kernel/liveupdate/kexec_handover.c
···
 	if (is_folio && info.order)
 		prep_compound_page(page, info.order);
 
+	/* Always mark headpage's codetag as empty to avoid accounting mismatch */
+	clear_page_tag_ref(page);
+	if (!is_folio) {
+		/* Also do that for the non-compound tail pages */
+		for (unsigned int i = 1; i < nr_pages; i++)
+			clear_page_tag_ref(page + i);
+	}
+
 	adjust_managed_page_count(page, nr_pages);
 	return page;
 }
···
 		chunk->phys[idx++] = phys;
 		if (idx == ARRAY_SIZE(chunk->phys)) {
 			chunk = new_vmalloc_chunk(chunk);
-			if (!chunk)
+			if (!chunk) {
+				err = -ENOMEM;
 				goto err_free;
+			}
 			idx = 0;
 		}
 	}
+5 -1
kernel/vmcore_info.c
···
 	time64_t timestamp;
 };
 
-static struct hwerr_info hwerr_data[HWERR_RECOV_MAX];
+/*
+ * The hwerr_data[] array is declared with global scope so that it remains
+ * accessible to vmcoreinfo even when Link Time Optimization (LTO) is enabled.
+ */
+struct hwerr_info hwerr_data[HWERR_RECOV_MAX];
 
 Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
 			  void *data, size_t data_len)
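Dropping the `static` matters because LTO may rename or discard file-local symbols, which would break symbol resolution against /proc/vmcore. For context, this is how such a global is typically advertised to the crash tools (illustrative only; the matching VMCOREINFO_* lines are not part of this hunk):

/* Illustrative, e.g. from crash_save_vmcoreinfo_init(): */
VMCOREINFO_SYMBOL_ARRAY(hwerr_data);
VMCOREINFO_STRUCT_SIZE(hwerr_info);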
+3 -2
lib/flex_proportions.c
···
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
 	s64 events = percpu_counter_sum(&p->events);
+	unsigned long flags;
 
 	/*
 	 * Don't do anything if there are no events.
 	 */
 	if (events <= 1)
 		return false;
-	preempt_disable_nested();
+	local_irq_save(flags);
 	write_seqcount_begin(&p->sequence);
 	if (periods < 64)
 		events -= events >> periods;
···
 	percpu_counter_add(&p->events, -events);
 	p->period += periods;
 	write_seqcount_end(&p->sequence);
-	preempt_enable_nested();
+	local_irq_restore(flags);
 
 	return true;
 }
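The stronger protection is presumably needed because a seqcount reader (or another writer) can run from interrupt context on the CPU that is mid-write; with only preemption disabled, such a reader would spin forever on the odd sequence count. A simplified sketch of the reader pattern being protected, modeled on fprop_fraction_percpu() (names and simplifications are ours):

static void example_read_fraction(struct fprop_global *p,
				  struct fprop_local_percpu *pl,
				  unsigned long *num, unsigned long *den)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&p->sequence);
		*num = percpu_counter_read_positive(&pl->events);
		*den = percpu_counter_read_positive(&p->events);
	} while (read_seqcount_retry(&p->sequence, seq));
}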
mm/memory-failure.c
···
 			   unsigned long poisoned_pfn, struct to_kill *tk)
 {
 	unsigned long pfn = 0;
+	unsigned long hwpoison_vaddr;
+	unsigned long mask;
 
 	if (pte_present(pte)) {
 		pfn = pte_pfn(pte);
···
 		pfn = softleaf_to_pfn(entry);
 	}
 
-	if (!pfn || pfn != poisoned_pfn)
+	mask = ~((1UL << (shift - PAGE_SHIFT)) - 1);
+	if (!pfn || pfn != (poisoned_pfn & mask))
 		return 0;
 
-	set_to_kill(tk, addr, shift);
+	hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT);
+	set_to_kill(tk, hwpoison_vaddr, shift);
 	return 1;
 }
···
 	return count;
 }
 
-static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page)
+#define MF_HUGETLB_FREED		0	/* freed hugepage */
+#define MF_HUGETLB_IN_USED		1	/* in-use hugepage */
+#define MF_HUGETLB_NON_HUGEPAGE		2	/* not a hugepage */
+#define MF_HUGETLB_FOLIO_PRE_POISONED	3	/* folio already poisoned */
+#define MF_HUGETLB_PAGE_PRE_POISONED	4	/* exact page already poisoned */
+#define MF_HUGETLB_RETRY		5	/* hugepage is busy, retry */
+/*
+ * Set hugetlb folio as hwpoisoned, update folio private raw hwpoison list
+ * to keep track of the poisoned pages.
+ */
+static int hugetlb_update_hwpoison(struct folio *folio, struct page *page)
 {
 	struct llist_head *head;
 	struct raw_hwp_page *raw_hwp;
 	struct raw_hwp_page *p;
-	int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0;
+	int ret = folio_test_set_hwpoison(folio) ? MF_HUGETLB_FOLIO_PRE_POISONED : 0;
 
 	/*
 	 * Once the hwpoison hugepage has lost reliable raw error info,
···
 	 * so skip to add additional raw error info.
 	 */
 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
-		return -EHWPOISON;
+		return MF_HUGETLB_FOLIO_PRE_POISONED;
 	head = raw_hwp_list_head(folio);
 	llist_for_each_entry(p, head->first, node) {
 		if (p->page == page)
-			return -EHWPOISON;
+			return MF_HUGETLB_PAGE_PRE_POISONED;
 	}
 
 	raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC);
 	if (raw_hwp) {
 		raw_hwp->page = page;
 		llist_add(&raw_hwp->node, head);
-		/* the first error event will be counted in action_result(). */
-		if (ret)
-			num_poisoned_pages_inc(page_to_pfn(page));
 	} else {
 		/*
 		 * Failed to save raw error info. We no longer trace all
···
 
 /*
  * Called from hugetlb code with hugetlb_lock held.
- *
- * Return values:
- *   0		- free hugepage
- *   1		- in-use hugepage
- *   2		- not a hugepage
- *   -EBUSY	- the hugepage is busy (try to retry)
- *   -EHWPOISON	- the hugepage is already hwpoisoned
 */
int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				 bool *migratable_cleared)
{
	struct page *page = pfn_to_page(pfn);
	struct folio *folio = page_folio(page);
-	int ret = 2;	/* fallback to normal page handling */
	bool count_increased = false;
+	int ret, rc;
 
-	if (!folio_test_hugetlb(folio))
+	if (!folio_test_hugetlb(folio)) {
+		ret = MF_HUGETLB_NON_HUGEPAGE;
 		goto out;
-
-	if (flags & MF_COUNT_INCREASED) {
-		ret = 1;
+	} else if (flags & MF_COUNT_INCREASED) {
+		ret = MF_HUGETLB_IN_USED;
 		count_increased = true;
 	} else if (folio_test_hugetlb_freed(folio)) {
-		ret = 0;
+		ret = MF_HUGETLB_FREED;
 	} else if (folio_test_hugetlb_migratable(folio)) {
-		ret = folio_try_get(folio);
-		if (ret)
+		if (folio_try_get(folio)) {
+			ret = MF_HUGETLB_IN_USED;
 			count_increased = true;
+		} else {
+			ret = MF_HUGETLB_FREED;
+		}
 	} else {
-		ret = -EBUSY;
+		ret = MF_HUGETLB_RETRY;
 		if (!(flags & MF_NO_RETRY))
 			goto out;
 	}
 
-	if (folio_set_hugetlb_hwpoison(folio, page)) {
-		ret = -EHWPOISON;
+	rc = hugetlb_update_hwpoison(folio, page);
+	if (rc >= MF_HUGETLB_FOLIO_PRE_POISONED) {
+		ret = rc;
 		goto out;
 	}
···
  * with basic operations like hugepage allocation/free/demotion.
  * So some of prechecks for hwpoison (pinning, and testing/setting
  * PageHWPoison) should be done in single hugetlb_lock range.
+ * Returns:
+ * 0		- not hugetlb, or recovered
+ * -EBUSY	- not recovered
+ * -EOPNOTSUPP	- hwpoison_filter'ed
+ * -EHWPOISON	- folio or exact page already poisoned
+ * -EFAULT	- kill_accessing_process finds current->mm null
  */
 static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
 {
-	int res;
+	int res, rv;
 	struct page *p = pfn_to_page(pfn);
 	struct folio *folio;
 	unsigned long page_flags;
···
 	*hugetlb = 1;
 retry:
 	res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared);
-	if (res == 2) { /* fallback to normal page handling */
+	switch (res) {
+	case MF_HUGETLB_NON_HUGEPAGE: /* fallback to normal page handling */
 		*hugetlb = 0;
 		return 0;
-	} else if (res == -EHWPOISON) {
-		if (flags & MF_ACTION_REQUIRED) {
-			folio = page_folio(p);
-			res = kill_accessing_process(current, folio_pfn(folio), flags);
-		}
-		action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
-		return res;
-	} else if (res == -EBUSY) {
+	case MF_HUGETLB_RETRY:
 		if (!(flags & MF_NO_RETRY)) {
 			flags |= MF_NO_RETRY;
 			goto retry;
 		}
 		return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED);
+	case MF_HUGETLB_FOLIO_PRE_POISONED:
+	case MF_HUGETLB_PAGE_PRE_POISONED:
+		rv = -EHWPOISON;
+		if (flags & MF_ACTION_REQUIRED)
+			rv = kill_accessing_process(current, pfn, flags);
+		if (res == MF_HUGETLB_PAGE_PRE_POISONED)
+			action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED);
+		else
+			action_result(pfn, MF_MSG_HUGE, MF_FAILED);
+		return rv;
+	default:
+		WARN_ON((res != MF_HUGETLB_FREED) && (res != MF_HUGETLB_IN_USED));
+		break;
 	}
 
 	folio = page_folio(p);
···
 		if (migratable_cleared)
 			folio_set_hugetlb_migratable(folio);
 		folio_unlock(folio);
-		if (res == 1)
+		if (res == MF_HUGETLB_IN_USED)
 			folio_put(folio);
 		return -EOPNOTSUPP;
 	}
···
 	 * Handling free hugepage. The possible race with hugepage allocation
 	 * or demotion can be prevented by PageHWPoison flag.
 	 */
-	if (res == 0) {
+	if (res == MF_HUGETLB_FREED) {
 		folio_unlock(folio);
 		if (__page_handle_poison(p) > 0) {
 			page_ref_inc(p);
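A worked example of the new matching logic in check_hwpoisoned_entry(), assuming x86-64 with 4K pages and a PMD-mapped hugepage (shift == 21); the numbers are illustrative:

/*
 * pfn          = 0x10200  (head pfn of the PMD entry, 512-page aligned)
 * poisoned_pfn = 0x10203  (a tail page inside the same hugepage)
 *
 * mask = ~((1UL << (21 - 12)) - 1) = ~0x1ffUL
 * poisoned_pfn & mask = 0x10200 == pfn  ->  the huge entry now matches
 * hwpoison_vaddr = addr + ((0x10203 - 0x10200) << 12) = addr + 0x3000
 *
 * The old check (pfn != poisoned_pfn) compared the head pfn against the
 * poisoned tail pfn and missed the mapping entirely; the SIGBUS address
 * now also points at the exact poisoned page, not the mapping start.
 */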
+34 -1
mm/memremap.c
···
 	}
 }
 
-void zone_device_page_init(struct page *page, unsigned int order)
+void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap,
+			   unsigned int order)
 {
+	struct page *new_page = page;
+	unsigned int i;
+
 	VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES);
+
+	for (i = 0; i < (1UL << order); ++i, ++new_page) {
+		struct folio *new_folio = (struct folio *)new_page;
+
+		/*
+		 * new_page could have been part of a previous higher-order
+		 * folio, which encodes the order, in page + 1, in the flags
+		 * bits. We blindly clear bits which could have set the order
+		 * field here, including page head.
+		 */
+		new_page->flags.f &= ~0xffUL; /* Clear possible order, page head */
+
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+		/*
+		 * This pointer math looks odd, but new_page could have been
+		 * part of a previous higher-order folio, which sets _nr_pages
+		 * in page + 1 (new_page). Therefore, we use pointer casting to
+		 * correctly locate the _nr_pages bits within new_page which
+		 * could have been modified by a previous higher-order folio.
+		 */
+		((struct folio *)(new_page - 1))->_nr_pages = 0;
+#endif
+
+		new_folio->mapping = NULL;
+		new_folio->pgmap = pgmap; /* Also clear compound head */
+		new_folio->share = 0; /* fsdax only, unused for device private */
+		VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio);
+		VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio);
+	}
 
 	/*
 	 * Drivers shouldn't be allocating pages after calling
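For driver authors the visible change is just the extra pgmap argument; the clearing of stale large-folio metadata happens inside the loop above. A hedged call-site sketch, with the page-picking helper purely hypothetical:

static struct page *example_alloc_device_folio(struct dev_pagemap *pgmap,
					       unsigned int order)
{
	/* example_pick_free_device_pages() is a made-up driver helper. */
	struct page *page = example_pick_free_device_pages(pgmap, order);

	if (!page)
		return NULL;

	zone_device_page_init(page, pgmap, order);	/* was (page, order) */
	return page;
}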
+6 -6
mm/mm_init.c
···
  */
 static unsigned long __init
 deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
-			   struct zone *zone)
+			   struct zone *zone, bool can_resched)
 {
 	int nid = zone_to_nid(zone);
 	unsigned long nr_pages = 0;
···
 
 		spfn = chunk_end;
 
-		if (irqs_disabled())
-			touch_nmi_watchdog();
-		else
+		if (can_resched)
 			cond_resched();
+		else
+			touch_nmi_watchdog();
 	}
 }
···
 {
 	struct zone *zone = arg;
 
-	deferred_init_memmap_chunk(start_pfn, end_pfn, zone);
+	deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true);
 }
 
 static unsigned int __init
···
 	for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1);
 	     nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
 	     spfn = epfn, epfn += PAGES_PER_SECTION) {
-		nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone);
+		nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false);
 	}
 
 	/*
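The explicit flag replaces sniffing irqs_disabled() inside the loop; each caller now states its own context. With the two call sites above, the behaviour works out as follows (caller names inferred from the surrounding file, not visible in the hunk context):

/*
 * deferred_init_memmap_job()  - padata worker, process context
 *                               -> can_resched == true  -> cond_resched()
 * deferred_grow_zone()        - may be reached with IRQs disabled
 *                               -> can_resched == false -> touch_nmi_watchdog()
 */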
+34 -11
mm/shmem.c
···
  * being freed).
  */
 static long shmem_free_swap(struct address_space *mapping,
-			    pgoff_t index, void *radswap)
+			    pgoff_t index, pgoff_t end, void *radswap)
 {
-	int order = xa_get_order(&mapping->i_pages, index);
-	void *old;
+	XA_STATE(xas, &mapping->i_pages, index);
+	unsigned int nr_pages = 0;
+	pgoff_t base;
+	void *entry;
 
-	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
-	if (old != radswap)
-		return 0;
-	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);
+	xas_lock_irq(&xas);
+	entry = xas_load(&xas);
+	if (entry == radswap) {
+		nr_pages = 1 << xas_get_order(&xas);
+		base = round_down(xas.xa_index, nr_pages);
+		if (base < index || base + nr_pages - 1 > end)
+			nr_pages = 0;
+		else
+			xas_store(&xas, NULL);
+	}
+	xas_unlock_irq(&xas);
 
-	return 1 << order;
+	if (nr_pages)
+		free_swap_and_cache_nr(radix_to_swp_entry(radswap), nr_pages);
+
+	return nr_pages;
 }
 
 /*
···
 		if (xa_is_value(folio)) {
 			if (unfalloc)
 				continue;
-			nr_swaps_freed += shmem_free_swap(mapping,
-							  indices[i], folio);
+			nr_swaps_freed += shmem_free_swap(mapping, indices[i],
+							  end - 1, folio);
 			continue;
 		}
···
 		folio = fbatch.folios[i];
 
 		if (xa_is_value(folio)) {
+			int order;
 			long swaps_freed;
 
 			if (unfalloc)
 				continue;
-			swaps_freed = shmem_free_swap(mapping, indices[i], folio);
+			swaps_freed = shmem_free_swap(mapping, indices[i],
+						      end - 1, folio);
 			if (!swaps_freed) {
+				/*
+				 * If we found a large swap entry crossing the
+				 * end boundary, skip it: the
+				 * truncate_inode_partial_folio() above should
+				 * have zeroed its content at least once.
+				 */
+				order = shmem_confirm_swap(mapping, indices[i],
+							   radix_to_swp_entry(folio));
+				if (order > 0 && indices[i] + (1 << order) > end)
+					continue;
 				/* Swap was replaced by page: retry */
 				index = indices[i];
 				break;
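A worked example of the new bounds check in shmem_free_swap() (indices are illustrative):

/*
 * Suppose a hole is punched over indices 100..103 (callers pass end = 103):
 *
 * Order-2 entry covering 100..103, found at index = 100:
 *   nr_pages = 4, base = round_down(100, 4) = 100
 *   base >= index && base + 4 - 1 <= end -> stored NULL, 4 swap slots freed
 *
 * Order-3 entry covering 96..103, found at index = 100:
 *   base = round_down(100, 8) = 96 < index -> nr_pages = 0, skipped;
 *   the entry crosses the start boundary and must not be freed whole.
 *
 * If instead end were 101, the order-2 entry would hit
 * base + nr_pages - 1 > end and is likewise skipped, to be handled by the
 * partial-truncation path the caller falls back to.
 */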
···
 #endif
 };
 
-/* Set swap_space as read only as swap cache is handled by swap table */
-struct address_space swap_space __ro_after_init = {
+struct address_space swap_space __read_mostly = {
 	.a_ops = &swap_aops,
 };
 
+2 -5
mm/vmalloc.c
···
 		if (want_init_on_free() || want_init_on_alloc(flags))
 			memset((void *)p + size, 0, old_size - size);
 		vm->requested_size = size;
-		kasan_poison_vmalloc(p + size, old_size - size);
+		kasan_vrealloc(p, old_size, size);
 		return (void *)p;
 	}
···
 	 * We already have the bytes available in the allocation; use them.
 	 */
 	if (size <= alloced_size) {
-		kasan_unpoison_vmalloc(p + old_size, size - old_size,
-				       KASAN_VMALLOC_PROT_NORMAL |
-				       KASAN_VMALLOC_VM_ALLOC |
-				       KASAN_VMALLOC_KEEP_TAG);
 		/*
 		 * No need to zero memory here, as unused memory will have
 		 * already been zeroed at initial allocation time or during
 		 * realloc shrink time.
 		 */
 		vm->requested_size = size;
+		kasan_vrealloc(p, old_size, size);
 		return (void *)p;
 	}
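Caller-visible vrealloc() semantics are unchanged; both in-place paths now funnel their KASAN bookkeeping through kasan_vrealloc(). A minimal usage sketch (sizes illustrative):

/* In-place shrink, then grow back within the original allocation. */
void *buf = vmalloc(8 * PAGE_SIZE);

buf = vrealloc(buf, 4 * PAGE_SIZE, GFP_KERNEL);	/* shrink: tail re-poisoned */
buf = vrealloc(buf, 6 * PAGE_SIZE, GFP_KERNEL);	/* grow: tail unpoisoned, already zeroed */
vfree(buf);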