Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"6 hotfixes. 4 are cc:stable. 3 are for MM.

All are singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-03-16-12-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
MAINTAINERS: update email address for Ignat Korchagin
mm/huge_memory: fix early failure try_to_migrate() when split huge pmd for shared THP
mm/rmap: fix incorrect pte restoration for lazyfree folios
mm/huge_memory: fix use of NULL folio in move_pages_huge_pmd()
build_bug.h: correct function parameters names in kernel-doc
crash_dump: don't log dm-crypt key bytes in read_key_from_user_keyring

+29 -12
+1
.mailmap
···
 327  327   Herbert Xu <herbert@gondor.apana.org.au>
 328  328   Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
 329  329   Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
      330 + Ignat Korchagin <ignat@linux.win> <ignat@cloudflare.com>
 330  331   Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com>
 331  332   J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com>
 332  333   J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>
+4 -4
MAINTAINERS
···
 4022 4022   ASYMMETRIC KEYS
 4023 4023   M: David Howells <dhowells@redhat.com>
 4024 4024   M: Lukas Wunner <lukas@wunner.de>
 4025      - M: Ignat Korchagin <ignat@cloudflare.com>
      4025 + M: Ignat Korchagin <ignat@linux.win>
 4026 4026   L: keyrings@vger.kernel.org
 4027 4027   L: linux-crypto@vger.kernel.org
 4028 4028   S: Maintained
···
 4035 4035
 4036 4036   ASYMMETRIC KEYS - ECDSA
 4037 4037   M: Lukas Wunner <lukas@wunner.de>
 4038      - M: Ignat Korchagin <ignat@cloudflare.com>
      4038 + M: Ignat Korchagin <ignat@linux.win>
 4039 4039   R: Stefan Berger <stefanb@linux.ibm.com>
 4040 4040   L: linux-crypto@vger.kernel.org
 4041 4041   S: Maintained
···
 4045 4045
 4046 4046   ASYMMETRIC KEYS - GOST
 4047 4047   M: Lukas Wunner <lukas@wunner.de>
 4048      - M: Ignat Korchagin <ignat@cloudflare.com>
      4048 + M: Ignat Korchagin <ignat@linux.win>
 4049 4049   L: linux-crypto@vger.kernel.org
 4050 4050   S: Odd fixes
 4051 4051   F: crypto/ecrdsa*
 4052 4052
 4053 4053   ASYMMETRIC KEYS - RSA
 4054 4054   M: Lukas Wunner <lukas@wunner.de>
 4055      - M: Ignat Korchagin <ignat@cloudflare.com>
      4055 + M: Ignat Korchagin <ignat@linux.win>
 4056 4056   L: linux-crypto@vger.kernel.org
 4057 4057   S: Maintained
 4058 4058   F: crypto/rsa*
+3 -1
include/linux/build_bug.h
···
 32 32   /**
 33 33    * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
 34 34    * error message.
 35    -  * @condition: the condition which the compiler should know is false.
    35 +  * @cond: the condition which the compiler should know is false.
    36 +  * @msg: build-time error message
 36 37    *
 37 38    * See BUILD_BUG_ON for description.
 38 39    */
···
 61 60
 62 61   /**
 63 62    * static_assert - check integer constant expression at build time
    63 +  * @expr: expression to be checked
 64 64    *
 65 65    * static_assert() is a wrapper for the C11 _Static_assert, with a
 66 66    * little macro magic to make the message optional (defaulting to the
+2 -2
kernel/crash_dump_dm_crypt.c
···
 168 168
 169 169   	memcpy(dm_key->data, ukp->data, ukp->datalen);
 170 170   	dm_key->key_size = ukp->datalen;
 171     - 	kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size,
 172     - 		      dm_key->key_desc, dm_key->data);
     171 + 	kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size,
     172 + 		      dm_key->key_desc);
 173 173
 174 174   out:
 175 175   	up_read(&key->sem);
+2 -1
mm/huge_memory.c
···
 2797 2797   		_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
 2798 2798   	} else {
 2799 2799   		src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
 2800      - 		_dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
      2800 + 		_dst_pmd = move_soft_dirty_pmd(src_pmdval);
      2801 + 		_dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
 2801 2802   	}
 2802 2803   	set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
 2803 2804
+17 -4
mm/rmap.c
···
 1955 1955   	if (userfaultfd_wp(vma))
 1956 1956   		return 1;
 1957 1957
 1958      - 	return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
      1958 + 	/*
      1959 + 	 * If unmap fails, we need to restore the ptes. To avoid accidentally
      1960 + 	 * upgrading write permissions for ptes that were not originally
      1961 + 	 * writable, and to avoid losing the soft-dirty bit, use the
      1962 + 	 * appropriate FPB flags.
      1963 + 	 */
      1964 + 	return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
      1965 + 				     FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
 1959 1966   }
 1960 1967
 1961 1968   /*
···
 2450 2443   		__maybe_unused pmd_t pmdval;
 2451 2444
 2452 2445   		if (flags & TTU_SPLIT_HUGE_PMD) {
      2446 + 			/*
      2447 + 			 * split_huge_pmd_locked() might leave the
      2448 + 			 * folio mapped through PTEs. Retry the walk
      2449 + 			 * so we can detect this scenario and properly
      2450 + 			 * abort the walk.
      2451 + 			 */
 2453 2452   			split_huge_pmd_locked(vma, pvmw.address,
 2454 2453   					      pvmw.pmd, true);
 2455      - 			ret = false;
 2456      - 			page_vma_mapped_walk_done(&pvmw);
 2457      - 			break;
      2454 + 			flags &= ~TTU_SPLIT_HUGE_PMD;
      2455 + 			page_vma_mapped_walk_restart(&pvmw);
      2456 + 			continue;
 2458 2457   		}
 2459 2458   #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
 2460 2459   		pmdval = pmdp_get(pvmw.pmd);