Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"Nine hotfixes.

Six for MM, three for other areas. Four of these patches address
post-6.0 issues"

* tag 'mm-hotfixes-stable-2022-12-10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
memcg: fix possible use-after-free in memcg_write_event_control()
MAINTAINERS: update Muchun Song's email
mm/gup: fix gup_pud_range() for dax
mmap: fix do_brk_flags() modifying obviously incorrect VMAs
mm/swap: fix SWP_PFN_BITS with CONFIG_PHYS_ADDR_T_64BIT on 32bit
tmpfs: fix data loss from failed fallocate
kselftests: cgroup: update kmem test precision tolerance
mm: do not BUG_ON missing brk mapping, because userspace can unmap it
mailmap: update Matti Vaittinen's email address

7 files changed, 29 insertions(+), 19 deletions(-)
.mailmap | +3
···
 Matthew Wilcox <willy@infradead.org> <willy@parisc-linux.org>
 Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu>
 Matthieu CASTET <castet.matthieu@free.fr>
+Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com>
 Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting>
 Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
 Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
···
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
+Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
MAINTAINERS | +2 -2
···
 M:	Michal Hocko <mhocko@kernel.org>
 M:	Roman Gushchin <roman.gushchin@linux.dev>
 M:	Shakeel Butt <shakeelb@google.com>
-R:	Muchun Song <songmuchun@bytedance.com>
+R:	Muchun Song <muchun.song@linux.dev>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
···

 HUGETLB SUBSYSTEM
 M:	Mike Kravetz <mike.kravetz@oracle.com>
-M:	Muchun Song <songmuchun@bytedance.com>
+M:	Muchun Song <muchun.song@linux.dev>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-kernel-mm-hugepages
include/linux/swapops.h | +5 -3
···
  * can use the extra bits to store other information besides PFN.
  */
 #ifdef MAX_PHYSMEM_BITS
-#define SWP_PFN_BITS			(MAX_PHYSMEM_BITS - PAGE_SHIFT)
+#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
 #else /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_BITS			(BITS_PER_LONG - PAGE_SHIFT)
+#define SWP_PFN_BITS		min_t(int, \
+				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
+				      SWP_TYPE_SHIFT)
 #endif /* MAX_PHYSMEM_BITS */
-#define SWP_PFN_MASK			(BIT(SWP_PFN_BITS) - 1)
+#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
 
 /**
  * Migration swap entry specific bitfield definitions. Layout:
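The arithmetic behind this fix, as a standalone sketch: on a 32-bit kernel with CONFIG_PHYS_ADDR_T_64BIT (e.g. ARM LPAE), BITS_PER_LONG - PAGE_SHIFT leaves only 20 bits for the PFN, which cannot address physical memory above 4 GiB. The userspace program below recomputes both formulas; the PAGE_SHIFT and SWP_TYPE_SHIFT values are illustrative assumptions, not copied from any particular config.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */
#define BITS_PER_LONG	32	/* a 32-bit kernel */
#define SWP_TYPE_SHIFT	26	/* illustrative 32-bit value, not from a real config */

typedef uint64_t phys_addr_t;	/* CONFIG_PHYS_ADDR_T_64BIT, e.g. ARM LPAE */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int old_bits = BITS_PER_LONG - PAGE_SHIFT;
	int new_bits = MIN((int)(sizeof(phys_addr_t) * 8) - PAGE_SHIFT,
			   SWP_TYPE_SHIFT);

	/* Old formula: 20 bits, so PFNs of memory above 4 GiB get truncated. */
	printf("old SWP_PFN_BITS = %d, max PFN = 0x%llx\n",
	       old_bits, (1ULL << old_bits) - 1);
	/* Fixed formula: full phys_addr_t width, clamped to the entry's budget. */
	printf("new SWP_PFN_BITS = %d, max PFN = 0x%llx\n",
	       new_bits, (1ULL << new_bits) - 1);
	return 0;
}

On these assumed values the fixed formula yields 26 PFN bits (enough for 256 GiB of physical address space) versus 20 before.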
mm/gup.c | +1 -1
···
 		next = pud_addr_end(addr, end);
 		if (unlikely(!pud_present(pud)))
 			return 0;
-		if (unlikely(pud_huge(pud))) {
+		if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
 			if (!gup_huge_pud(pud, pudp, addr, next, flags,
 					  pages, nr))
 				return 0;
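Why the extra pud_devmap() test matters: a DAX PUD is a leaf entry marked devmap, and on configs where pud_huge() does not cover it, the old check fell through and gup_pmd_range() tried to walk page tables that do not exist. A toy userspace model of that branch (illustrative names and fields only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a PUD entry; not the kernel's layout. */
struct toy_pud {
	bool present;
	bool huge;	/* hugetlb-style 1 GiB leaf */
	bool devmap;	/* DAX / device memory leaf */
};

/* Old predicate: a devmap leaf is not recognised as a leaf. */
static bool is_leaf_old(const struct toy_pud *pud)
{
	return pud->huge;
}

/* Fixed predicate: either kind of leaf stops the descent. */
static bool is_leaf_new(const struct toy_pud *pud)
{
	return pud->huge || pud->devmap;
}

int main(void)
{
	struct toy_pud dax = { .present = true, .huge = false, .devmap = true };

	if (!dax.present)
		return 0;
	printf("old check descends into PMDs: %s (wrong for DAX)\n",
	       is_leaf_old(&dax) ? "no" : "yes");
	printf("new check descends into PMDs: %s\n",
	       is_leaf_new(&dax) ? "no" : "yes");
	return 0;
}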
mm/mmap.c | +4 -10
···
 	/* Search one past newbrk */
 	mas_set(&mas, newbrk);
 	brkvma = mas_find(&mas, oldbrk);
-	BUG_ON(brkvma == NULL);
-	if (brkvma->vm_start >= oldbrk)
+	if (!brkvma || brkvma->vm_start >= oldbrk)
 		goto out; /* mapping intersects with an existing non-brk vma. */
 	/*
 	 * mm->brk must be protected by write mmap_lock.
···
 	 * Expand the existing vma if possible; Note that singular lists do not
 	 * occur after forking, so the expand will only happen on new VMAs.
 	 */
-	if (vma &&
-	    (!vma->anon_vma || list_is_singular(&vma->anon_vma_chain)) &&
-	    ((vma->vm_flags & ~VM_SOFTDIRTY) == flags)) {
+	if (vma && vma->vm_end == addr && !vma_policy(vma) &&
+	    can_vma_merge_after(vma, flags, NULL, NULL,
+				addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
 		mas_set_range(mas, vma->vm_start, addr + len - 1);
 		if (mas_preallocate(mas, vma, GFP_KERNEL))
 			return -ENOMEM;
···
 		goto munmap_failed;
 
 	vma = mas_prev(&mas, 0);
-	if (!vma || vma->vm_end != addr || vma_policy(vma) ||
-	    !can_vma_merge_after(vma, flags, NULL, NULL,
-				 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL))
-		vma = NULL;
-
 	ret = do_brk_flags(&mas, vma, addr, len, flags);
 	populate = ((mm->def_flags & VM_LOCKED) != 0);
 	mmap_write_unlock(mm);
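The BUG_ON removed in the first hunk guarded the brk path, and the commit title spells out why it was wrong: userspace is free to munmap() pieces of its own brk region, so the VMA lookup can legitimately come back empty. A minimal userspace sketch of that scenario (a demonstration only; punching holes in the heap will break malloc, so it avoids libc allocation after the munmap):

#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	/* Current program break, rounded up to a page boundary. */
	unsigned long cur = (unsigned long)sbrk(0);
	unsigned long aligned = (cur + page - 1) & ~((unsigned long)page - 1);

	/* Grow the heap two pages past the boundary... */
	if (brk((void *)(aligned + 2 * page)) != 0)
		return 1;
	/* ...then punch a hole in the brk region, as userspace is free to. */
	if (munmap((void *)aligned, page) != 0)
		return 1;
	/* The next brk() must tolerate the missing mapping, not BUG_ON(). */
	brk((void *)aligned);
	return 0;
}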
mm/shmem.c | +11
···
 		index++;
 	}
 
+	/*
+	 * When undoing a failed fallocate, we want none of the partial folio
+	 * zeroing and splitting below, but shall want to truncate the whole
+	 * folio when !uptodate indicates that it was added by this fallocate,
+	 * even when [lstart, lend] covers only a part of the folio.
+	 */
+	if (unfalloc)
+		goto whole_folios;
+
 	same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT);
 	folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT);
 	if (folio) {
···
 		folio_unlock(folio);
 		folio_put(folio);
 	}
+
+whole_folios:
 
 	index = start;
 	while (index < end) {
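The new comment states the undo rule; modeled as a toy decision function (illustrative, not kernel code): when undoing a failed fallocate, a folio the fallocate itself added (!uptodate) is dropped whole even if only partially covered, while pre-existing data is left alone rather than partially zeroed.

#include <stdbool.h>
#include <stdio.h>

enum action { KEEP, DROP_WHOLE, ZERO_PARTIAL };

/*
 * unfalloc: we are undoing a failed fallocate.
 * uptodate: the folio held data before this fallocate.
 * partial:  [lstart, lend] covers only part of the folio.
 */
static enum action truncate_action(bool unfalloc, bool uptodate, bool partial)
{
	if (unfalloc)
		/* Undo drops only what the fallocate added, and drops it whole. */
		return uptodate ? KEEP : DROP_WHOLE;
	/* Normal truncation zeroes partial folios instead of dropping them. */
	return partial ? ZERO_PARTIAL : DROP_WHOLE;
}

int main(void)
{
	/* Partially covered folio that the failed fallocate itself added: */
	printf("undo, !uptodate, partial -> %d (DROP_WHOLE)\n",
	       truncate_action(true, false, true));
	/* Same folio, but it carried data before the fallocate: */
	printf("undo, uptodate, partial  -> %d (KEEP)\n",
	       truncate_action(true, true, true));
	return 0;
}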
tools/testing/selftests/cgroup/test_kmem.c | +3 -3
···
 
 /*
- * Memory cgroup charging is performed using percpu batches 32 pages
+ * Memory cgroup charging is performed using percpu batches 64 pages
  * big (look at MEMCG_CHARGE_BATCH), whereas memory.stat is exact. So
  * the maximum discrepancy between charge and vmstat entries is number
- * of cpus multiplied by 32 pages.
+ * of cpus multiplied by 64 pages.
  */
-#define MAX_VMSTAT_ERROR (4096 * 32 * get_nprocs())
+#define MAX_VMSTAT_ERROR (4096 * 64 * get_nprocs())
 
 
 static int alloc_dcache(const char *cgroup, void *arg)
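A worked example of the updated tolerance, using the same 4096-byte page assumption the test's macro hardcodes: with the per-CPU charge batch now 64 pages, the worst-case charge/vmstat drift scales with the CPU count.

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	/* 4096 assumes 4 KiB pages, exactly as the test's macro does. */
	long err = 4096L * 64 * get_nprocs();

	/* e.g. with 8 CPUs: 4096 * 64 * 8 = 2097152 bytes = 2 MiB of slack. */
	printf("MAX_VMSTAT_ERROR = %ld bytes (%.1f MiB)\n",
	       err, err / (1024.0 * 1024.0));
	return 0;
}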