Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more hotfixes from Andrew Morton:
"Seventeen hotfixes. Mostly memory management things.

Ten patches are cc:stable, addressing pre-6.0 issues"

* tag 'mm-hotfixes-stable-2022-08-28' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
.mailmap: update Luca Ceresoli's e-mail address
mm/mprotect: only reference swap pfn page if type match
squashfs: don't call kmalloc in decompressors
mm/damon/dbgfs: avoid duplicate context directory creation
mailmap: update email address for Colin King
asm-generic: sections: refactor memory_intersects
bootmem: remove the vmemmap pages from kmemleak in put_page_bootmem
ocfs2: fix freeing uninitialized resource on ocfs2_dlm_shutdown
Revert "memcg: cleanup racy sum avoidance code"
mm/zsmalloc: do not attempt to free IS_ERR handle
binder_alloc: add missing mmap_lock calls when using the VMA
mm: re-allow pinning of zero pfns (again)
vmcoreinfo: add kallsyms_num_syms symbol
mailmap: update Guilherme G. Piccoli's email addresses
writeback: avoid use-after-free after removing device
shmem: update folio if shmem_replace_page() updates the page
mm/hugetlb: avoid corrupting page->mapping in hugetlb_mcopy_atomic_pte

21 files changed  +108 -60

.mailmap  +4 -2
···
 Christian Marangi <ansuelsmth@gmail.com>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
-Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
-Colin Ian King <colin.king@intel.com> <colin.i.king@gmail.com>
+Colin Ian King <colin.i.king@gmail.com> <colin.king@canonical.com>
 Corey Minyard <minyard@acm.org>
 Damian Hobson-Garcia <dhobsong@igel.co.jp>
 Daniel Borkmann <daniel@iogearbox.net> <danborkmann@googlemail.com>
···
 Greg Kroah-Hartman <greg@kroah.com>
 Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Gregory CLEMENT <gregory.clement@bootlin.com> <gregory.clement@free-electrons.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@linux.vnet.ibm.com>
+Guilherme G. Piccoli <kernel@gpiccoli.net> <gpiccoli@canonical.com>
 Guo Ren <guoren@kernel.org> <guoren@linux.alibaba.com>
 Guo Ren <guoren@kernel.org> <ren_guo@c-sky.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
···
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
 Lorenzo Pieralisi <lpieralisi@kernel.org> <lorenzo.pieralisi@arm.com>
+Luca Ceresoli <luca.ceresoli@bootlin.com> <luca@lucaceresoli.net>
 Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
 Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
 Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>

drivers/android/binder_alloc.c  +21 -10
···
 	size_t size, data_offsets_size;
 	int ret;
 
+	mmap_read_lock(alloc->vma_vm_mm);
 	if (!binder_alloc_get_vma(alloc)) {
+		mmap_read_unlock(alloc->vma_vm_mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
+	mmap_read_unlock(alloc->vma_vm_mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
···
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-	if (binder_alloc_get_vma(alloc) != NULL) {
-		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			page = &alloc->pages[i];
-			if (!page->page_ptr)
-				free++;
-			else if (list_empty(&page->lru))
-				active++;
-			else
-				lru++;
-		}
+
+	mmap_read_lock(alloc->vma_vm_mm);
+	if (binder_alloc_get_vma(alloc) == NULL) {
+		mmap_read_unlock(alloc->vma_vm_mm);
+		goto uninitialized;
 	}
+
+	mmap_read_unlock(alloc->vma_vm_mm);
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
+	}
+
+uninitialized:
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
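
The rule this fix enforces: the VMA may only be looked up while the mmap lock is held for read, so a concurrent munmap() cannot tear it down mid-check. Below is a minimal userspace sketch of that pattern, with a pthread rwlock standing in for mmap_read_lock()/mmap_read_unlock(); the names vma_present() and unmap() are illustrative stand-ins, not binder's real API.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t mmap_lock = PTHREAD_RWLOCK_INITIALIZER;
static int the_vma;
static void *vma = &the_vma;		/* stand-in for alloc->vma */

static int vma_present(void)
{
	int ok;

	pthread_rwlock_rdlock(&mmap_lock);	/* mmap_read_lock()       */
	ok = (vma != NULL);			/* binder_alloc_get_vma() */
	pthread_rwlock_unlock(&mmap_lock);	/* mmap_read_unlock()     */
	return ok;
}

static void unmap(void)
{
	pthread_rwlock_wrlock(&mmap_lock);	/* munmap() holds it for write */
	vma = NULL;
	pthread_rwlock_unlock(&mmap_lock);
}

int main(void)
{
	printf("before unmap: vma present = %d\n", vma_present());
	unmap();
	printf("after unmap:  vma present = %d\n", vma_present());
	return 0;
}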

fs/fs-writeback.c  +6 -6
···
 
 static void wb_wakeup(struct bdi_writeback *wb)
 {
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		mod_delayed_work(bdi_wq, &wb->dwork, 0);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void finish_writeback_work(struct bdi_writeback *wb,
···
 	if (work->done)
 		atomic_inc(&work->done->cnt);
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 
 	if (test_bit(WB_registered, &wb->state)) {
 		list_add_tail(&work->list, &wb->work_list);
···
 	} else
 		finish_writeback_work(wb, work);
 
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 /**
···
 {
 	struct wb_writeback_work *work = NULL;
 
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!list_empty(&wb->work_list)) {
 		work = list_entry(wb->work_list.next,
 				  struct wb_writeback_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 	return work;
 }
 
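
Why _bh is no longer enough here: with the mm/page-writeback.c change below, wb->work_lock is also taken on a path reachable from hard-interrupt context, so a holder that only disabled softirqs can be preempted by that interrupt and self-deadlock on its own lock. A toy model of the rule, under the stated assumption about where the contending acquisition runs (stand-in types, not kernel code):

#include <stdio.h>
#include <stdbool.h>

struct cpu { bool irqs_enabled; bool lock_held; };

static bool irq_path_can_deadlock(struct cpu c)
{
	/* A hardirq acquirer deadlocks iff it can preempt a holder,
	 * i.e. the holder left hard IRQs enabled. */
	return c.lock_held && c.irqs_enabled;
}

int main(void)
{
	struct cpu with_bh  = { .irqs_enabled = true,  .lock_held = true };
	struct cpu with_irq = { .irqs_enabled = false, .lock_held = true };

	printf("spin_lock_bh  holder: deadlock possible = %d\n",
	       irq_path_can_deadlock(with_bh));
	printf("spin_lock_irq holder: deadlock possible = %d\n",
	       irq_path_can_deadlock(with_irq));
	return 0;
}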

fs/ocfs2/dlmglue.c  +5 -3
···
 	ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
 	ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
 
-	ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
-	osb->cconn = NULL;
+	if (osb->cconn) {
+		ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
+		osb->cconn = NULL;
 
-	ocfs2_dlm_shutdown_debug(osb);
+		ocfs2_dlm_shutdown_debug(osb);
+	}
 }
 
 static int ocfs2_drop_lock(struct ocfs2_super *osb,

fs/ocfs2/super.c  +1 -2
···
 	    !ocfs2_is_hard_readonly(osb))
 		hangup_needed = 1;
 
-	if (osb->cconn)
-		ocfs2_dlm_shutdown(osb, hangup_needed);
+	ocfs2_dlm_shutdown(osb, hangup_needed);
 
 	ocfs2_blockcheck_stats_debugfs_remove(&osb->osb_ecc_stats);
 	debugfs_remove_recursive(osb->osb_debug_root);

fs/squashfs/file.c  +1 -1
···
 
 	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-	kfree(actor);
+	squashfs_page_actor_free(actor);
 
 	if (res == expected) {
 		int bytes;

fs/squashfs/file_direct.c  +1 -1
···
 	/* Decompress directly into the page cache buffers */
 	res = squashfs_read_data(inode->i_sb, block, bsize, NULL, actor);
 
-	kfree(actor);
+	squashfs_page_actor_free(actor);
 
 	if (res < 0)
 		goto mark_errored;

fs/squashfs/page_actor.c  +15 -19
···
 	actor->buffer = buffer;
 	actor->pages = pages;
 	actor->next_page = 0;
+	actor->tmp_buffer = NULL;
 	actor->squashfs_first_page = cache_first_page;
 	actor->squashfs_next_page = cache_next_page;
 	actor->squashfs_finish_page = cache_finish_page;
···
 
 	if ((actor->next_page == actor->pages) ||
 	    (actor->next_index != actor->page[actor->next_page]->index)) {
-		if (actor->alloc_buffer) {
-			void *tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
-
-			if (tmp_buffer) {
-				actor->tmp_buffer = tmp_buffer;
-				actor->next_index++;
-				actor->returned_pages++;
-				return tmp_buffer;
-			}
-		}
-
 		actor->next_index++;
 		actor->returned_pages++;
-		return ERR_PTR(-ENOMEM);
+		return actor->alloc_buffer ? actor->tmp_buffer : ERR_PTR(-ENOMEM);
 	}
 
 	actor->next_index++;
···
 
 static void *direct_next_page(struct squashfs_page_actor *actor)
 {
-	if (actor->pageaddr)
+	if (actor->pageaddr) {
 		kunmap_local(actor->pageaddr);
-
-	kfree(actor->tmp_buffer);
-	actor->pageaddr = actor->tmp_buffer = NULL;
+		actor->pageaddr = NULL;
+	}
 
 	return handle_next_page(actor);
 }
···
 {
 	if (actor->pageaddr)
 		kunmap_local(actor->pageaddr);
-
-	kfree(actor->tmp_buffer);
 }
 
 struct squashfs_page_actor *squashfs_page_actor_init_special(struct squashfs_sb_info *msblk,
···
 	if (actor == NULL)
 		return NULL;
 
+	if (msblk->decompressor->alloc_buffer) {
+		actor->tmp_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+		if (actor->tmp_buffer == NULL) {
+			kfree(actor);
+			return NULL;
+		}
+	} else
+		actor->tmp_buffer = NULL;
+
 	actor->length = length ? : pages * PAGE_SIZE;
 	actor->page = page;
 	actor->pages = pages;
···
 	actor->returned_pages = 0;
 	actor->next_index = page[0]->index & ~((1 << (msblk->block_log - PAGE_SHIFT)) - 1);
 	actor->pageaddr = NULL;
-	actor->tmp_buffer = NULL;
 	actor->alloc_buffer = msblk->decompressor->alloc_buffer;
 	actor->squashfs_first_page = direct_first_page;
 	actor->squashfs_next_page = direct_next_page;

fs/squashfs/page_actor.h  +5
···
 extern struct squashfs_page_actor *squashfs_page_actor_init_special(
 				struct squashfs_sb_info *msblk,
 				struct page **page, int pages, int length);
+static inline void squashfs_page_actor_free(struct squashfs_page_actor *actor)
+{
+	kfree(actor->tmp_buffer);
+	kfree(actor);
+}
 static inline void *squashfs_first_page(struct squashfs_page_actor *actor)
 {
 	return actor->squashfs_first_page(actor);
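
The pattern behind the squashfs change: the bounce buffer moves from a kmalloc() inside the decompressor hot path, where an allocation failure is awkward to unwind mid-stream, to a single allocation at actor-init time, mirrored by squashfs_page_actor_free() at teardown. A minimal userspace analogue of that preallocate-at-init pattern; actor_init()/actor_free() are made-up stand-ins, not the squashfs functions:

#include <stdio.h>
#include <stdlib.h>

struct actor {
	void *tmp_buffer;		/* preallocated bounce page */
};

static struct actor *actor_init(int need_buffer)
{
	struct actor *a = malloc(sizeof(*a));

	if (!a)
		return NULL;
	a->tmp_buffer = need_buffer ? malloc(4096) : NULL;
	if (need_buffer && !a->tmp_buffer) {
		free(a);		/* fail up front, not mid-decompress */
		return NULL;
	}
	return a;
}

static void actor_free(struct actor *a)
{
	free(a->tmp_buffer);		/* mirrors squashfs_page_actor_free() */
	free(a);
}

int main(void)
{
	struct actor *a = actor_init(1);

	if (!a)
		return 1;
	printf("bounce buffer ready at %p before decompression starts\n",
	       a->tmp_buffer);
	actor_free(a);
	return 0;
}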

include/asm-generic/sections.h  +5 -2
···
 /**
  * memory_intersects - checks if the region occupied by an object intersects
  *                     with another memory region
- * @begin: virtual address of the beginning of the memory regien
+ * @begin: virtual address of the beginning of the memory region
  * @end: virtual address of the end of the memory region
  * @virt: virtual address of the memory object
  * @size: size of the memory object
···
 {
 	void *vend = virt + size;
 
-	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
+	if (virt < end && vend > begin)
+		return true;
+
+	return false;
 }
 
 /**
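
The old expression only tested whether either endpoint of the object lands inside the region, so an object that fully covers the region was reported as not intersecting (and a vend that merely touches begin was reported as intersecting). A small runnable demonstration of the difference, as plain userspace C rather than the kernel header:

#include <stdio.h>
#include <stdbool.h>

static bool old_intersects(char *begin, char *end, char *virt, size_t size)
{
	char *vend = virt + size;

	return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

static bool new_intersects(char *begin, char *end, char *virt, size_t size)
{
	char *vend = virt + size;

	return virt < end && vend > begin;	/* standard interval-overlap test */
}

int main(void)
{
	static char mem[100];
	char *begin = mem + 40, *end = mem + 60;	/* region [40, 60)           */
	char *obj = mem + 20;				/* object [20, 80) covers it */

	printf("covering object intersects? old: %d, new: %d\n",
	       old_intersects(begin, end, obj, 60),
	       new_intersects(begin, end, obj, 60));
	return 0;
}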

include/linux/memcontrol.h  +13 -2
···
 
 static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-	return READ_ONCE(memcg->vmstats.state[idx]);
+	long x = READ_ONCE(memcg->vmstats.state[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
 }
 
 static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
 					      enum node_stat_item idx)
 {
 	struct mem_cgroup_per_node *pn;
+	long x;
 
 	if (mem_cgroup_disabled())
 		return node_page_state(lruvec_pgdat(lruvec), idx);
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	return READ_ONCE(pn->lruvec_stats.state[idx]);
+	x = READ_ONCE(pn->lruvec_stats.state[idx]);
+#ifdef CONFIG_SMP
+	if (x < 0)
+		x = 0;
+#endif
+	return x;
 }
 
 static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
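
Context for the revert: per-CPU stat deltas are folded into the totals without synchronization, so a reader can observe a decrement before the matching increment has been accounted and see a transiently negative sum; the restored clamp hides that artifact on SMP so the unsigned return value never wraps. A toy single-threaded illustration of such an interleaving:

#include <stdio.h>

int main(void)
{
	long percpu[2] = { 0, 0 };
	long snapshot;

	percpu[1] -= 1;				/* CPU1 records the uncharge first   */
	snapshot = percpu[0] + percpu[1];	/* reader sums mid-update: sees -1   */
	percpu[0] += 1;				/* matching charge lands afterwards  */

	printf("racy sum = %ld, clamped = %ld\n",
	       snapshot, snapshot < 0 ? 0 : snapshot);
	return 0;
}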

include/linux/mm.h  +10 -3
···
 	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
 		return false;
 #endif
-	return !(is_device_coherent_page(page) ||
-		 is_zone_movable_page(page) ||
-		 is_zero_pfn(page_to_pfn(page)));
+	/* The zero page may always be pinned */
+	if (is_zero_pfn(page_to_pfn(page)))
+		return true;
+
+	/* Coherent device memory must always allow eviction. */
+	if (is_device_coherent_page(page))
+		return false;
+
+	/* Otherwise, non-movable zone pages can be pinned. */
+	return !is_zone_movable_page(page);
 }
 #else
 static inline bool is_longterm_pinnable_page(struct page *page)
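
What changed in the predicate: the old expression rejected any zero-pfn page outright, which broke long-term pinning of the zero page; the new ordering whitelists it before the other tests. A hedged boolean sketch of the two forms (toy struct, not the kernel types):

#include <stdio.h>
#include <stdbool.h>

struct pageinfo { bool device_coherent, zone_movable, zero_pfn; };

static bool old_pinnable(struct pageinfo p)
{
	return !(p.device_coherent || p.zone_movable || p.zero_pfn);
}

static bool new_pinnable(struct pageinfo p)
{
	if (p.zero_pfn)
		return true;		/* the zero page may always be pinned */
	if (p.device_coherent)
		return false;		/* must stay evictable                */
	return !p.zone_movable;
}

int main(void)
{
	struct pageinfo zero_page = { .zero_pfn = true };

	printf("zero page pinnable? old: %d, new: %d\n",
	       old_pinnable(zero_page), new_pinnable(zero_page));
	return 0;
}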

kernel/crash_core.c  +1
···
 
 #ifdef CONFIG_KALLSYMS
 	VMCOREINFO_SYMBOL(kallsyms_names);
+	VMCOREINFO_SYMBOL(kallsyms_num_syms);
 	VMCOREINFO_SYMBOL(kallsyms_token_table);
 	VMCOREINFO_SYMBOL(kallsyms_token_index);
 #ifdef CONFIG_KALLSYMS_BASE_RELATIVE

mm/backing-dev.c  +5 -5
···
 	unsigned long timeout;
 
 	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (test_bit(WB_registered, &wb->state))
 		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 }
 
 static void wb_update_bandwidth_workfn(struct work_struct *work)
···
 static void wb_shutdown(struct bdi_writeback *wb)
 {
 	/* Make sure nobody queues further work */
-	spin_lock_bh(&wb->work_lock);
+	spin_lock_irq(&wb->work_lock);
 	if (!test_and_clear_bit(WB_registered, &wb->state)) {
-		spin_unlock_bh(&wb->work_lock);
+		spin_unlock_irq(&wb->work_lock);
 		return;
 	}
-	spin_unlock_bh(&wb->work_lock);
+	spin_unlock_irq(&wb->work_lock);
 
 	cgwb_remove_from_bdi_list(wb);
 	/*

mm/bootmem_info.c  +2
···
 #include <linux/memblock.h>
 #include <linux/bootmem_info.h>
 #include <linux/memory_hotplug.h>
+#include <linux/kmemleak.h>
 
 void get_page_bootmem(unsigned long info, struct page *page, unsigned long type)
 {
···
 		ClearPagePrivate(page);
 		set_page_private(page, 0);
 		INIT_LIST_HEAD(&page->lru);
+		kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
 		free_reserved_page(page);
 	}
 }
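
Why the kmemleak_free_part() call matters: kmemleak was told about these pages when they were reserved, so they must be removed from its registry before they go back to the page allocator, or the tracker keeps a stale reference into freed memory. A hedged userspace analogue of the register/unregister-before-free discipline; track()/untrack() are made-up stand-ins for the kmemleak calls:

#include <stdio.h>
#include <stdlib.h>

#define NTRACK 16
static void *registry[NTRACK];

static void track(void *p)
{
	for (int i = 0; i < NTRACK; i++)
		if (!registry[i]) {
			registry[i] = p;
			return;
		}
}

static void untrack(void *p)
{
	for (int i = 0; i < NTRACK; i++)
		if (registry[i] == p)
			registry[i] = NULL;
}

int main(void)
{
	void *page = malloc(4096);

	track(page);		/* registered when the page was reserved     */
	untrack(page);		/* the fix: unregister before freeing        */
	free(page);

	for (int i = 0; i < NTRACK; i++)
		if (registry[i]) {
			printf("stale entry %p\n", registry[i]);
			return 1;
		}
	printf("registry clean after free\n");
	return 0;
}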

mm/damon/dbgfs.c  +3
···
 		return -ENOENT;
 
 	new_dir = debugfs_create_dir(name, root);
+	/* Below check is required for a potential duplicated name case */
+	if (IS_ERR(new_dir))
+		return PTR_ERR(new_dir);
 	dbgfs_dirs[dbgfs_nr_ctxs] = new_dir;
 
 	new_ctx = dbgfs_new_ctx();
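
The bug class here is skipping the error-pointer check: debugfs_create_dir() reports failure (e.g. a duplicated name) by returning an ERR_PTR-encoded pointer, not NULL, so storing it unchecked plants a poison pointer in dbgfs_dirs[]. A hedged userspace sketch of the kernel's ERR_PTR convention; create_dir() is a made-up stand-in:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static char dirbuf[32];

static void *create_dir(const char *name, int already_exists)
{
	if (already_exists)
		return ERR_PTR(-EEXIST);	/* error pointer, not NULL */
	snprintf(dirbuf, sizeof(dirbuf), "%s", name);
	return dirbuf;
}

int main(void)
{
	void *dir = create_dir("ctx0", 1);	/* duplicated name */

	if (IS_ERR(dir)) {			/* the previously missing check */
		printf("create_dir failed: %ld\n", PTR_ERR(dir));
		return 1;
	}
	return 0;
}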

mm/hugetlb.c  +1 -1
···
 	if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
 		goto out_release_unlock;
 
-	if (vm_shared) {
+	if (page_in_pagecache) {
 		page_dup_file_rmap(page, true);
 	} else {
 		ClearHPageRestoreReserve(page);
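
The one-word change encodes a real distinction: the rmap type must follow where the page actually lives, not how the VMA is mapped. With UFFDIO_CONTINUE, a private (!vm_shared) mapping can still be backed by a page-cache page, and taking the anonymous-rmap path there overwrites page->mapping. A hedged boolean sketch of the two keys:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	/* UFFDIO_CONTINUE on a private mapping over a page-cache page */
	bool vm_shared = false, page_in_pagecache = true;

	printf("old key (vm_shared):         %s\n",
	       vm_shared ? "file rmap" : "anon rmap - corrupts page->mapping");
	printf("new key (page_in_pagecache): %s\n",
	       page_in_pagecache ? "file rmap" : "anon rmap");
	return 0;
}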

mm/mprotect.c  +2 -1
···
 			pages++;
 		} else if (is_swap_pte(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
-			struct page *page = pfn_swap_entry_to_page(entry);
 			pte_t newpte;
 
 			if (is_writable_migration_entry(entry)) {
+				struct page *page = pfn_swap_entry_to_page(entry);
+
 				/*
 				 * A protection check is difficult so
 				 * just be safe and disable write
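
The point of moving the pfn_swap_entry_to_page() call: a swap entry packs a (type, offset) pair, and the offset field is a pfn only for pfn-type entries such as migration entries, so decoding it before the type check conjures a bogus page pointer for ordinary swap entries. A hedged sketch of check-type-before-decode, with toy types standing in for swp_entry_t:

#include <stdio.h>
#include <stdbool.h>

enum swp_type { SWAP_SLOT = 0, MIGRATION_WRITE = 1 };

struct swp_entry { enum swp_type type; unsigned long offset; };

static bool is_writable_migration_entry(struct swp_entry e)
{
	return e.type == MIGRATION_WRITE;
}

int main(void)
{
	/* offset here is a swap slot index, not a pfn */
	struct swp_entry e = { SWAP_SLOT, 0x1234 };

	if (is_writable_migration_entry(e))
		printf("pfn = %lx\n", e.offset);	/* safe: offset is a pfn */
	else
		printf("offset %lx is not a pfn; must not be treated as a page\n",
		       e.offset);
	return 0;
}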

mm/page-writeback.c  +5 -1
···
 
 static void wb_inode_writeback_end(struct bdi_writeback *wb)
 {
+	unsigned long flags;
 	atomic_dec(&wb->writeback_inodes);
 	/*
 	 * Make sure estimate of writeback throughput gets updated after
···
 	 * that if multiple inodes end writeback at a similar time, they get
 	 * batched into one bandwidth update.
 	 */
-	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_lock_irqsave(&wb->work_lock, flags);
+	if (test_bit(WB_registered, &wb->state))
+		queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
+	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
 bool __folio_end_writeback(struct folio *folio)

mm/shmem.c  +1
···
 
 	if (shmem_should_replace_folio(folio, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
+		folio = page_folio(page);
 		if (error)
 			goto failed;
 	}
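
The one-line fix addresses a stale-pointer pattern: shmem_replace_page() may swap in a different backing page through its struct page ** argument, so a folio pointer derived from the old page must be re-derived afterwards. A hedged sketch of the pattern with toy stand-in types:

#include <stdio.h>

struct page { int id; };

static int replace_page(struct page **pagep)
{
	static struct page newpage = { 2 };

	*pagep = &newpage;	/* caller's page pointer now differs */
	return 0;
}

int main(void)
{
	static struct page oldpage = { 1 };
	struct page *page = &oldpage;
	struct page *folio = page;	/* cached before the call */

	replace_page(&page);
	folio = page;	/* the fix: refresh, as in folio = page_folio(page) */

	printf("folio tracks page %d (a stale pointer would still say 1)\n",
	       folio->id);
	return 0;
}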

mm/zsmalloc.c  +1 -1
···
 	struct size_class *class;
 	enum fullness_group fullness;
 
-	if (unlikely(!handle))
+	if (IS_ERR_OR_NULL((void *)handle))
 		return;
 
 	/*