Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'mm-hotfixes-stable-2024-11-16-15-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull hotfixes from Andrew Morton:
"10 hotfixes, 7 of which are cc:stable. All singletons, please see the
changelogs for details"

* tag 'mm-hotfixes-stable-2024-11-16-15-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm: revert "mm: shmem: fix data-race in shmem_getattr()"
ocfs2: uncache inode which has failed entering the group
mm: fix NULL pointer dereference in alloc_pages_bulk_noprof
mm, doc: update read_ahead_kb for MADV_HUGEPAGE
fs/proc/task_mmu: prevent integer overflow in pagemap_scan_get_args()
sched/task_stack: fix object_is_on_stack() for KASAN tagged pointers
crash, powerpc: default to CRASH_DUMP=n on PPC_BOOK3S_32
mm/mremap: fix address wraparound in move_page_tables()
tools/mm: fix compile error
mm, swap: fix allocation and scanning race with swapoff

+62 -10
+3
Documentation/ABI/stable/sysfs-block
··· 594 594 [RW] Maximum number of kilobytes to read-ahead for filesystems 595 595 on this block device. 596 596 597 + For MADV_HUGEPAGE, the readahead size may exceed this setting 598 + since its granularity is based on the hugepage size. 599 + 597 600 598 601 What: /sys/block/<disk>/queue/rotational 599 602 Date: January 2009
+3
arch/arm/Kconfig
··· 1598 1598 config ARCH_SUPPORTS_CRASH_DUMP 1599 1599 def_bool y 1600 1600 1601 + config ARCH_DEFAULT_CRASH_DUMP 1602 + def_bool y 1603 + 1601 1604 config AUTO_ZRELADDR 1602 1605 bool "Auto calculation of the decompressed kernel image address" if !ARCH_MULTIPLATFORM 1603 1606 default !(ARCH_FOOTBRIDGE || ARCH_RPC || ARCH_SA1100)
+3
arch/arm64/Kconfig
··· 1576 1576 config ARCH_SUPPORTS_CRASH_DUMP 1577 1577 def_bool y 1578 1578 1579 + config ARCH_DEFAULT_CRASH_DUMP 1580 + def_bool y 1581 + 1579 1582 config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION 1580 1583 def_bool CRASH_RESERVE 1581 1584
+3
arch/loongarch/Kconfig
··· 604 604 config ARCH_SUPPORTS_CRASH_DUMP 605 605 def_bool y 606 606 607 + config ARCH_DEFAULT_CRASH_DUMP 608 + def_bool y 609 + 607 610 config ARCH_SELECTS_CRASH_DUMP 608 611 def_bool y 609 612 depends on CRASH_DUMP
+3
arch/mips/Kconfig
··· 2876 2876 config ARCH_SUPPORTS_CRASH_DUMP 2877 2877 def_bool y 2878 2878 2879 + config ARCH_DEFAULT_CRASH_DUMP 2880 + def_bool y 2881 + 2879 2882 config PHYSICAL_START 2880 2883 hex "Physical address where the kernel is loaded" 2881 2884 default "0xffffffff84000000"
+4
arch/powerpc/Kconfig
··· 684 684 config ARCH_SUPPORTS_CRASH_DUMP 685 685 def_bool PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP) 686 686 687 + config ARCH_DEFAULT_CRASH_DUMP 688 + bool 689 + default y if !PPC_BOOK3S_32 690 + 687 691 config ARCH_SELECTS_CRASH_DUMP 688 692 def_bool y 689 693 depends on CRASH_DUMP
+3
arch/riscv/Kconfig
··· 898 898 config ARCH_SUPPORTS_CRASH_DUMP 899 899 def_bool y 900 900 901 + config ARCH_DEFAULT_CRASH_DUMP 902 + def_bool y 903 + 901 904 config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION 902 905 def_bool CRASH_RESERVE 903 906
+3
arch/s390/Kconfig
··· 276 276 This option also enables s390 zfcpdump. 277 277 See also <file:Documentation/arch/s390/zfcpdump.rst> 278 278 279 + config ARCH_DEFAULT_CRASH_DUMP 280 + def_bool y 281 + 279 282 menu "Processor type and features" 280 283 281 284 config HAVE_MARCH_Z10_FEATURES
+3
arch/sh/Kconfig
··· 550 550 config ARCH_SUPPORTS_CRASH_DUMP 551 551 def_bool BROKEN_ON_SMP 552 552 553 + config ARCH_DEFAULT_CRASH_DUMP 554 + def_bool y 555 + 553 556 config ARCH_SUPPORTS_KEXEC_JUMP 554 557 def_bool y 555 558
+3
arch/x86/Kconfig
··· 2084 2084 config ARCH_SUPPORTS_CRASH_DUMP 2085 2085 def_bool X86_64 || (X86_32 && HIGHMEM) 2086 2086 2087 + config ARCH_DEFAULT_CRASH_DUMP 2088 + def_bool y 2089 + 2087 2090 config ARCH_SUPPORTS_CRASH_HOTPLUG 2088 2091 def_bool y 2089 2092
+2
fs/ocfs2/resize.c
··· 574 574 ocfs2_commit_trans(osb, handle); 575 575 576 576 out_free_group_bh: 577 + if (ret < 0) 578 + ocfs2_remove_from_cache(INODE_CACHE(inode), group_bh); 577 579 brelse(group_bh); 578 580 579 581 out_unlock:
+3 -1
fs/proc/task_mmu.c
··· 2665 2665 return -EFAULT; 2666 2666 if (!arg->vec && arg->vec_len) 2667 2667 return -EINVAL; 2668 + if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX) 2669 + return -EINVAL; 2668 2670 if (arg->vec && !access_ok((void __user *)(long)arg->vec, 2669 - arg->vec_len * sizeof(struct page_region))) 2671 + size_mul(arg->vec_len, sizeof(struct page_region)))) 2670 2672 return -EFAULT; 2671 2673 2672 2674 /* Fixup default values */
+2
include/linux/sched/task_stack.h
··· 9 9 #include <linux/sched.h> 10 10 #include <linux/magic.h> 11 11 #include <linux/refcount.h> 12 + #include <linux/kasan.h> 12 13 13 14 #ifdef CONFIG_THREAD_INFO_IN_TASK 14 15 ··· 90 89 { 91 90 void *stack = task_stack_page(current); 92 91 92 + obj = kasan_reset_tag(obj); 93 93 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); 94 94 } 95 95
+1 -1
kernel/Kconfig.kexec
··· 97 97 98 98 config CRASH_DUMP 99 99 bool "kernel crash dumps" 100 - default y 100 + default ARCH_DEFAULT_CRASH_DUMP 101 101 depends on ARCH_SUPPORTS_CRASH_DUMP 102 102 depends on KEXEC_CORE 103 103 select VMCORE_INFO
+1 -1
mm/mremap.c
··· 648 648 * Prevent negative return values when {old,new}_addr was realigned 649 649 * but we broke out of the above loop for the first PMD itself. 650 650 */ 651 - if (len + old_addr < old_end) 651 + if (old_addr < old_end - len) 652 652 return 0; 653 653 654 654 return len + old_addr - old_end; /* how much done */
+2 -1
mm/page_alloc.c
··· 4607 4607 gfp = alloc_gfp; 4608 4608 4609 4609 /* Find an allowed local zone that meets the low watermark. */ 4610 - for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { 4610 + z = ac.preferred_zoneref; 4611 + for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { 4611 4612 unsigned long mark; 4612 4613 4613 4614 if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
-2
mm/shmem.c
··· 1166 1166 stat->attributes_mask |= (STATX_ATTR_APPEND | 1167 1167 STATX_ATTR_IMMUTABLE | 1168 1168 STATX_ATTR_NODUMP); 1169 - inode_lock_shared(inode); 1170 1169 generic_fillattr(idmap, request_mask, inode, stat); 1171 - inode_unlock_shared(inode); 1172 1170 1173 1171 if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0)) 1174 1172 stat->blksize = HPAGE_PMD_SIZE;
+19 -3
mm/swapfile.c
··· 664 664 return true; 665 665 } 666 666 667 - static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, 667 + static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, 668 668 unsigned int start, unsigned char usage, 669 669 unsigned int order) 670 670 { 671 671 unsigned int nr_pages = 1 << order; 672 + 673 + if (!(si->flags & SWP_WRITEOK)) 674 + return false; 672 675 673 676 if (cluster_is_free(ci)) { 674 677 if (nr_pages < SWAPFILE_CLUSTER) { ··· 693 690 list_move_tail(&ci->list, &si->full_clusters); 694 691 ci->flags = CLUSTER_FLAG_FULL; 695 692 } 693 + 694 + return true; 696 695 } 697 696 698 697 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset, ··· 718 713 719 714 while (offset <= end) { 720 715 if (cluster_scan_range(si, ci, offset, nr_pages)) { 721 - cluster_alloc_range(si, ci, offset, usage, order); 716 + if (!cluster_alloc_range(si, ci, offset, usage, order)) { 717 + offset = SWAP_NEXT_INVALID; 718 + goto done; 719 + } 722 720 *foundp = offset; 723 721 if (ci->count == SWAPFILE_CLUSTER) { 724 722 offset = SWAP_NEXT_INVALID; ··· 813 805 if (!list_empty(&si->free_clusters)) { 814 806 ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); 815 807 offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); 816 - VM_BUG_ON(!found); 808 + /* 809 + * Either we didn't touch the cluster due to swapoff, 810 + * or the allocation must success. 811 + */ 812 + VM_BUG_ON((si->flags & SWP_WRITEOK) && !found); 817 813 goto done; 818 814 } 819 815 ··· 1053 1041 1054 1042 VM_BUG_ON(!si->cluster_info); 1055 1043 1044 + si->flags += SWP_SCANNING; 1045 + 1056 1046 while (n_ret < nr) { 1057 1047 unsigned long offset = cluster_alloc_swap_entry(si, order, usage); 1058 1048 ··· 1062 1048 break; 1063 1049 slots[n_ret++] = swp_entry(si->type, offset); 1064 1050 } 1051 + 1052 + si->flags -= SWP_SCANNING; 1065 1053 1066 1054 return n_ret; 1067 1055 }
+1 -1
tools/mm/page-types.c
··· 420 420 if (opt_file) 421 421 printf("%lx\t", voffset); 422 422 if (opt_list_cgroup) 423 - printf("@%" PRIu64 "\t", cgroup) 423 + printf("@%" PRIu64 "\t", cgroup); 424 424 if (opt_list_mapcnt) 425 425 printf("%" PRIu64 "\t", mapcnt); 426 426