Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull MM fixes from Andrew Morton:
"6 hotfixes. 2 are cc:stable. All are for MM.

All are singletons - please see the changelogs for details"

* tag 'mm-hotfixes-stable-2026-03-23-17-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm/damon/stat: monitor all System RAM resources
mm/zswap: add missing kunmap_local()
mailmap: update email address for Muhammad Usama Anjum
zram: do not slot_free() written-back slots
mm/damon/core: avoid use of half-online-committed context
mm/rmap: clear vma->anon_vma on error

7 files changed, +93 -29

.mailmap (+1)

@@ -587,6 +587,7 @@
 Morten Welinder <welinder@anemone.rentec.com>
 Morten Welinder <welinder@darter.rentec.com>
 Morten Welinder <welinder@troll.com>
+Muhammad Usama Anjum <usama.anjum@arm.com> <usama.anjum@collabora.com>
 Mukesh Ojha <quic_mojha@quicinc.com> <mojha@codeaurora.org>
 Muna Sinada <quic_msinada@quicinc.com> <msinada@codeaurora.org>
 Murali Nalajala <quic_mnalajal@quicinc.com> <mnalajal@codeaurora.org>

drivers/block/zram/zram_drv.c (+14 -25)

@@ -917,9 +917,8 @@
 
 static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
 {
-        u32 size, index = req->pps->index;
-        int err, prio;
-        bool huge;
+        u32 index = req->pps->index;
+        int err;
 
         err = blk_status_to_errno(req->bio.bi_status);
         if (err) {
@@ -945,28 +946,13 @@
                 goto out;
         }
 
-        if (zram->compressed_wb) {
-                /*
-                 * ZRAM_WB slots get freed, we need to preserve data required
-                 * for read decompression.
-                 */
-                size = get_slot_size(zram, index);
-                prio = get_slot_comp_priority(zram, index);
-                huge = test_slot_flag(zram, index, ZRAM_HUGE);
-        }
-
-        slot_free(zram, index);
-        set_slot_flag(zram, index, ZRAM_WB);
+        clear_slot_flag(zram, index, ZRAM_IDLE);
+        if (test_slot_flag(zram, index, ZRAM_HUGE))
+                atomic64_dec(&zram->stats.huge_pages);
+        atomic64_sub(get_slot_size(zram, index), &zram->stats.compr_data_size);
+        zs_free(zram->mem_pool, get_slot_handle(zram, index));
         set_slot_handle(zram, index, req->blk_idx);
-
-        if (zram->compressed_wb) {
-                if (huge)
-                        set_slot_flag(zram, index, ZRAM_HUGE);
-                set_slot_size(zram, index, size);
-                set_slot_comp_priority(zram, index, prio);
-        }
-
-        atomic64_inc(&zram->stats.pages_stored);
+        set_slot_flag(zram, index, ZRAM_WB);
 
 out:
         slot_unlock(zram, index);
@@ -1994,8 +2010,13 @@
         set_slot_comp_priority(zram, index, 0);
 
         if (test_slot_flag(zram, index, ZRAM_HUGE)) {
+                /*
+                 * Writeback completion decrements ->huge_pages but keeps
+                 * ZRAM_HUGE flag for deferred decompression path.
+                 */
+                if (!test_slot_flag(zram, index, ZRAM_WB))
+                        atomic64_dec(&zram->stats.huge_pages);
                 clear_slot_flag(zram, index, ZRAM_HUGE);
-                atomic64_dec(&zram->stats.huge_pages);
         }
 
         if (test_slot_flag(zram, index, ZRAM_WB)) {
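
The core of the zram fix: zram_writeback_complete() used to call slot_free(), which wipes per-slot metadata (size, compression priority, ZRAM_HUGE) that is still needed to decompress the slot on a later read, and then tried to restore that metadata afterwards. The new code frees only the zsmalloc object and adjusts the stats, leaving the metadata in place. A minimal userspace model of that "free the payload, keep the read metadata" shape; every name below is an illustrative stand-in, not the kernel API:

/* Userspace model of the zram fix; names are illustrative, not kernel API. */
#include <assert.h>
#include <stdlib.h>

#define SLOT_WB   (1u << 0)     /* page now lives on the writeback device */
#define SLOT_HUGE (1u << 1)     /* page was stored uncompressed */

struct slot {
        void *handle;           /* compressed object, or block ref once WB */
        unsigned int flags;
        size_t size;            /* needed to decompress on a later read */
};

/* Buggy shape: wipes metadata that reads of a WB slot still need. */
static void slot_free_all(struct slot *s)
{
        free(s->handle);
        s->handle = NULL;
        s->flags = 0;
        s->size = 0;
}

/* Fixed shape: drop only the in-memory object, keep the read metadata. */
static void writeback_complete(struct slot *s, void *blk_ref)
{
        free(s->handle);        /* zs_free() in the kernel */
        s->handle = blk_ref;    /* set_slot_handle(..., req->blk_idx) */
        s->flags |= SLOT_WB;    /* set_slot_flag(..., ZRAM_WB) */
}

int main(void)
{
        static char blk;        /* dummy stand-in for a block index */
        struct slot a = { .handle = malloc(64), .flags = SLOT_HUGE, .size = 64 };
        struct slot b = { .handle = malloc(64), .flags = SLOT_HUGE, .size = 64 };

        writeback_complete(&a, &blk);
        assert(a.size == 64 && (a.flags & SLOT_HUGE));  /* metadata preserved */
        assert(a.flags & SLOT_WB);

        slot_free_all(&b);                              /* full teardown path */
        assert(b.size == 0 && b.flags == 0);
        return 0;
}

The matching hunk in the real slot-free path then skips the huge_pages decrement for ZRAM_WB slots, since writeback completion has already accounted for it.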

include/linux/damon.h (+6)

@@ -810,6 +810,12 @@
         struct damos_walk_control *walk_control;
         struct mutex walk_control_lock;
 
+        /*
+         * Indicates whether this context may be corrupted. Currently this is
+         * set only on damon_commit_ctx() failure.
+         */
+        bool maybe_corrupted;
+
         /* Working thread of the given DAMON context */
         struct task_struct *kdamond;
         /* Protects @kdamond field access */

mm/damon/core.c (+8)

@@ -1252,6 +1252,7 @@
 {
         int err;
 
+        dst->maybe_corrupted = true;
         if (!is_power_of_2(src->min_region_sz))
                 return -EINVAL;
 
@@ -1278,6 +1277,7 @@
         dst->addr_unit = src->addr_unit;
         dst->min_region_sz = src->min_region_sz;
 
+        dst->maybe_corrupted = false;
         return 0;
 }
 
@@ -2680,6 +2678,8 @@
                         complete(&control->completion);
                 else if (control->canceled && control->dealloc_on_cancel)
                         kfree(control);
+                if (!cancel && ctx->maybe_corrupted)
+                        break;
         }
 
         mutex_lock(&ctx->call_controls_lock);
@@ -2711,6 +2707,8 @@
                 kdamond_usleep(min_wait_time);
 
                 kdamond_call(ctx, false);
+                if (ctx->maybe_corrupted)
+                        return -EINVAL;
                 damos_walk_cancel(ctx);
         }
         return -EBUSY;
@@ -2796,6 +2790,8 @@
          * kdamond_merge_regions() if possible, to reduce overhead
          */
         kdamond_call(ctx, false);
+        if (ctx->maybe_corrupted)
+                break;
         if (!list_empty(&ctx->schemes))
                 kdamond_apply_schemes(ctx);
         else
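
Taken together, the two DAMON changes implement a pessimistic corruption flag: damon_commit_ctx() raises maybe_corrupted before touching the destination context and lowers it only once every step has succeeded, so any early error return leaves the flag set and the kdamond paths above stop using the half-committed context. A small runnable sketch of the idiom, with hypothetical names in place of the DAMON structures:

/* Sketch of the set-early, clear-on-success corruption-flag idiom. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
        int params[4];
        bool maybe_corrupted;
};

static int commit(struct ctx *dst, const struct ctx *src)
{
        dst->maybe_corrupted = true;    /* assume the worst up front */

        for (int i = 0; i < 4; i++) {
                if (src->params[i] < 0)
                        return -EINVAL; /* dst may now be half-updated */
                dst->params[i] = src->params[i];
        }

        dst->maybe_corrupted = false;   /* every step succeeded */
        return 0;
}

static void main_loop_iteration(struct ctx *ctx)
{
        if (ctx->maybe_corrupted) {
                /* like kdamond: bail instead of using half-committed state */
                fprintf(stderr, "context unusable, bailing out\n");
                return;
        }
        /* ... normal monitoring work ... */
}

int main(void)
{
        struct ctx dst = {0};
        struct ctx src = { .params = { 1, 2, -1, 4 } };

        if (commit(&dst, &src))
                main_loop_iteration(&dst);      /* prints and bails */
        return 0;
}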

mm/damon/stat.c (+50 -3)

@@ -145,12 +145,59 @@
         return 0;
 }
 
+struct damon_stat_system_ram_range_walk_arg {
+        bool walked;
+        struct resource res;
+};
+
+static int damon_stat_system_ram_walk_fn(struct resource *res, void *arg)
+{
+        struct damon_stat_system_ram_range_walk_arg *a = arg;
+
+        if (!a->walked) {
+                a->walked = true;
+                a->res.start = res->start;
+        }
+        a->res.end = res->end;
+        return 0;
+}
+
+static unsigned long damon_stat_res_to_core_addr(resource_size_t ra,
+                unsigned long addr_unit)
+{
+        /*
+         * Use div_u64() to avoid linking errors related to __udivdi3,
+         * __aeabi_uldivmod, or similar symbols. It may also be faster
+         * (see the div_u64() comment for details).
+         */
+        if (sizeof(ra) == 8 && sizeof(addr_unit) == 4)
+                return div_u64(ra, addr_unit);
+        return ra / addr_unit;
+}
+
+static int damon_stat_set_monitoring_region(struct damon_target *t,
+                unsigned long addr_unit, unsigned long min_region_sz)
+{
+        struct damon_addr_range addr_range;
+        struct damon_stat_system_ram_range_walk_arg arg = {};
+
+        walk_system_ram_res(0, -1, &arg, damon_stat_system_ram_walk_fn);
+        if (!arg.walked)
+                return -EINVAL;
+        addr_range.start = damon_stat_res_to_core_addr(
+                        arg.res.start, addr_unit);
+        addr_range.end = damon_stat_res_to_core_addr(
+                        arg.res.end + 1, addr_unit);
+        if (addr_range.end <= addr_range.start)
+                return -EINVAL;
+        return damon_set_regions(t, &addr_range, 1, min_region_sz);
+}
+
 static struct damon_ctx *damon_stat_build_ctx(void)
 {
         struct damon_ctx *ctx;
         struct damon_attrs attrs;
         struct damon_target *target;
-        unsigned long start = 0, end = 0;
 
         ctx = damon_new_ctx();
         if (!ctx)
@@ -180,8 +227,8 @@
         if (!target)
                 goto free_out;
         damon_add_target(ctx, target);
-        if (damon_set_region_biggest_system_ram_default(target, &start, &end,
-                                ctx->min_region_sz))
+        if (damon_stat_set_monitoring_region(target, ctx->addr_unit,
+                                ctx->min_region_sz))
                 goto free_out;
         return ctx;
 free_out:
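
The walk callback records the start of the first System RAM resource it sees and keeps overwriting the end with that of the latest one, so the monitoring region ends up spanning every System RAM range rather than only the biggest one (the old damon_set_region_biggest_system_ram_default() behavior). A userspace model of that coalescing, with a hypothetical range array standing in for what walk_system_ram_res() iterates:

/* Model of the first-start/last-end coalescing done by the walk callback. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct range { unsigned long start, end; };

struct walk_arg {
        bool walked;
        struct range res;
};

static int walk_fn(const struct range *res, void *arg)
{
        struct walk_arg *a = arg;

        if (!a->walked) {
                a->walked = true;
                a->res.start = res->start;      /* first range's start */
        }
        a->res.end = res->end;                  /* always the latest end */
        return 0;
}

int main(void)
{
        /* stand-in for the resources walk_system_ram_res() would visit */
        const struct range ram[] = {
                { 0x1000,     0x9ffff     },
                { 0x100000,   0x7fffffff  },    /* the "biggest" range */
                { 0x90000000, 0x9fffffff  },
        };
        struct walk_arg arg = { .walked = false };

        for (size_t i = 0; i < sizeof(ram) / sizeof(ram[0]); i++)
                walk_fn(&ram[i], &arg);

        /* one region covering all ranges, not just the biggest one */
        assert(arg.res.start == 0x1000 && arg.res.end == 0x9fffffff);
        return 0;
}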

mm/rmap.c (+7)

@@ -457,6 +457,13 @@
                 list_del(&avc->same_vma);
                 anon_vma_chain_free(avc);
         }
+
+        /*
+         * The anon_vma assigned to this VMA is no longer valid, as we were not
+         * able to correctly clone AVC state. Avoid inconsistent anon_vma tree
+         * state by resetting.
+         */
+        vma->anon_vma = NULL;
 }
 
 /**
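
The surrounding unwind code already frees every anon_vma_chain that was linked before the failure; the one-line fix also clears the VMA's back-pointer so a half-cloned anon_vma can never be reached through vma->anon_vma afterwards. A rough userspace model of that reset-on-failed-clone shape; the names are invented for illustration and deliberately far simpler than the real rmap structures:

/* Model of the unwind: free partial links and clear the back-pointer. */
#include <assert.h>
#include <stdlib.h>

struct anon { int refs; };
struct link { struct link *next; };

struct vma_model {
        struct link *chain;     /* stands in for the avc list */
        struct anon *anon;      /* stands in for vma->anon_vma */
};

static int clone_links(struct vma_model *v, struct anon *a, int n, int fail_at)
{
        v->anon = a;            /* assigned early, before the links exist */

        for (int i = 0; i < n; i++) {
                struct link *l;

                if (i == fail_at || !(l = calloc(1, sizeof(*l))))
                        goto unwind;
                l->next = v->chain;
                v->chain = l;
        }
        return 0;

unwind:
        while (v->chain) {      /* the pre-existing cleanup */
                struct link *l = v->chain;

                v->chain = l->next;
                free(l);
        }
        v->anon = NULL;         /* the fix: no half-linked back-pointer */
        return -1;
}

int main(void)
{
        struct anon a = {0};
        struct vma_model v = {0};

        assert(clone_links(&v, &a, 4, 2) == -1);
        assert(v.anon == NULL && v.chain == NULL);  /* consistent on failure */
        return 0;
}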

mm/zswap.c (+7 -1)

@@ -942,9 +942,15 @@
 
         /* zswap entries of length PAGE_SIZE are not compressed. */
         if (entry->length == PAGE_SIZE) {
+                void *dst;
+
                 WARN_ON_ONCE(input->length != PAGE_SIZE);
-                memcpy_from_sglist(kmap_local_folio(folio, 0), input, 0, PAGE_SIZE);
+
+                dst = kmap_local_folio(folio, 0);
+                memcpy_from_sglist(dst, input, 0, PAGE_SIZE);
                 dlen = PAGE_SIZE;
+                kunmap_local(dst);
+                flush_dcache_folio(folio);
         } else {
                 sg_init_table(&output, 1);
                 sg_set_folio(&output, folio, PAGE_SIZE, 0);
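
The rule being restored here is that every kmap_local_folio() must be paired with a kunmap_local() on the same address; the old one-liner passed the mapping straight into memcpy_from_sglist() and never unmapped it. Keeping the address in a local makes the pairing explicit, and flush_dcache_folio() then keeps the data cache coherent on architectures that need it. A tiny userspace model of the pairing discipline, using invented map_local()/unmap_local() helpers in place of the kernel primitives:

/* Userspace model: every map must be paired with an unmap on that address. */
#include <assert.h>
#include <string.h>

static int nr_mapped;                   /* models the per-CPU kmap slot count */
static char page[4096];

static void *map_local(void)            /* models kmap_local_folio() */
{
        nr_mapped++;
        return page;
}

static void unmap_local(void *addr)     /* models kunmap_local() */
{
        assert(addr == page);
        nr_mapped--;
}

static void copy_in(const void *src, size_t len)
{
        void *dst = map_local();        /* keep the address in a local ... */

        memcpy(dst, src, len);
        unmap_local(dst);               /* ... so it can be unmapped */
}

int main(void)
{
        const char data[16] = "hello";

        copy_in(data, sizeof(data));
        assert(nr_mapped == 0);         /* balanced: no leaked mapping */
        return 0;
}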