Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

mm/damon: rename min_sz_region of damon_ctx to min_region_sz

The 'min_sz_region' field of 'struct damon_ctx' represents the minimum size
of each DAMON region for the context. 'struct damos_access_pattern' has a
field of the same name. This confuses readers and makes 'grep' searches for
either field less useful. Rename the damon_ctx field to 'min_region_sz'.

Link: https://lkml.kernel.org/r/20260117175256.82826-9-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

SeongJae Park and committed by
Andrew Morton
cc1db8df dfb1b0c9

+49 -47
+4 -4
include/linux/damon.h
··· 773 773 * 774 774 * @ops: Set of monitoring operations for given use cases. 775 775 * @addr_unit: Scale factor for core to ops address conversion. 776 - * @min_sz_region: Minimum region size. 776 + * @min_region_sz: Minimum region size. 777 777 * @adaptive_targets: Head of monitoring targets (&damon_target) list. 778 778 * @schemes: Head of schemes (&damos) list. 779 779 */ ··· 818 818 /* public: */ 819 819 struct damon_operations ops; 820 820 unsigned long addr_unit; 821 - unsigned long min_sz_region; 821 + unsigned long min_region_sz; 822 822 823 823 struct list_head adaptive_targets; 824 824 struct list_head schemes; ··· 907 907 void damon_add_region(struct damon_region *r, struct damon_target *t); 908 908 void damon_destroy_region(struct damon_region *r, struct damon_target *t); 909 909 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, 910 - unsigned int nr_ranges, unsigned long min_sz_region); 910 + unsigned int nr_ranges, unsigned long min_region_sz); 911 911 void damon_update_region_access_rate(struct damon_region *r, bool accessed, 912 912 struct damon_attrs *attrs); 913 913 ··· 975 975 976 976 int damon_set_region_biggest_system_ram_default(struct damon_target *t, 977 977 unsigned long *start, unsigned long *end, 978 - unsigned long min_sz_region); 978 + unsigned long min_region_sz); 979 979 980 980 #endif /* CONFIG_DAMON */ 981 981
+35 -34
mm/damon/core.c
··· 203 203 * @t: the given target. 204 204 * @ranges: array of new monitoring target ranges. 205 205 * @nr_ranges: length of @ranges. 206 - * @min_sz_region: minimum region size. 206 + * @min_region_sz: minimum region size. 207 207 * 208 208 * This function adds new regions to, or modify existing regions of a 209 209 * monitoring target to fit in specific ranges. ··· 211 211 * Return: 0 if success, or negative error code otherwise. 212 212 */ 213 213 int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, 214 - unsigned int nr_ranges, unsigned long min_sz_region) 214 + unsigned int nr_ranges, unsigned long min_region_sz) 215 215 { 216 216 struct damon_region *r, *next; 217 217 unsigned int i; ··· 248 248 /* no region intersects with this range */ 249 249 newr = damon_new_region( 250 250 ALIGN_DOWN(range->start, 251 - min_sz_region), 252 - ALIGN(range->end, min_sz_region)); 251 + min_region_sz), 252 + ALIGN(range->end, min_region_sz)); 253 253 if (!newr) 254 254 return -ENOMEM; 255 255 damon_insert_region(newr, damon_prev_region(r), r, t); 256 256 } else { 257 257 /* resize intersecting regions to fit in this range */ 258 258 first->ar.start = ALIGN_DOWN(range->start, 259 - min_sz_region); 260 - last->ar.end = ALIGN(range->end, min_sz_region); 259 + min_region_sz); 260 + last->ar.end = ALIGN(range->end, min_region_sz); 261 261 262 262 /* fill possible holes in the range */ 263 263 err = damon_fill_regions_holes(first, last, t); ··· 553 553 ctx->attrs.max_nr_regions = 1000; 554 554 555 555 ctx->addr_unit = 1; 556 - ctx->min_sz_region = DAMON_MIN_REGION_SZ; 556 + ctx->min_region_sz = DAMON_MIN_REGION_SZ; 557 557 558 558 INIT_LIST_HEAD(&ctx->adaptive_targets); 559 559 INIT_LIST_HEAD(&ctx->schemes); ··· 1142 1142 * If @src has no region, @dst keeps current regions. 
1143 1143 */ 1144 1144 static int damon_commit_target_regions(struct damon_target *dst, 1145 - struct damon_target *src, unsigned long src_min_sz_region) 1145 + struct damon_target *src, unsigned long src_min_region_sz) 1146 1146 { 1147 1147 struct damon_region *src_region; 1148 1148 struct damon_addr_range *ranges; ··· 1159 1159 i = 0; 1160 1160 damon_for_each_region(src_region, src) 1161 1161 ranges[i++] = src_region->ar; 1162 - err = damon_set_regions(dst, ranges, i, src_min_sz_region); 1162 + err = damon_set_regions(dst, ranges, i, src_min_region_sz); 1163 1163 kfree(ranges); 1164 1164 return err; 1165 1165 } ··· 1167 1167 static int damon_commit_target( 1168 1168 struct damon_target *dst, bool dst_has_pid, 1169 1169 struct damon_target *src, bool src_has_pid, 1170 - unsigned long src_min_sz_region) 1170 + unsigned long src_min_region_sz) 1171 1171 { 1172 1172 int err; 1173 1173 1174 - err = damon_commit_target_regions(dst, src, src_min_sz_region); 1174 + err = damon_commit_target_regions(dst, src, src_min_region_sz); 1175 1175 if (err) 1176 1176 return err; 1177 1177 if (dst_has_pid) ··· 1198 1198 err = damon_commit_target( 1199 1199 dst_target, damon_target_has_pid(dst), 1200 1200 src_target, damon_target_has_pid(src), 1201 - src->min_sz_region); 1201 + src->min_region_sz); 1202 1202 if (err) 1203 1203 return err; 1204 1204 } else { ··· 1225 1225 return -ENOMEM; 1226 1226 err = damon_commit_target(new_target, false, 1227 1227 src_target, damon_target_has_pid(src), 1228 - src->min_sz_region); 1228 + src->min_region_sz); 1229 1229 if (err) { 1230 1230 damon_destroy_target(new_target, NULL); 1231 1231 return err; ··· 1272 1272 } 1273 1273 dst->ops = src->ops; 1274 1274 dst->addr_unit = src->addr_unit; 1275 - dst->min_sz_region = src->min_sz_region; 1275 + dst->min_region_sz = src->min_region_sz; 1276 1276 1277 1277 return 0; 1278 1278 } ··· 1305 1305 1306 1306 if (ctx->attrs.min_nr_regions) 1307 1307 sz /= ctx->attrs.min_nr_regions; 1308 - if (sz < 
ctx->min_sz_region) 1309 - sz = ctx->min_sz_region; 1308 + if (sz < ctx->min_region_sz) 1309 + sz = ctx->min_region_sz; 1310 1310 1311 1311 return sz; 1312 1312 } ··· 1696 1696 * @t: The target of the region. 1697 1697 * @rp: The pointer to the region. 1698 1698 * @s: The scheme to be applied. 1699 - * @min_sz_region: minimum region size. 1699 + * @min_region_sz: minimum region size. 1700 1700 * 1701 1701 * If a quota of a scheme has exceeded in a quota charge window, the scheme's 1702 1702 * action would applied to only a part of the target access pattern fulfilling ··· 1714 1714 * Return: true if the region should be entirely skipped, false otherwise. 1715 1715 */ 1716 1716 static bool damos_skip_charged_region(struct damon_target *t, 1717 - struct damon_region **rp, struct damos *s, unsigned long min_sz_region) 1717 + struct damon_region **rp, struct damos *s, 1718 + unsigned long min_region_sz) 1718 1719 { 1719 1720 struct damon_region *r = *rp; 1720 1721 struct damos_quota *quota = &s->quota; ··· 1737 1736 if (quota->charge_addr_from && r->ar.start < 1738 1737 quota->charge_addr_from) { 1739 1738 sz_to_skip = ALIGN_DOWN(quota->charge_addr_from - 1740 - r->ar.start, min_sz_region); 1739 + r->ar.start, min_region_sz); 1741 1740 if (!sz_to_skip) { 1742 - if (damon_sz_region(r) <= min_sz_region) 1741 + if (damon_sz_region(r) <= min_region_sz) 1743 1742 return true; 1744 - sz_to_skip = min_sz_region; 1743 + sz_to_skip = min_region_sz; 1745 1744 } 1746 1745 damon_split_region_at(t, r, sz_to_skip); 1747 1746 r = damon_next_region(r); ··· 1767 1766 1768 1767 static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, 1769 1768 struct damon_region *r, struct damos_filter *filter, 1770 - unsigned long min_sz_region) 1769 + unsigned long min_region_sz) 1771 1770 { 1772 1771 bool matched = false; 1773 1772 struct damon_target *ti; ··· 1784 1783 matched = target_idx == filter->target_idx; 1785 1784 break; 1786 1785 case DAMOS_FILTER_TYPE_ADDR: 1787 - 
start = ALIGN_DOWN(filter->addr_range.start, min_sz_region); 1788 - end = ALIGN_DOWN(filter->addr_range.end, min_sz_region); 1786 + start = ALIGN_DOWN(filter->addr_range.start, min_region_sz); 1787 + end = ALIGN_DOWN(filter->addr_range.end, min_region_sz); 1789 1788 1790 1789 /* inside the range */ 1791 1790 if (start <= r->ar.start && r->ar.end <= end) { ··· 1821 1820 1822 1821 s->core_filters_allowed = false; 1823 1822 damos_for_each_core_filter(filter, s) { 1824 - if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) { 1823 + if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) { 1825 1824 if (filter->allow) 1826 1825 s->core_filters_allowed = true; 1827 1826 return !filter->allow; ··· 1956 1955 if (c->ops.apply_scheme) { 1957 1956 if (quota->esz && quota->charged_sz + sz > quota->esz) { 1958 1957 sz = ALIGN_DOWN(quota->esz - quota->charged_sz, 1959 - c->min_sz_region); 1958 + c->min_region_sz); 1960 1959 if (!sz) 1961 1960 goto update_stat; 1962 1961 damon_split_region_at(t, r, sz); ··· 2004 2003 if (quota->esz && quota->charged_sz >= quota->esz) 2005 2004 continue; 2006 2005 2007 - if (damos_skip_charged_region(t, &r, s, c->min_sz_region)) 2006 + if (damos_skip_charged_region(t, &r, s, c->min_region_sz)) 2008 2007 continue; 2009 2008 2010 2009 if (s->max_nr_snapshots && ··· 2497 2496 2498 2497 /* Split every region in the given target into 'nr_subs' regions */ 2499 2498 static void damon_split_regions_of(struct damon_target *t, int nr_subs, 2500 - unsigned long min_sz_region) 2499 + unsigned long min_region_sz) 2501 2500 { 2502 2501 struct damon_region *r, *next; 2503 2502 unsigned long sz_region, sz_sub = 0; ··· 2507 2506 sz_region = damon_sz_region(r); 2508 2507 2509 2508 for (i = 0; i < nr_subs - 1 && 2510 - sz_region > 2 * min_sz_region; i++) { 2509 + sz_region > 2 * min_region_sz; i++) { 2511 2510 /* 2512 2511 * Randomly select size of left sub-region to be at 2513 2512 * least 10 percent and at most 90% of original region 2514 2513 */ 
2515 2514 sz_sub = ALIGN_DOWN(damon_rand(1, 10) * 2516 - sz_region / 10, min_sz_region); 2515 + sz_region / 10, min_region_sz); 2517 2516 /* Do not allow blank region */ 2518 2517 if (sz_sub == 0 || sz_sub >= sz_region) 2519 2518 continue; ··· 2553 2552 nr_subregions = 3; 2554 2553 2555 2554 damon_for_each_target(t, ctx) 2556 - damon_split_regions_of(t, nr_subregions, ctx->min_sz_region); 2555 + damon_split_regions_of(t, nr_subregions, ctx->min_region_sz); 2557 2556 2558 2557 last_nr_regions = nr_regions; 2559 2558 } ··· 2903 2902 * @t: The monitoring target to set the region. 2904 2903 * @start: The pointer to the start address of the region. 2905 2904 * @end: The pointer to the end address of the region. 2906 - * @min_sz_region: Minimum region size. 2905 + * @min_region_sz: Minimum region size. 2907 2906 * 2908 2907 * This function sets the region of @t as requested by @start and @end. If the 2909 2908 * values of @start and @end are zero, however, this function finds the biggest ··· 2915 2914 */ 2916 2915 int damon_set_region_biggest_system_ram_default(struct damon_target *t, 2917 2916 unsigned long *start, unsigned long *end, 2918 - unsigned long min_sz_region) 2917 + unsigned long min_region_sz) 2919 2918 { 2920 2919 struct damon_addr_range addr_range; 2921 2920 ··· 2928 2927 2929 2928 addr_range.start = *start; 2930 2929 addr_range.end = *end; 2931 - return damon_set_regions(t, &addr_range, 1, min_sz_region); 2930 + return damon_set_regions(t, &addr_range, 1, min_region_sz); 2932 2931 } 2933 2932 2934 2933 /*
+2 -2
mm/damon/lru_sort.c
··· 298 298 if (!monitor_region_start && !monitor_region_end) 299 299 addr_unit = 1; 300 300 param_ctx->addr_unit = addr_unit; 301 - param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1); 301 + param_ctx->min_region_sz = max(DAMON_MIN_REGION_SZ / addr_unit, 1); 302 302 303 303 if (!damon_lru_sort_mon_attrs.sample_interval) { 304 304 err = -EINVAL; ··· 345 345 err = damon_set_region_biggest_system_ram_default(param_target, 346 346 &monitor_region_start, 347 347 &monitor_region_end, 348 - param_ctx->min_sz_region); 348 + param_ctx->min_region_sz); 349 349 if (err) 350 350 goto out; 351 351 err = damon_commit_ctx(ctx, param_ctx);
+2 -2
mm/damon/reclaim.c
··· 208 208 if (!monitor_region_start && !monitor_region_end) 209 209 addr_unit = 1; 210 210 param_ctx->addr_unit = addr_unit; 211 - param_ctx->min_sz_region = max(DAMON_MIN_REGION_SZ / addr_unit, 1); 211 + param_ctx->min_region_sz = max(DAMON_MIN_REGION_SZ / addr_unit, 1); 212 212 213 213 if (!damon_reclaim_mon_attrs.aggr_interval) { 214 214 err = -EINVAL; ··· 251 251 err = damon_set_region_biggest_system_ram_default(param_target, 252 252 &monitor_region_start, 253 253 &monitor_region_end, 254 - param_ctx->min_sz_region); 254 + param_ctx->min_region_sz); 255 255 if (err) 256 256 goto out; 257 257 err = damon_commit_ctx(ctx, param_ctx);
+1 -1
mm/damon/stat.c
··· 181 181 goto free_out; 182 182 damon_add_target(ctx, target); 183 183 if (damon_set_region_biggest_system_ram_default(target, &start, &end, 184 - ctx->min_sz_region)) 184 + ctx->min_region_sz)) 185 185 goto free_out; 186 186 return ctx; 187 187 free_out:
+5 -4
mm/damon/sysfs.c
··· 1365 1365 1366 1366 static int damon_sysfs_set_regions(struct damon_target *t, 1367 1367 struct damon_sysfs_regions *sysfs_regions, 1368 - unsigned long min_sz_region) 1368 + unsigned long min_region_sz) 1369 1369 { 1370 1370 struct damon_addr_range *ranges = kmalloc_array(sysfs_regions->nr, 1371 1371 sizeof(*ranges), GFP_KERNEL | __GFP_NOWARN); ··· 1387 1387 if (ranges[i - 1].end > ranges[i].start) 1388 1388 goto out; 1389 1389 } 1390 - err = damon_set_regions(t, ranges, sysfs_regions->nr, min_sz_region); 1390 + err = damon_set_regions(t, ranges, sysfs_regions->nr, min_region_sz); 1391 1391 out: 1392 1392 kfree(ranges); 1393 1393 return err; ··· 1409 1409 return -EINVAL; 1410 1410 } 1411 1411 t->obsolete = sys_target->obsolete; 1412 - return damon_sysfs_set_regions(t, sys_target->regions, ctx->min_sz_region); 1412 + return damon_sysfs_set_regions(t, sys_target->regions, 1413 + ctx->min_region_sz); 1413 1414 } 1414 1415 1415 1416 static int damon_sysfs_add_targets(struct damon_ctx *ctx, ··· 1470 1469 ctx->addr_unit = sys_ctx->addr_unit; 1471 1470 /* addr_unit is respected by only DAMON_OPS_PADDR */ 1472 1471 if (sys_ctx->ops_id == DAMON_OPS_PADDR) 1473 - ctx->min_sz_region = max( 1472 + ctx->min_region_sz = max( 1474 1473 DAMON_MIN_REGION_SZ / sys_ctx->addr_unit, 1); 1475 1474 err = damon_sysfs_set_attrs(ctx, sys_ctx->attrs); 1476 1475 if (err)