Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

ftrace: Do not over-allocate ftrace memory

The pg_remaining calculation in ftrace_process_locs() assumes that
ENTRIES_PER_PAGE multiplied by 2^order equals the actual capacity of the
allocated page group. However, ENTRIES_PER_PAGE is PAGE_SIZE / ENTRY_SIZE
(integer division). When PAGE_SIZE is not a multiple of ENTRY_SIZE (e.g.
4096 / 24 = 170 with remainder 16), high-order allocations (like 256 pages)
have significantly more capacity than 256 * 170. This leads to pg_remaining
being underestimated, which in turn makes skip (derived from skipped -
pg_remaining) larger than expected, causing the WARN(skip != remaining)
to trigger.

Extra allocated pages for ftrace: 2 with 654 skipped
WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:7295 ftrace_process_locs+0x5bf/0x5e0

A similar problem in ftrace_allocate_records() can result in allocating
too many pages. This can trigger the second warning in
ftrace_process_locs().

Extra allocated pages for ftrace
WARNING: CPU: 0 PID: 0 at kernel/trace/ftrace.c:7276 ftrace_process_locs+0x548/0x580

Use the actual capacity of a page group to determine the number of pages
to allocate. Have ftrace_allocate_pages() return the number of allocated
pages to avoid having to calculate it. Use the actual page group capacity
when validating the number of unused pages due to skipped entries.
Drop the definition of ENTRIES_PER_PAGE since it is no longer used.

Cc: stable@vger.kernel.org
Fixes: 4a3efc6baff93 ("ftrace: Update the mcount_loc check of skipped entries")
Link: https://patch.msgid.link/20260113152243.3557219-1-linux@roeck-us.net
Signed-off-by: Guenter Roeck <linux@roeck-us.net>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>

Authored by Guenter Roeck; committed by Steven Rostedt (Google).
be55257f 0f61b186

+15 -14
+15 -14
kernel/trace/ftrace.c
···
 };

 #define ENTRY_SIZE sizeof(struct dyn_ftrace)
-#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

 static struct ftrace_page *ftrace_pages_start;
 static struct ftrace_page *ftrace_pages;
···
 	return 0;
 }

-static int ftrace_allocate_records(struct ftrace_page *pg, int count)
+static int ftrace_allocate_records(struct ftrace_page *pg, int count,
+				   unsigned long *num_pages)
 {
 	int order;
 	int pages;
···
 		return -EINVAL;

 	/* We want to fill as much as possible, with no empty pages */
-	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
+	pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE);
 	order = fls(pages) - 1;

 again:
···
 	}

 	ftrace_number_of_pages += 1 << order;
+	*num_pages += 1 << order;
 	ftrace_number_of_groups++;

 	cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
···
 }

 static struct ftrace_page *
-ftrace_allocate_pages(unsigned long num_to_init)
+ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages)
 {
 	struct ftrace_page *start_pg;
 	struct ftrace_page *pg;
 	int cnt;
+
+	*num_pages = 0;

 	if (!num_to_init)
 		return NULL;
···
 	 * waste as little space as possible.
 	 */
 	for (;;) {
-		cnt = ftrace_allocate_records(pg, num_to_init);
+		cnt = ftrace_allocate_records(pg, num_to_init, num_pages);
 		if (cnt < 0)
 			goto free_pages;
···
 	if (!count)
 		return 0;

-	pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE);
-
 	/*
 	 * Sorting mcount in vmlinux at build time depend on
 	 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in
···
 		test_is_sorted(start, count);
 	}

-	start_pg = ftrace_allocate_pages(count);
+	start_pg = ftrace_allocate_pages(count, &pages);
 	if (!start_pg)
 		return -ENOMEM;
···
 	/* We should have used all pages unless we skipped some */
 	if (pg_unuse) {
 		unsigned long pg_remaining, remaining = 0;
-		unsigned long skip;
+		long skip;

 		/* Count the number of entries unused and compare it to skipped. */
-		pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index;
+		pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index;

 		if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) {

 			skip = skipped - pg_remaining;

-			for (pg = pg_unuse; pg; pg = pg->next)
+			for (pg = pg_unuse; pg && skip > 0; pg = pg->next) {
 				remaining += 1 << pg->order;
+				skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE;
+			}

 			pages -= remaining;
-
-			skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE);

 			/*
 			 * Check to see if the number of pages remaining would
 			 * just fit the number of entries skipped.
 			 */
-			WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped",
+			WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped",
 				remaining, skipped);
 		}
 		/* Need to synchronize with ftrace_location_range() */