Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull ftrace fix from Steven Rostedt:

- Fix allocation accounting on boot up

The ftrace records for each function that ftrace can attach to are
stored in groups of pages. At boot up, the number of pages is
calculated and allocated. After that, the pages are filled with data.
It may allocate more than needed because some functions end up not
being recorded (they are unused weak functions); the number of
functions skipped this way is also recorded.

After the data is filled in, a check is made to make sure the right
number of pages were allocated. But this check was off due to the
assumption that the same number of entries fits on every page.
Because the size of an entry does not evenly divide PAGE_SIZE,
there is a rounding error when a large number of pages is allocated
to hold the entries. This causes the check to fail and triggers a
warning.

Fix the accounting by finding out how many pages are actually
allocated from the functions that allocate them and use that to see
if all the pages allocated were used and the ones not used are
properly freed.

* tag 'ftrace-v6.19-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
ftrace: Do not over-allocate ftrace memory

+15 -14
+15 -14
kernel/trace/ftrace.c
··· 1148 1148 }; 1149 1149 1150 1150 #define ENTRY_SIZE sizeof(struct dyn_ftrace) 1151 - #define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE) 1152 1151 1153 1152 static struct ftrace_page *ftrace_pages_start; 1154 1153 static struct ftrace_page *ftrace_pages; ··· 3833 3834 return 0; 3834 3835 } 3835 3836 3836 - static int ftrace_allocate_records(struct ftrace_page *pg, int count) 3837 + static int ftrace_allocate_records(struct ftrace_page *pg, int count, 3838 + unsigned long *num_pages) 3837 3839 { 3838 3840 int order; 3839 3841 int pages; ··· 3844 3844 return -EINVAL; 3845 3845 3846 3846 /* We want to fill as much as possible, with no empty pages */ 3847 - pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); 3847 + pages = DIV_ROUND_UP(count * ENTRY_SIZE, PAGE_SIZE); 3848 3848 order = fls(pages) - 1; 3849 3849 3850 3850 again: ··· 3859 3859 } 3860 3860 3861 3861 ftrace_number_of_pages += 1 << order; 3862 + *num_pages += 1 << order; 3862 3863 ftrace_number_of_groups++; 3863 3864 3864 3865 cnt = (PAGE_SIZE << order) / ENTRY_SIZE; ··· 3888 3887 } 3889 3888 3890 3889 static struct ftrace_page * 3891 - ftrace_allocate_pages(unsigned long num_to_init) 3890 + ftrace_allocate_pages(unsigned long num_to_init, unsigned long *num_pages) 3892 3891 { 3893 3892 struct ftrace_page *start_pg; 3894 3893 struct ftrace_page *pg; 3895 3894 int cnt; 3895 + 3896 + *num_pages = 0; 3896 3897 3897 3898 if (!num_to_init) 3898 3899 return NULL; ··· 3909 3906 * waste as little space as possible. 
3910 3907 */ 3911 3908 for (;;) { 3912 - cnt = ftrace_allocate_records(pg, num_to_init); 3909 + cnt = ftrace_allocate_records(pg, num_to_init, num_pages); 3913 3910 if (cnt < 0) 3914 3911 goto free_pages; 3915 3912 ··· 7195 7192 if (!count) 7196 7193 return 0; 7197 7194 7198 - pages = DIV_ROUND_UP(count, ENTRIES_PER_PAGE); 7199 - 7200 7195 /* 7201 7196 * Sorting mcount in vmlinux at build time depend on 7202 7197 * CONFIG_BUILDTIME_MCOUNT_SORT, while mcount loc in ··· 7207 7206 test_is_sorted(start, count); 7208 7207 } 7209 7208 7210 - start_pg = ftrace_allocate_pages(count); 7209 + start_pg = ftrace_allocate_pages(count, &pages); 7211 7210 if (!start_pg) 7212 7211 return -ENOMEM; 7213 7212 ··· 7306 7305 /* We should have used all pages unless we skipped some */ 7307 7306 if (pg_unuse) { 7308 7307 unsigned long pg_remaining, remaining = 0; 7309 - unsigned long skip; 7308 + long skip; 7310 7309 7311 7310 /* Count the number of entries unused and compare it to skipped. */ 7312 - pg_remaining = (ENTRIES_PER_PAGE << pg->order) - pg->index; 7311 + pg_remaining = (PAGE_SIZE << pg->order) / ENTRY_SIZE - pg->index; 7313 7312 7314 7313 if (!WARN(skipped < pg_remaining, "Extra allocated pages for ftrace")) { 7315 7314 7316 7315 skip = skipped - pg_remaining; 7317 7316 7318 - for (pg = pg_unuse; pg; pg = pg->next) 7317 + for (pg = pg_unuse; pg && skip > 0; pg = pg->next) { 7319 7318 remaining += 1 << pg->order; 7319 + skip -= (PAGE_SIZE << pg->order) / ENTRY_SIZE; 7320 + } 7320 7321 7321 7322 pages -= remaining; 7322 - 7323 - skip = DIV_ROUND_UP(skip, ENTRIES_PER_PAGE); 7324 7323 7325 7324 /* 7326 7325 * Check to see if the number of pages remaining would 7327 7326 * just fit the number of entries skipped. 
7328 7327 */ 7329 - WARN(skip != remaining, "Extra allocated pages for ftrace: %lu with %lu skipped", 7328 + WARN(pg || skip > 0, "Extra allocated pages for ftrace: %lu with %lu skipped", 7330 7329 remaining, skipped); 7331 7330 } 7332 7331 /* Need to synchronize with ftrace_location_range() */