Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'trace-v7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

- Fix possible dereference of uninitialized pointer

When validating the persistent ring buffer on boot up, if the first
validation fails, a reference to "head_page" is performed in the
error path, but it skips over the initialization of that variable.
Move the initialization before the first validation check.

- Fix use of event length in validation of persistent ring buffer

On boot up, the persistent ring buffer is checked to see if it is
valid by several methods. One being to walk all the events in the
memory location to make sure they are all valid. The length of the
event is used to move to the next event. This length is determined by
the data in the buffer. If that length is corrupted, the
next event to be checked could end up at a bad memory
location.

Validate the length field of the event when doing the event walk.

- Fix function graph on archs that do not support use of ftrace_ops

When an architecture defines HAVE_DYNAMIC_FTRACE_WITH_ARGS, it means
that its function graph tracer uses the ftrace_ops of the function
tracer to call its callbacks. This allows a single registered
callback to be called directly instead of checking the callback's
meta data's hash entries against the function being traced.

For architectures that do not support this feature, it must always
call the loop function that tests each registered callback (even if
there's only one). The loop function tests each callback's meta data
against its hash of functions and will call its callback if the
function being traced is in its hash map.

The issue was that there was no check against this and the direct
function was being called even if the architecture didn't support it.
This meant that if function tracing was enabled at the same time as a
callback was registered with the function graph tracer, its callback
would be called for every function that the function tracer also
traced, even if the callback's meta data only wanted to be called
back for a small subset of functions.

Prevent the direct calling for those architectures that do not
support it.

- Fix references to trace_event_file for hist files

The hist files used event_file_data() to get a reference to the
associated trace_event_file the histogram was attached to. This would
return a pointer even if the trace_event_file is about to be freed
(via RCU). Instead it should use the event_file_file() helper that
returns NULL if the trace_event_file is marked to be freed so that no
new references are added to it.

- Wake up hist poll readers when an event is being freed

When polling on a hist file, the task is only awoken when a hist
trigger is triggered. This means that if an event is being freed
while there's a task waiting on its hist file, it will need to wait
until the hist trigger occurs to wake it up and allow the freeing to
happen. Note, the event will not be completely freed until all
references are removed, and a hist poller keeps a reference. But it
should still be woken when the event is being freed.

* tag 'trace-v7.0-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing: Wake up poll waiters for hist files when removing an event
tracing: Fix checking of freed trace_event_file for hist files
fgraph: Do not call handlers direct when not using ftrace_ops
tracing: ring-buffer: Fix to check event length before using
ring-buffer: Fix possible dereference of uninitialized pointer

+38 -8
+10 -3
include/linux/ftrace.h
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #ifndef ftrace_graph_func
-#define ftrace_graph_func ftrace_stub
-#define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+# define ftrace_graph_func ftrace_stub
+# define FTRACE_OPS_GRAPH_STUB FTRACE_OPS_FL_STUB
+/*
+ * The function graph is called every time the function tracer is called.
+ * It must always test the ops hash and cannot just directly call
+ * the handler.
+ */
+# define FGRAPH_NO_DIRECT 1
 #else
-#define FTRACE_OPS_GRAPH_STUB 0
+# define FTRACE_OPS_GRAPH_STUB 0
+# define FGRAPH_NO_DIRECT 0
 #endif
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+5
include/linux/trace_events.h
 #define hist_poll_wait(file, wait) \
 	poll_wait(file, &hist_poll_wq, wait)
+
+#else
+static inline void hist_poll_wakeup(void)
+{
+}
 #endif
 
 #define __TRACE_EVENT_FLAGS(name, value) \
+11 -1
kernel/trace/fgraph.c
 static struct fgraph_ops *fgraph_direct_gops = &fgraph_stub;
 DEFINE_STATIC_CALL(fgraph_func, ftrace_graph_entry_stub);
 DEFINE_STATIC_CALL(fgraph_retfunc, ftrace_graph_ret_stub);
+#if FGRAPH_NO_DIRECT
+static DEFINE_STATIC_KEY_FALSE(fgraph_do_direct);
+#else
 static DEFINE_STATIC_KEY_TRUE(fgraph_do_direct);
+#endif
 
 /**
  * ftrace_graph_stop - set to permanently disable function graph tracing
···
 	bitmap = get_bitmap_bits(current, offset);
 
 #ifdef CONFIG_HAVE_STATIC_CALL
-	if (static_branch_likely(&fgraph_do_direct)) {
+	if (!FGRAPH_NO_DIRECT && static_branch_likely(&fgraph_do_direct)) {
 		if (test_bit(fgraph_direct_gops->idx, &bitmap))
 			static_call(fgraph_retfunc)(&trace, fgraph_direct_gops, fregs);
 	} else
···
 	trace_func_graph_ret_t retfunc = NULL;
 	int i;
 
+	if (FGRAPH_NO_DIRECT)
+		return;
+
 	if (gops) {
 		func = gops->entryfunc;
 		retfunc = gops->retfunc;
···
 static void ftrace_graph_disable_direct(bool disable_branch)
 {
+	if (FGRAPH_NO_DIRECT)
+		return;
+
 	if (disable_branch)
 		static_branch_disable(&fgraph_do_direct);
 	static_call_update(fgraph_func, ftrace_graph_entry_stub);
+7 -2
kernel/trace/ring_buffer.c
 	struct ring_buffer_event *event;
 	u64 ts, delta;
 	int events = 0;
+	int len;
 	int e;
 
 	*delta_ptr = 0;
···
 	ts = dpage->time_stamp;
 
-	for (e = 0; e < tail; e += rb_event_length(event)) {
+	for (e = 0; e < tail; e += len) {
 
 		event = (struct ring_buffer_event *)(dpage->data + e);
+		len = rb_event_length(event);
+		if (len <= 0 || len > tail - e)
+			return -1;
 
 		switch (event->type_len) {
···
 	if (!meta || !meta->head_buffer)
 		return;
 
+	orig_head = head_page = cpu_buffer->head_page;
+
 	/* Do the reader page first */
 	ret = rb_validate_buffer(cpu_buffer->reader_page->page, cpu_buffer->cpu);
 	if (ret < 0) {
···
 	entry_bytes += local_read(&cpu_buffer->reader_page->page->commit);
 	local_set(&cpu_buffer->reader_page->entries, ret);
 
-	orig_head = head_page = cpu_buffer->head_page;
 	ts = head_page->page->time_stamp;
 
 	/*
+3
kernel/trace/trace_events.c
 	free_event_filter(file->filter);
 	file->flags |= EVENT_FILE_FL_FREED;
 	event_file_put(file);
+
+	/* Wake up hist poll waiters to notice the EVENT_FILE_FL_FREED flag. */
+	hist_poll_wakeup();
 }
 
 /*
+2 -2
kernel/trace/trace_events_hist.c
 	guard(mutex)(&event_mutex);
 
-	event_file = event_file_data(file);
+	event_file = event_file_file(file);
 	if (!event_file)
 		return EPOLLERR;
···
 	guard(mutex)(&event_mutex);
 
-	event_file = event_file_data(file);
+	event_file = event_file_file(file);
 	if (!event_file) {
 		ret = -ENODEV;
 		goto err;