Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'trace-v4.11-rc5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull ftrace fix from Steven Rostedt:
"While rewriting the function probe code, I stumbled over a long
standing bug. This bug has been there since function tracing was added
way back when. But my new development depends on this bug being fixed,
and it should be fixed regardless as it causes ftrace to disable
itself when triggered, and a reboot is required to enable it again.

The bug is that the function probe does not disable itself properly if
there's another probe of its type still enabled. For example:

# cd /sys/kernel/debug/tracing
# echo schedule:traceoff > set_ftrace_filter
# echo do_IRQ:traceoff > set_ftrace_filter
# echo \!do_IRQ:traceoff > /debug/tracing/set_ftrace_filter
# echo do_IRQ:traceoff > set_ftrace_filter

The above registers two traceoff probes (one for schedule and one for
do_IRQ), and then removes do_IRQ.

But since there still exists one for schedule, it is not done
properly. When adding do_IRQ back, the breakage in the accounting is
noticed by the ftrace self tests, and it causes a warning and disables
ftrace"

* tag 'trace-v4.11-rc5-2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
ftrace: Fix removing of second function probe

+16 -4
+16 -4
kernel/trace/ftrace.c
@@ __disable_ftrace_function_probe @@
 	ftrace_probe_registered = 1;
 }
 
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
 {
 	int i;
 
 	if (!ftrace_probe_registered)
-		return;
+		return false;
 
 	for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
 		struct hlist_head *hhd = &ftrace_func_hash[i];
 		if (hhd->first)
-			return;
+			return false;
 	}
 
 	/* no more funcs left */
 	ftrace_shutdown(&trace_probe_ops, 0);
 
 	ftrace_probe_registered = 0;
+	return true;
 }
 
@@ __unregister_ftrace_function_probe @@
 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				   void *data, int flags)
 {
+	struct ftrace_ops_hash old_hash_ops;
 	struct ftrace_func_entry *rec_entry;
 	struct ftrace_func_probe *entry;
 	struct ftrace_func_probe *p;
@@
 	struct hlist_node *tmp;
 	char str[KSYM_SYMBOL_LEN];
 	int i, ret;
+	bool disabled;
 
 	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
 		func_g.search = NULL;
@@
 	}
 
 	mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+
+	old_hash_ops.filter_hash = old_hash;
+	/* Probes only have filters */
+	old_hash_ops.notrace_hash = NULL;
 
 	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
 	if (!hash)
@@
 		}
 	}
 	mutex_lock(&ftrace_lock);
-	__disable_ftrace_function_probe();
+	disabled = __disable_ftrace_function_probe();
 	/*
 	 * Remove after the disable is called. Otherwise, if the last
 	 * probe is removed, a null hash means *all enabled*.
 	 */
 	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+	/* still need to update the function call sites */
+	if (ftrace_enabled && !disabled)
+		ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+				       &old_hash_ops);
 	synchronize_sched();
 	if (!ret)
 		free_ftrace_hash_rcu(old_hash);