Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'trace-v5.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:

- Fix for out-of-range read of stack trace output

- Fix for NULL pointer dereference in trace_uprobe_create()

- Fix to a livepatching / ftrace permission race in the module code

- Fix for NULL pointer dereference in free_ftrace_func_mapper()

- A couple of build warning cleanups

* tag 'trace-v5.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
ftrace: Fix NULL pointer dereference in free_ftrace_func_mapper()
module: Fix livepatch/ftrace module text permissions race
tracing/uprobe: Fix obsolete comment on trace_uprobe_create()
tracing/uprobe: Fix NULL pointer dereference in trace_uprobe_create()
tracing: Make two symbols static
tracing: avoid build warning with HAVE_NOP_MCOUNT
tracing: Fix out-of-range read in trace_stack_print()

+35 -14
+6
kernel/livepatch/core.c
··· 18 18 #include <linux/elf.h> 19 19 #include <linux/moduleloader.h> 20 20 #include <linux/completion.h> 21 + #include <linux/memory.h> 21 22 #include <asm/cacheflush.h> 22 23 #include "core.h" 23 24 #include "patch.h" ··· 719 718 struct klp_func *func; 720 719 int ret; 721 720 721 + mutex_lock(&text_mutex); 722 + 722 723 module_disable_ro(patch->mod); 723 724 ret = klp_write_object_relocations(patch->mod, obj); 724 725 if (ret) { 725 726 module_enable_ro(patch->mod, true); 727 + mutex_unlock(&text_mutex); 726 728 return ret; 727 729 } 728 730 729 731 arch_klp_init_object_loaded(patch, obj); 730 732 module_enable_ro(patch->mod, true); 733 + 734 + mutex_unlock(&text_mutex); 731 735 732 736 klp_for_each_func(obj, func) { 733 737 ret = klp_find_object_symbol(obj->name, func->old_name,
+16 -6
kernel/trace/ftrace.c
··· 34 34 #include <linux/hash.h> 35 35 #include <linux/rcupdate.h> 36 36 #include <linux/kprobes.h> 37 + #include <linux/memory.h> 37 38 38 39 #include <trace/events/sched.h> 39 40 ··· 2611 2610 { 2612 2611 int ret; 2613 2612 2613 + mutex_lock(&text_mutex); 2614 + 2614 2615 ret = ftrace_arch_code_modify_prepare(); 2615 2616 FTRACE_WARN_ON(ret); 2616 2617 if (ret) 2617 - return; 2618 + goto out_unlock; 2618 2619 2619 2620 /* 2620 2621 * By default we use stop_machine() to modify the code. ··· 2628 2625 2629 2626 ret = ftrace_arch_code_modify_post_process(); 2630 2627 FTRACE_WARN_ON(ret); 2628 + 2629 + out_unlock: 2630 + mutex_unlock(&text_mutex); 2631 2631 } 2632 2632 2633 2633 static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, ··· 2941 2935 p = &pg->records[i]; 2942 2936 p->flags = rec_flags; 2943 2937 2944 - #ifndef CC_USING_NOP_MCOUNT 2945 2938 /* 2946 2939 * Do the initial record conversion from mcount jump 2947 2940 * to the NOP instructions. 2948 2941 */ 2949 - if (!ftrace_code_disable(mod, p)) 2942 + if (!__is_defined(CC_USING_NOP_MCOUNT) && 2943 + !ftrace_code_disable(mod, p)) 2950 2944 break; 2951 - #endif 2952 2945 2953 2946 update_cnt++; 2954 2947 } ··· 4226 4221 struct ftrace_func_entry *entry; 4227 4222 struct ftrace_func_map *map; 4228 4223 struct hlist_head *hhd; 4229 - int size = 1 << mapper->hash.size_bits; 4230 - int i; 4224 + int size, i; 4225 + 4226 + if (!mapper) 4227 + return; 4231 4228 4232 4229 if (free_func && mapper->hash.count) { 4230 + size = 1 << mapper->hash.size_bits; 4233 4231 for (i = 0; i < size; i++) { 4234 4232 hhd = &mapper->hash.buckets[i]; 4235 4233 hlist_for_each_entry(entry, hhd, hlist) { ··· 5784 5776 struct ftrace_page *pg; 5785 5777 5786 5778 mutex_lock(&ftrace_lock); 5779 + mutex_lock(&text_mutex); 5787 5780 5788 5781 if (ftrace_disabled) 5789 5782 goto out_unlock; ··· 5846 5837 ftrace_arch_code_modify_post_process(); 5847 5838 5848 5839 out_unlock: 5840 + mutex_unlock(&text_mutex); 5849 5841 
mutex_unlock(&ftrace_lock); 5850 5842 5851 5843 process_cached_mods(mod->name);
+2 -2
kernel/trace/trace.c
··· 6923 6923 6924 6924 static DEFINE_MUTEX(tracing_err_log_lock); 6925 6925 6926 - struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) 6926 + static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr) 6927 6927 { 6928 6928 struct tracing_log_err *err; 6929 6929 ··· 8192 8192 .llseek = default_llseek, 8193 8193 }; 8194 8194 8195 - struct dentry *trace_instance_dir; 8195 + static struct dentry *trace_instance_dir; 8196 8196 8197 8197 static void 8198 8198 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
+1 -1
kernel/trace/trace_output.c
··· 1057 1057 1058 1058 trace_seq_puts(s, "<stack trace>\n"); 1059 1059 1060 - for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { 1060 + for (p = field->caller; p && p < end && *p != ULONG_MAX; p++) { 1061 1061 1062 1062 if (trace_seq_has_overflowed(s)) 1063 1063 break;
+10 -5
kernel/trace/trace_uprobe.c
··· 426 426 /* 427 427 * Argument syntax: 428 428 * - Add uprobe: p|r[:[GRP/]EVENT] PATH:OFFSET [FETCHARGS] 429 - * 430 - * - Remove uprobe: -:[GRP/]EVENT 431 429 */ 432 430 static int trace_uprobe_create(int argc, const char **argv) 433 431 { ··· 441 443 ret = 0; 442 444 ref_ctr_offset = 0; 443 445 444 - /* argc must be >= 1 */ 445 - if (argv[0][0] == 'r') 446 + switch (argv[0][0]) { 447 + case 'r': 446 448 is_return = true; 447 - else if (argv[0][0] != 'p' || argc < 2) 449 + break; 450 + case 'p': 451 + break; 452 + default: 453 + return -ECANCELED; 454 + } 455 + 456 + if (argc < 2) 448 457 return -ECANCELED; 449 458 450 459 if (argv[0][1] == ':')