Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing/kprobes update from Steven Rostedt:
"The majority of these changes are from Masami Hiramatsu bringing
kprobes up to par with the latest changes to ftrace (multi buffering
and the new function probes).

He also discovered and fixed some bugs in doing so. When pulling in
his patches, I also found a few minor bugs as well and fixed them.

This also includes a compile fix for some archs that select the ring
buffer but not tracing.

I based this off of the last patch you took from me that fixed the
merge conflict error, as that was the commit that had all the changes
I needed for this set of changes."

* tag 'trace-fixes-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
tracing/kprobes: Support soft-mode disabling
tracing/kprobes: Support ftrace_event_file base multibuffer
tracing/kprobes: Pass trace_probe directly from dispatcher
tracing/kprobes: Increment probe hit-count even if it is used by perf
tracing/kprobes: Use bool for retprobe checker
ftrace: Fix function probe when more than one probe is added
ftrace: Fix the output of enabled_functions debug file
ftrace: Fix locking in register_ftrace_function_probe()
tracing: Add helper function trace_create_new_event() to remove duplicate code
tracing: Modify soft-mode only if there's no other referrer
tracing: Indicate enabled soft-mode in enable file
tracing/kprobes: Fix to increment return event probe hit-count
ftrace: Cleanup regex_lock and ftrace_lock around hash updating
ftrace, kprobes: Fix a deadlock on ftrace_regex_lock
ftrace: Have ftrace_regex_write() return either read or error
tracing: Return error if register_ftrace_function_probe() fails for event_enable_func()
tracing: Don't succeed if event_enable_func did not register anything
ring-buffer: Select IRQ_WORK
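
The headline change in the list above is per-instance (multi-buffer) support for kprobe events. A hypothetical userspace sketch of what that enables follows; it is not part of the series, the paths assume debugfs is mounted at /sys/kernel/debug, and the probe name "myprobe" and target function "do_sys_open" are made up for illustration. The point is the last step: the event is enabled inside an instance, so its hits land in that instance's own ring buffer rather than the global one.

/* Hypothetical demo, not from the series: define a kprobe event and
 * enable it only inside a trace instance.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int write_str(const char *path, const char *s)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, s, strlen(s)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        const char *base = "/sys/kernel/debug/tracing";
        char path[256];

        /* define the kprobe event (made-up probe name and target) */
        snprintf(path, sizeof(path), "%s/kprobe_events", base);
        if (write_str(path, "p:myprobe do_sys_open\n"))
                perror("kprobe_events");

        /* create an instance: it gets its own ring buffer */
        snprintf(path, sizeof(path), "%s/instances/foo", base);
        if (mkdir(path, 0755) && errno != EEXIST)
                perror("mkdir instance");

        /* enable the event only in that instance's buffer */
        snprintf(path, sizeof(path),
                 "%s/instances/foo/events/kprobes/myprobe/enable", base);
        if (write_str(path, "1"))
                perror("enable");

        return 0;
}

Reading instances/foo/trace afterwards should show only this probe's events, which is what the ftrace_event_file plumbing in the diffs below carries through to the kprobe handlers.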

6 files changed: +369 -109
include/linux/ftrace.h (+4)
···
  * not set this, then the ftrace infrastructure will add recursion
  * protection for the caller.
  * STUB - The ftrace_ops is just a place holder.
+ * INITIALIZED - The ftrace_ops has already been initialized (first use time
+ *            register_ftrace_function() is called, it will initialized the ops)
  */
 enum {
        FTRACE_OPS_FL_ENABLED = 1 << 0,
···
        FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
        FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
        FTRACE_OPS_FL_STUB = 1 << 7,
+       FTRACE_OPS_FL_INITIALIZED = 1 << 8,
 };
 
 struct ftrace_ops {
···
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash *notrace_hash;
        struct ftrace_hash *filter_hash;
+       struct mutex regex_lock;
 #endif
 };
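
A sketch of what the new flag and per-ops mutex mean for users of the API: with this series, a statically defined ftrace_ops needs no explicit lock setup, because the first call into the filter or registration paths runs ftrace_ops_init() (added in kernel/trace/ftrace.c below), which initializes regex_lock exactly once. The module and names here (my_ops, my_callback, func_name) are illustrative, not from the patch.

/* Hypothetical module: my_ops, my_callback and func_name are made up.
 * With this series the first call into ftrace_set_filter() or
 * register_ftrace_function() runs ftrace_ops_init(), which
 * mutex_init()s ops->regex_lock and sets FTRACE_OPS_FL_INITIALIZED
 * so the setup happens only once.
 */
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/string.h>

static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct pt_regs *regs)
{
        /* runs on entry to every function left in the filter hash */
}

static struct ftrace_ops my_ops = {
        .func = my_callback,
        /* no INIT_REGEX_LOCK() needed here: lazy init covers this ops */
};

static char func_name[] = "do_fork";    /* made-up target for the demo */

static int __init my_init(void)
{
        /* both entry points call ftrace_ops_init(&my_ops) first */
        ftrace_set_filter(&my_ops, func_name, strlen(func_name), 1);
        return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
        unregister_ftrace_function(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");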
include/linux/ftrace_event.h (+1)
···
  * caching and such. Which is mostly OK ;-)
  */
        unsigned long flags;
+       atomic_t sm_ref;        /* soft-mode reference counter */
 };
 
 #define __TRACE_EVENT_FLAGS(name, value) \
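
A minimal sketch of the discipline this counter enables, distilled from the trace_events.c changes below (the helper names soft_mode_get/soft_mode_put are made up): only the 0 to 1 transition turns soft mode on and only the 1 to 0 transition turns it off, so several soft-disable users can stack without clobbering each other.

/* Illustrative only; soft_mode_get/soft_mode_put are made-up names.
 * Mirrors the logic __ftrace_event_enable_disable() gains below.
 */
#include <linux/atomic.h>
#include <linux/ftrace_event.h>

static void soft_mode_get(struct ftrace_event_file *file)
{
        if (atomic_inc_return(&file->sm_ref) > 1)
                return;         /* soft mode already on for another referrer */
        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
}

static void soft_mode_put(struct ftrace_event_file *file)
{
        if (atomic_dec_return(&file->sm_ref) > 0)
                return;         /* other referrers still need soft mode */
        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
}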
kernel/trace/Kconfig (+1 -1)
···
 config RING_BUFFER
        bool
        select TRACE_CLOCK
+       select IRQ_WORK
 
 config FTRACE_NMI_ENTER
        bool
···
        select BINARY_PRINTF
        select EVENT_TRACING
        select TRACE_CLOCK
-       select IRQ_WORK
 
 config GENERIC_TRACER
        bool
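
For context on the move: the ring buffer wakes blocked readers through irq_work so that writers in atomic or NMI context never sleep, which is why the dependency belongs to RING_BUFFER itself rather than TRACING (this is the commit message's compile fix for "archs that select the ring buffer but not tracing"). A stripped-down sketch of that pattern, with made-up names (my_waiters, my_wake, my_commit):

/* Sketch with made-up names; believed to mirror the waiter-wakeup
 * pattern in kernel/trace/ring_buffer.c, but heavily simplified.
 */
#include <linux/irq_work.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waiters);

static void my_wake(struct irq_work *work)
{
        wake_up_all(&my_waiters);       /* safe: runs from irq-work context */
}

static struct irq_work my_work = { .func = my_wake };

/* write-side commit path; may run in atomic or even NMI context */
static void my_commit(void)
{
        irq_work_queue(&my_work);       /* defer the wakeup, never sleep */
}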
kernel/trace/ftrace.c (+84 -42)
···
 
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define INIT_REGEX_LOCK(opsname)       \
+       .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
+#else
+#define INIT_REGEX_LOCK(opsname)
+#endif
+
 static struct ftrace_ops ftrace_list_end __read_mostly = {
        .func = ftrace_stub,
        .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
···
 #define while_for_each_ftrace_op(op)                           \
        while (likely(op = rcu_dereference_raw((op)->next)) &&  \
               unlikely((op) != &ftrace_list_end))
+
+static inline void ftrace_ops_init(struct ftrace_ops *ops)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
+               mutex_init(&ops->regex_lock);
+               ops->flags |= FTRACE_OPS_FL_INITIALIZED;
+       }
+#endif
+}
 
 /**
  * ftrace_nr_registered_ops - return number of ops registered
···
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
        .func = function_profile_call,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
···
        .func = ftrace_stub,
        .notrace_hash = EMPTY_HASH,
        .filter_hash = EMPTY_HASH,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
-
-static DEFINE_MUTEX(ftrace_regex_lock);
 
 struct ftrace_page {
        struct ftrace_page *next;
···
 
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
+       ftrace_ops_init(ops);
        free_ftrace_hash(ops->filter_hash);
        free_ftrace_hash(ops->notrace_hash);
 }
···
             !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
 
            ((iter->flags & FTRACE_ITER_ENABLED) &&
-            !(rec->flags & ~FTRACE_FL_MASK))) {
+            !(rec->flags & FTRACE_FL_ENABLED))) {
 
                rec = NULL;
                goto retry;
···
        struct ftrace_hash *hash;
        int ret = 0;
 
+       ftrace_ops_init(ops);
+
        if (unlikely(ftrace_disabled))
                return -ENODEV;
···
                return -ENOMEM;
        }
 
+       iter->ops = ops;
+       iter->flags = flag;
+
+       mutex_lock(&ops->regex_lock);
+
        if (flag & FTRACE_ITER_NOTRACE)
                hash = ops->notrace_hash;
        else
                hash = ops->filter_hash;
 
-       iter->ops = ops;
-       iter->flags = flag;
-
        if (file->f_mode & FMODE_WRITE) {
-               mutex_lock(&ftrace_lock);
                iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
-               mutex_unlock(&ftrace_lock);
-
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       goto out_unlock;
                }
        }
-
-       mutex_lock(&ftrace_regex_lock);
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
···
                }
        } else
                file->private_data = iter;
-       mutex_unlock(&ftrace_regex_lock);
+
+ out_unlock:
+       mutex_unlock(&ops->regex_lock);
 
        return ret;
 }
···
 static struct ftrace_ops trace_probe_ops __read_mostly =
 {
        .func = function_trace_probe_call,
+       .flags = FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
···
        int ret;
        int i;
 
-       if (ftrace_probe_registered)
+       if (ftrace_probe_registered) {
+               /* still need to update the function call sites */
+               if (ftrace_enabled)
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
                return;
+       }
 
        for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
                struct hlist_head *hhd = &ftrace_func_hash[i];
···
        if (WARN_ON(not))
                return -EINVAL;
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                count = -ENOMEM;
-               goto out_unlock;
+               goto out;
        }
 
        if (unlikely(ftrace_disabled)) {
                count = -ENODEV;
-               goto out_unlock;
+               goto out;
        }
+
+       mutex_lock(&ftrace_lock);
 
        do_for_each_ftrace_rec(pg, rec) {
···
 
 out_unlock:
        mutex_unlock(&ftrace_lock);
+ out:
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 
        return count;
···
                return;
        }
 
-       mutex_lock(&ftrace_lock);
+       mutex_lock(&trace_probe_ops.regex_lock);
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash)
···
                        list_add(&entry->free_list, &free_list);
                }
        }
+       mutex_lock(&ftrace_lock);
        __disable_ftrace_function_probe();
        /*
         * Remove after the disable is called. Otherwise, if the last
···
                list_del(&entry->free_list);
                ftrace_free_entry(entry);
        }
+       mutex_unlock(&ftrace_lock);
 
  out_unlock:
-       mutex_unlock(&ftrace_lock);
+       mutex_unlock(&trace_probe_ops.regex_lock);
        free_ftrace_hash(hash);
 }
···
        if (!cnt)
                return 0;
 
-       mutex_lock(&ftrace_regex_lock);
-
-       ret = -ENODEV;
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       /* iter->hash is a local copy, so we don't need regex_lock */
 
        parser = &iter->parser;
        read = trace_get_user(parser, ubuf, cnt, ppos);
···
                ret = ftrace_process_regex(iter->hash, parser->buffer,
                                           parser->idx, enable);
                trace_parser_clear(parser);
-               if (ret)
-                       goto out_unlock;
+               if (ret < 0)
+                       goto out;
        }
 
        ret = read;
- out_unlock:
-       mutex_unlock(&ftrace_regex_lock);
-
+ out:
        return ret;
 }
···
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       mutex_lock(&ops->regex_lock);
+
        if (enable)
                orig_hash = &ops->filter_hash;
        else
                orig_hash = &ops->notrace_hash;
 
        hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
-       if (!hash)
-               return -ENOMEM;
+       if (!hash) {
+               ret = -ENOMEM;
+               goto out_regex_unlock;
+       }
 
-       mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
···
        mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-       mutex_unlock(&ftrace_regex_lock);
+       mutex_unlock(&ops->regex_lock);
 
        free_ftrace_hash(hash);
        return ret;
···
 int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
                         int remove, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_addr(ops, ip, remove, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
···
 int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
···
 int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
+       ftrace_ops_init(ops);
        return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
···
 {
        char *func;
 
+       ftrace_ops_init(ops);
+
        while (buf) {
                func = strsep(&buf, ",");
                ftrace_set_regex(ops, func, strlen(func), 0, enable);
···
        int filter_hash;
        int ret;
 
-       mutex_lock(&ftrace_regex_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;
-
                seq_release(inode, file);
        } else
                iter = file->private_data;
···
        }
 
        trace_parser_put(parser);
+
+       mutex_lock(&iter->ops->regex_lock);
 
        if (file->f_mode & FMODE_WRITE) {
                filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
···
 
                mutex_unlock(&ftrace_lock);
        }
+
+       mutex_unlock(&iter->ops->regex_lock);
        free_ftrace_hash(iter->hash);
        kfree(iter);
 
-       mutex_unlock(&ftrace_regex_lock);
        return 0;
 }
···
 
 static struct ftrace_ops global_ops = {
        .func = ftrace_stub,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
···
 }
 
 static struct ftrace_ops control_ops = {
-       .func = ftrace_ops_control_func,
-       .flags = FTRACE_OPS_FL_RECURSION_SAFE,
+       .func = ftrace_ops_control_func,
+       .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
+       INIT_REGEX_LOCK(control_ops)
 };
 
 static inline void
···
 int register_ftrace_function(struct ftrace_ops *ops)
 {
        int ret = -1;
+
+       ftrace_ops_init(ops);
 
        mutex_lock(&ftrace_lock);
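
The locking change above reduces to one rule, sketched here with made-up demo_* names (this is an illustration of the ordering, not kernel code): take the per-ops regex_lock first to serialize hash edits for that ops, and take the global ftrace_lock second, only around the commit of the new hash. The old single ftrace_regex_lock could end up ordered against ftrace_lock differently on different paths, which is the deadlock the series fixes.

/* Illustration of the lock ordering only; demo_* names are made up. */
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_ftrace_lock);          /* stand-in for ftrace_lock */

struct demo_ops {
        struct mutex regex_lock;                /* per-ops, as added above */
};

static void demo_commit_hash(void) { }          /* stand-in for the hash move */

static void demo_update_filter(struct demo_ops *ops)
{
        mutex_lock(&ops->regex_lock);           /* 1st: this ops' hash edits */
        /* build the scratch copy of the hash here, no global lock held */
        mutex_lock(&demo_ftrace_lock);          /* 2nd: global, commit only */
        demo_commit_hash();
        mutex_unlock(&demo_ftrace_lock);
        mutex_unlock(&ops->regex_lock);
}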
kernel/trace/trace_events.c (+41 -13)
···
        switch (enable) {
        case 0:
                /*
-                * When soft_disable is set and enable is cleared, we want
+                * When soft_disable is set and enable is cleared, the sm_ref
+                * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
···
                 * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
                 */
                if (soft_disable) {
+                       if (atomic_dec_return(&file->sm_ref) > 0)
+                               break;
                        disable = file->flags & FTRACE_EVENT_FL_SOFT_DISABLED;
                        clear_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
                } else
···
                 */
                if (!soft_disable)
                        clear_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &file->flags);
-               else
+               else {
+                       if (atomic_inc_return(&file->sm_ref) > 1)
+                               break;
                        set_bit(FTRACE_EVENT_FL_SOFT_MODE_BIT, &file->flags);
+               }
 
                if (!(file->flags & FTRACE_EVENT_FL_ENABLED)) {
···
        if (file->flags & FTRACE_EVENT_FL_ENABLED) {
                if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)
                        buf = "0*\n";
+               else if (file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+                       buf = "1*\n";
                else
                        buf = "1\n";
        } else
···
        return 0;
 }
 
+static struct ftrace_event_file *
+trace_create_new_event(struct ftrace_event_call *call,
+                      struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       if (!file)
+               return NULL;
+
+       file->event_call = call;
+       file->tr = tr;
+       atomic_set(&file->sm_ref, 0);
+       list_add(&file->list, &tr->events);
+
+       return file;
+}
+
 /* Add an event to a trace directory */
 static int
 __trace_add_new_event(struct ftrace_event_call *call,
···
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
-
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
 
        return event_create_dir(tr->event_dir, file, id, enable, filter, format);
 }
···
 {
        struct ftrace_event_file *file;
 
-       file = kmem_cache_alloc(file_cachep, GFP_TRACE);
+       file = trace_create_new_event(call, tr);
        if (!file)
                return -ENOMEM;
-
-       file->event_call = call;
-       file->tr = tr;
-       list_add(&file->list, &tr->events);
 
        return 0;
 }
···
        if (ret < 0)
                goto out_put;
        ret = register_ftrace_function_probe(glob, ops, data);
-       if (!ret)
+       /*
+        * The above returns on success the # of functions enabled,
+        * but if it didn't find any functions it returns zero.
+        * Consider no functions a failure too.
+        */
+       if (!ret) {
+               ret = -ENOENT;
                goto out_disable;
+       } else if (ret < 0)
+               goto out_disable;
+       /* Just return zero, not the number of enabled functions */
+       ret = 0;
 out:
        mutex_unlock(&event_mutex);
        return ret;
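
The return-value convention that the event_enable_func() hunk above enforces is worth isolating, since register_ftrace_function_probe() returns the number of functions it attached to. A sketch with made-up names (demo_register and demo_enable stand in for the real functions):

/* demo_register/demo_enable are made-up stand-ins for
 * register_ftrace_function_probe() and event_enable_func().
 */
#include <linux/errno.h>

static int demo_register(void)
{
        return 0;       /* pretend the glob matched no functions */
}

static int demo_enable(void)
{
        int ret = demo_register();

        if (!ret)
                return -ENOENT; /* matched nothing: report failure */
        if (ret < 0)
                return ret;     /* real error: propagate it */
        return 0;               /* success: callers do not want the count */
}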
kernel/trace/trace_kprobe.c (+238 -53)
···
 /**
  * Kprobe event core functions
  */
-
 struct trace_probe {
        struct list_head list;
        struct kretprobe rp;    /* Use rp.kp for kprobe use */
···
        const char *symbol;     /* symbol name */
        struct ftrace_event_class class;
        struct ftrace_event_call call;
+       struct ftrace_event_file **files;
        ssize_t size;           /* trace entry size */
        unsigned int nr_args;
        struct probe_arg args[];
···
        (sizeof(struct probe_arg) * (n)))
 
 
-static __kprobes int trace_probe_is_return(struct trace_probe *tp)
+static __kprobes bool trace_probe_is_return(struct trace_probe *tp)
 {
        return tp->rp.handler != NULL;
 }
···
        return NULL;
 }
 
-/* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static int enable_trace_probe(struct trace_probe *tp, int flag)
+static int trace_probe_nr_files(struct trace_probe *tp)
+{
+       struct ftrace_event_file **file = tp->files;
+       int ret = 0;
+
+       if (file)
+               while (*(file++))
+                       ret++;
+
+       return ret;
+}
+
+static DEFINE_MUTEX(probe_enable_lock);
+
+/*
+ * Enable trace_probe
+ * if the file is NULL, enable "perf" handler, or enable "trace" handler.
+ */
+static int
+enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
 {
        int ret = 0;
 
-       tp->flags |= flag;
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+
+               /* 1 is for new one and 1 is for stopper */
+               new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
+                             GFP_KERNEL);
+               if (!new) {
+                       ret = -ENOMEM;
+                       goto out_unlock;
+               }
+               memcpy(new, old, n * sizeof(struct ftrace_event_file *));
+               new[n] = file;
+               /* The last one keeps a NULL */
+
+               rcu_assign_pointer(tp->files, new);
+               tp->flags |= TP_FLAG_TRACE;
+
+               if (old) {
+                       /* Make sure the probe is done with old files */
+                       synchronize_sched();
+                       kfree(old);
+               }
+       } else
+               tp->flags |= TP_FLAG_PROFILE;
+
        if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) &&
            !trace_probe_has_gone(tp)) {
                if (trace_probe_is_return(tp))
···
                        ret = enable_kprobe(&tp->rp.kp);
        }
 
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
        return ret;
 }
 
-/* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */
-static void disable_trace_probe(struct trace_probe *tp, int flag)
+static int
+trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
 {
-       tp->flags &= ~flag;
+       int i;
+
+       if (tp->files) {
+               for (i = 0; tp->files[i]; i++)
+                       if (tp->files[i] == file)
+                               return i;
+       }
+
+       return -1;
+}
+
+/*
+ * Disable trace_probe
+ * if the file is NULL, disable "perf" handler, or disable "trace" handler.
+ */
+static int
+disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
+{
+       int ret = 0;
+
+       mutex_lock(&probe_enable_lock);
+
+       if (file) {
+               struct ftrace_event_file **new, **old = tp->files;
+               int n = trace_probe_nr_files(tp);
+               int i, j;
+
+               if (n == 0 || trace_probe_file_index(tp, file) < 0) {
+                       ret = -EINVAL;
+                       goto out_unlock;
+               }
+
+               if (n == 1) {   /* Remove the last file */
+                       tp->flags &= ~TP_FLAG_TRACE;
+                       new = NULL;
+               } else {
+                       new = kzalloc(n * sizeof(struct ftrace_event_file *),
+                                     GFP_KERNEL);
+                       if (!new) {
+                               ret = -ENOMEM;
+                               goto out_unlock;
+                       }
+
+                       /* This copy & check loop copies the NULL stopper too */
+                       for (i = 0, j = 0; j < n && i < n + 1; i++)
+                               if (old[i] != file)
+                                       new[j++] = old[i];
+               }
+
+               rcu_assign_pointer(tp->files, new);
+
+               /* Make sure the probe is done with old files */
+               synchronize_sched();
+               kfree(old);
+       } else
+               tp->flags &= ~TP_FLAG_PROFILE;
+
        if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) {
                if (trace_probe_is_return(tp))
                        disable_kretprobe(&tp->rp);
                else
                        disable_kprobe(&tp->rp.kp);
        }
+
+ out_unlock:
+       mutex_unlock(&probe_enable_lock);
+
+       return ret;
 }
 
 /* Internal register function - just handle k*probes and flags */
···
 }
 
 /* Kprobe handler */
-static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void
+__kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
+                   struct ftrace_event_file *ftrace_file)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct kprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
···
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
-       tp->nhit++;
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
 
        local_save_flags(irq_flags);
        pc = preempt_count();
···
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
        entry = ring_buffer_event_data(event);
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
        if (!filter_current_check_discard(buffer, call, entry, event))
···
                                                irq_flags, pc, regs);
 }
 
-/* Kretprobe handler */
-static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
-                                          struct pt_regs *regs)
+static __kprobes void
+kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kprobe_trace_func(tp, regs, *file);
+               file++;
+       }
+}
+
+/* Kretprobe handler */
+static __kprobes void
+__kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                      struct pt_regs *regs,
+                      struct ftrace_event_file *ftrace_file)
+{
        struct kretprobe_trace_entry_head *entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
···
        unsigned long irq_flags;
        struct ftrace_event_call *call = &tp->call;
 
+       WARN_ON(call != ftrace_file->event_call);
+
+       if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
+               return;
+
        local_save_flags(irq_flags);
        pc = preempt_count();
 
        dsize = __get_data_size(tp, regs);
        size = sizeof(*entry) + tp->size + dsize;
 
-       event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-                                                 size, irq_flags, pc);
+       event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+                                               call->event.type,
+                                               size, irq_flags, pc);
        if (!event)
                return;
 
···
        if (!filter_current_check_discard(buffer, call, entry, event))
                trace_buffer_unlock_commit_regs(buffer, event,
                                                irq_flags, pc, regs);
+}
+
+static __kprobes void
+kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                    struct pt_regs *regs)
+{
+       struct ftrace_event_file **file = tp->files;
+
+       /* Note: preempt is already disabled around the kprobe handler */
+       while (*file) {
+               __kretprobe_trace_func(tp, ri, regs, *file);
+               file++;
+       }
 }
 
 /* Event entry printers */
···
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes void kprobe_perf_func(struct kprobe *kp,
-                                      struct pt_regs *regs)
+static __kprobes void
+kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
        struct ftrace_event_call *call = &tp->call;
        struct kprobe_trace_entry_head *entry;
        struct hlist_head *head;
···
        if (!entry)
                return;
 
-       entry->ip = (unsigned long)kp->addr;
+       entry->ip = (unsigned long)tp->rp.kp.addr;
        memset(&entry[1], 0, dsize);
        store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize);
 
···
 }
 
 /* Kretprobe profile handler */
-static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
-                                         struct pt_regs *regs)
+static __kprobes void
+kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri,
+                   struct pt_regs *regs)
 {
-       struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
        struct ftrace_event_call *call = &tp->call;
        struct kretprobe_trace_entry_head *entry;
        struct hlist_head *head;
···
                     enum trace_reg type, void *data)
 {
        struct trace_probe *tp = (struct trace_probe *)event->data;
+       struct ftrace_event_file *file = data;
 
        switch (type) {
        case TRACE_REG_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_TRACE);
+               return enable_trace_probe(tp, file);
        case TRACE_REG_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_TRACE);
-               return 0;
+               return disable_trace_probe(tp, file);
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return enable_trace_probe(tp, TP_FLAG_PROFILE);
+               return enable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_UNREGISTER:
-               disable_trace_probe(tp, TP_FLAG_PROFILE);
-               return 0;
+               return disable_trace_probe(tp, NULL);
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
···
 {
        struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kprobe_trace_func(kp, regs);
+               kprobe_trace_func(tp, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kprobe_perf_func(kp, regs);
+               kprobe_perf_func(tp, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
···
 {
        struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 
+       tp->nhit++;
+
        if (tp->flags & TP_FLAG_TRACE)
-               kretprobe_trace_func(ri, regs);
+               kretprobe_trace_func(tp, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
        if (tp->flags & TP_FLAG_PROFILE)
-               kretprobe_perf_func(ri, regs);
+               kretprobe_perf_func(tp, ri, regs);
 #endif
        return 0;       /* We don't tweek kernel, so just return 0 */
 }
···
        return a1 + a2 + a3 + a4 + a5 + a6;
 }
 
+static struct ftrace_event_file *
+find_trace_probe_file(struct trace_probe *tp, struct trace_array *tr)
+{
+       struct ftrace_event_file *file;
+
+       list_for_each_entry(file, &tr->events, list)
+               if (file->event_call == &tp->call)
+                       return file;
+
+       return NULL;
+}
+
 static __init int kprobe_trace_self_tests_init(void)
 {
        int ret, warn = 0;
        int (*target)(int, int, int, int, int, int);
        struct trace_probe *tp;
+       struct ftrace_event_file *file;
 
        target = kprobe_trace_selftest_target;
 
···
                                  "$stack $stack0 +0($stack)",
                                  create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function entry.\n");
+               pr_warn("error on probing function entry.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        ret = traceprobe_command("r:testprobe2 kprobe_trace_selftest_target "
                                 "$retval", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on probing function return.\n");
+               pr_warn("error on probing function return.\n");
                warn++;
        } else {
                /* Enable trace point */
                tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
                if (WARN_ON_ONCE(tp == NULL)) {
-                       pr_warning("error on getting new probe.\n");
+                       pr_warn("error on getting 2nd new probe.\n");
                        warn++;
-               } else
-                       enable_trace_probe(tp, TP_FLAG_TRACE);
+               } else {
+                       file = find_trace_probe_file(tp, top_trace_array());
+                       if (WARN_ON_ONCE(file == NULL)) {
+                               pr_warn("error on getting probe file.\n");
+                               warn++;
+                       } else
+                               enable_trace_probe(tp, file);
+               }
        }
 
        if (warn)
···
        /* Disable trace points before removing it */
        tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting test probe.\n");
+               pr_warn("error on getting test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM);
        if (WARN_ON_ONCE(tp == NULL)) {
-               pr_warning("error on getting 2nd test probe.\n");
+               pr_warn("error on getting 2nd test probe.\n");
                warn++;
-       } else
-               disable_trace_probe(tp, TP_FLAG_TRACE);
+       } else {
+               file = find_trace_probe_file(tp, top_trace_array());
+               if (WARN_ON_ONCE(file == NULL)) {
+                       pr_warn("error on getting probe file.\n");
+                       warn++;
+               } else
+                       disable_trace_probe(tp, file);
+       }
 
        ret = traceprobe_command("-:testprobe", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
 
        ret = traceprobe_command("-:testprobe2", create_trace_probe);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error on deleting a probe.\n");
+               pr_warn("error on deleting a probe.\n");
                warn++;
        }
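
Finally, the pattern behind tp->files above, reduced to a sketch with made-up demo_* names: readers (the kprobe handlers) walk a NULL-terminated array with preemption disabled, so writers build a replacement array, publish it with rcu_assign_pointer(), and wait with synchronize_sched() before freeing the old one. The removal path in disable_trace_probe() is the same idea in reverse.

/* Illustrative sketch of the copy-and-swap pattern; demo_* names are
 * made up, and struct demo_file stands in for struct ftrace_event_file.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_file;

struct demo_probe {
        struct demo_file **files;       /* NULL-terminated, RCU-published */
};

/* writer side: append one entry; n is the current entry count */
static int demo_add_file(struct demo_probe *tp, struct demo_file *file, int n)
{
        struct demo_file **new, **old = tp->files;

        /* n existing entries + the new one + the NULL stopper */
        new = kzalloc((n + 2) * sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        if (old)
                memcpy(new, old, n * sizeof(*new));
        new[n] = file;

        rcu_assign_pointer(tp->files, new);
        if (old) {
                synchronize_sched();    /* readers run preempt-disabled */
                kfree(old);
        }
        return 0;
}

/* reader side: kprobe handlers already run with preemption disabled */
static void demo_for_each_file(struct demo_probe *tp,
                               void (*fn)(struct demo_file *))
{
        struct demo_file **file = tp->files;

        while (*file) {
                fn(*file);
                file++;
        }
}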