Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'trace-fixes-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull fix for ftrace function tracer/profiler conflict from Steven Rostedt:
"The rewrite of the ftrace code that makes it possible to allow for
separate trampolines had a design flaw with the interaction between
the function and function_graph tracers.

The main flaw was the simplified handling of multiple tracers that
share the same filter (like function and function_graph, which use
the set_ftrace_filter file to filter their code). The design assumed
that the two tracers could never run simultaneously, as only one
tracer can be used at a time. The problem with this assumption is
that the function profiler is implemented on top of the function
graph tracer, so the profiler can run at the same time as the
function tracer. This broke the assumption, and when ftrace detected
the violated assumption it would spit out a nasty warning and shut
itself down.
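
For concreteness, the failing combination can be provoked from user
space. A minimal repro sketch (not from the kernel tree; assumes
tracefs is mounted at /sys/kernel/debug/tracing and the kernel was
built with CONFIG_FUNCTION_PROFILER):

/* repro.c: enable the function tracer, then the function profiler. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_knob(const char *path, const char *val)
{
    int fd = open(path, O_WRONLY);

    if (fd < 0) {
        perror(path);
        return -1;
    }
    if (write(fd, val, strlen(val)) < 0)
        perror(path);
    close(fd);
    return 0;
}

int main(void)
{
    /* Start the function tracer... */
    write_knob("/sys/kernel/debug/tracing/current_tracer", "function");
    /* ...then the profiler, which is built on the function_graph
     * tracer, so both tracers are now live at once. */
    write_knob("/sys/kernel/debug/tracing/function_profile_enabled", "1");
    return 0;
}

On kernels with the flawed design this combination tripped the
warning described above and disabled ftrace; with these fixes it
works.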

Instead of using a single ftrace_ops that switches between the
function and function_graph callbacks, the two tracers can again use
their own ftrace_ops. But instead of building a complex hierarchy of
ftrace_ops, the filter fields are moved into their own structure, and
multiple ftrace_ops can carefully share the same filter. It took some
rework to allow this, and currently only the global_ops can share its
filter, but the new design can easily be extended to let any
ftrace_ops share its filter with another ftrace_ops.
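
Concretely, the diff below pulls the hash pointers out of ftrace_ops
into a new struct ftrace_ops_hash, and each ftrace_ops carries both a
local_hash and a func_hash pointer that normally refers to it. A toy
model of the indirection (simplified stand-in types, not the kernel's
actual definitions):

#include <stdio.h>

struct ftrace_hash { int dummy; };          /* opaque stand-in */

struct ftrace_ops_hash {
    struct ftrace_hash *notrace_hash;
    struct ftrace_hash *filter_hash;
    /* the kernel also embeds the regex_lock mutex here */
};

struct ftrace_ops {
    struct ftrace_ops_hash  local_hash;     /* this ops' own filter */
    struct ftrace_ops_hash *func_hash;      /* filter actually used */
};

int main(void)
{
    /* INIT_OPS_HASH() points func_hash at the ops' own local_hash... */
    struct ftrace_ops global_ops = { .func_hash = &global_ops.local_hash };
    /* ...while ASSIGN_OPS_HASH() lets another ops share that hash,
     * the way graph_ops is wired to global_ops in the diff below. */
    struct ftrace_ops graph_ops  = { .func_hash = &global_ops.local_hash };

    printf("shared filter: %d\n", graph_ops.func_hash == global_ops.func_hash);
    return 0;
}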

The first four patches deal with the change that allows ftrace_ops
to share a filter (and this needs to go to 3.16 as well).

The fifth patch fixes a bug that was also caused by the new changes,
but only for archs other than x86, and only if those archs implement
a direct call to the function_graph tracer, which none do yet but
will in the future. It does not need to go to stable, but it needs to
be fixed before the other archs update their code to allow direct
calls to the function_graph trampoline"
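
To see why that arch-facing fix matters: an arch's ftrace_make_nop()
validates the address currently patched into the call site before
replacing the call with a nop. A toy model (purely illustrative
names, not kernel code) of the bug fixed in __ftrace_replace_code():

#include <assert.h>

static unsigned long site_target = 0x1000;  /* what the site calls now */

/* Mimics an arch make-nop: verify the expected target, then patch. */
static int make_nop(unsigned long expected)
{
    if (expected != site_target)
        return -1;          /* validation fails; ftrace shuts down */
    site_target = 0;        /* call site is now a nop */
    return 0;
}

int main(void)
{
    unsigned long old_addr = 0x1000;  /* e.g. the graph trampoline */
    unsigned long new_addr = 0x2000;  /* the default trampoline */

    assert(make_nop(new_addr) == -1); /* the bug: new addr passed in */
    assert(make_nop(old_addr) == 0);  /* the fix: use the current addr */
    return 0;
}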

* tag 'trace-fixes-v3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
ftrace: Use current addr when converting to nop in __ftrace_replace_code()
ftrace: Fix function_profiler and function tracer together
ftrace: Fix up trampoline accounting with looping on hash ops
ftrace: Update all ftrace_ops for a ftrace_hash_ops update
ftrace: Allow ftrace_ops to use the hashes from other ops

2 files changed: +168 -92

include/linux/ftrace.h (+11 -3)
···
     FTRACE_OPS_FL_DELETED       = 1 << 8,
 };
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* The hash used to know what functions callbacks trace */
+struct ftrace_ops_hash {
+    struct ftrace_hash      *notrace_hash;
+    struct ftrace_hash      *filter_hash;
+    struct mutex            regex_lock;
+};
+#endif
+
 /*
  * Note, ftrace_ops can be referenced outside of RCU protection.
  * (Although, for perf, the control ops prevent that). If ftrace_ops is
···
     int __percpu            *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
     int                     nr_trampolines;
-    struct ftrace_hash      *notrace_hash;
-    struct ftrace_hash      *filter_hash;
+    struct ftrace_ops_hash  local_hash;
+    struct ftrace_ops_hash  *func_hash;
     struct ftrace_hash      *tramp_hash;
-    struct mutex            regex_lock;
     unsigned long           trampoline;
 #endif
 };

kernel/trace/ftrace.c (+157 -89)
···
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-#define INIT_REGEX_LOCK(opsname) \
-    .regex_lock = __MUTEX_INITIALIZER(opsname.regex_lock),
+#define INIT_OPS_HASH(opsname) \
+    .func_hash = &opsname.local_hash, \
+    .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
+#define ASSIGN_OPS_HASH(opsname, val) \
+    .func_hash = val, \
+    .local_hash.regex_lock = __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
 #else
-#define INIT_REGEX_LOCK(opsname)
+#define INIT_OPS_HASH(opsname)
+#define ASSIGN_OPS_HASH(opsname, val)
 #endif
 
 static struct ftrace_ops ftrace_list_end __read_mostly = {
     .func       = ftrace_stub,
     .flags      = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
+    INIT_OPS_HASH(ftrace_list_end)
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
···
 {
 #ifdef CONFIG_DYNAMIC_FTRACE
     if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
-        mutex_init(&ops->regex_lock);
+        mutex_init(&ops->local_hash.regex_lock);
+        ops->func_hash = &ops->local_hash;
         ops->flags |= FTRACE_OPS_FL_INITIALIZED;
     }
 #endif
···
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
     .func       = function_profile_call,
     .flags      = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-    INIT_REGEX_LOCK(ftrace_profile_ops)
+    INIT_OPS_HASH(ftrace_profile_ops)
 };
 
 static int register_ftrace_profiler(void)
···
 #define EMPTY_HASH  ((struct ftrace_hash *)&empty_hash)
 
 static struct ftrace_ops global_ops = {
-    .func                       = ftrace_stub,
-    .notrace_hash               = EMPTY_HASH,
-    .filter_hash                = EMPTY_HASH,
-    .flags                      = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-    INIT_REGEX_LOCK(global_ops)
+    .func                       = ftrace_stub,
+    .local_hash.notrace_hash    = EMPTY_HASH,
+    .local_hash.filter_hash     = EMPTY_HASH,
+    INIT_OPS_HASH(global_ops)
+    .flags                      = FTRACE_OPS_FL_RECURSION_SAFE |
+                                  FTRACE_OPS_FL_INITIALIZED,
 };
 
 struct ftrace_page {
···
 void ftrace_free_filter(struct ftrace_ops *ops)
 {
     ftrace_ops_init(ops);
-    free_ftrace_hash(ops->filter_hash);
-    free_ftrace_hash(ops->notrace_hash);
+    free_ftrace_hash(ops->func_hash->filter_hash);
+    free_ftrace_hash(ops->func_hash->notrace_hash);
 }
 
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
···
 }
 
 static void
-ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
-ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
+ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
···
      * Remove the current set, update the hash and add
      * them back.
      */
-    ftrace_hash_rec_disable(ops, enable);
+    ftrace_hash_rec_disable_modify(ops, enable);
 
     old_hash = *dst;
     rcu_assign_pointer(*dst, new_hash);
     free_ftrace_hash_rcu(old_hash);
 
-    ftrace_hash_rec_enable(ops, enable);
+    ftrace_hash_rec_enable_modify(ops, enable);
 
     return 0;
 }
···
         return 0;
 #endif
 
-    filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
-    notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
+    filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+    notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
     if ((ftrace_hash_empty(filter_hash) ||
          ftrace_lookup_ip(filter_hash, ip)) &&
···
 static void ftrace_remove_tramp(struct ftrace_ops *ops,
                                 struct dyn_ftrace *rec)
 {
-    struct ftrace_func_entry *entry;
-
-    entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
-    if (!entry)
+    /* If TRAMP is not set, no ops should have a trampoline for this */
+    if (!(rec->flags & FTRACE_FL_TRAMP))
         return;
 
+    rec->flags &= ~FTRACE_FL_TRAMP;
+
+    if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+         !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
+        ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
+        return;
     /*
      * The tramp_hash entry will be removed at time
      * of update.
      */
     ops->nr_trampolines--;
-    rec->flags &= ~FTRACE_FL_TRAMP;
 }
 
-static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
 {
     struct ftrace_ops *op;
 
+    /* If TRAMP is not set, no ops should have a trampoline for this */
+    if (!(rec->flags & FTRACE_FL_TRAMP))
+        return;
+
     do_for_each_ftrace_op(op, ftrace_ops_list) {
+        /*
+         * This function is called to clear other tramps
+         * not the one that is being updated.
+         */
+        if (op == ops)
+            continue;
         if (op->nr_trampolines)
             ftrace_remove_tramp(op, rec);
     } while_for_each_ftrace_op(op);
···
      * gets inversed.
      */
     if (filter_hash) {
-        hash = ops->filter_hash;
-        other_hash = ops->notrace_hash;
+        hash = ops->func_hash->filter_hash;
+        other_hash = ops->func_hash->notrace_hash;
         if (ftrace_hash_empty(hash))
             all = 1;
     } else {
         inc = !inc;
-        hash = ops->notrace_hash;
-        other_hash = ops->filter_hash;
+        hash = ops->func_hash->notrace_hash;
+        other_hash = ops->func_hash->filter_hash;
         /*
          * If the notrace hash has no items,
          * then there's nothing to do.
···
             /*
              * If we are adding another function callback
              * to this function, and the previous had a
-             * trampoline used, then we need to go back to
-             * the default trampoline.
+             * custom trampoline in use, then we need to go
+             * back to the default trampoline.
              */
-            rec->flags &= ~FTRACE_FL_TRAMP;
-
-            /* remove trampolines from any ops for this rec */
-            ftrace_clear_tramps(rec);
+            ftrace_clear_tramps(rec, ops);
         }
 
         /*
···
                                   int filter_hash)
 {
     __ftrace_hash_rec_update(ops, filter_hash, 1);
+}
+
+static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
+                                          int filter_hash, int inc)
+{
+    struct ftrace_ops *op;
+
+    __ftrace_hash_rec_update(ops, filter_hash, inc);
+
+    if (ops->func_hash != &global_ops.local_hash)
+        return;
+
+    /*
+     * If the ops shares the global_ops hash, then we need to update
+     * all ops that are enabled and use this hash.
+     */
+    do_for_each_ftrace_op(op, ftrace_ops_list) {
+        /* Already done */
+        if (op == ops)
+            continue;
+        if (op->func_hash == &global_ops.local_hash)
+            __ftrace_hash_rec_update(op, filter_hash, inc);
+    } while_for_each_ftrace_op(op);
+}
+
+static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
+                                           int filter_hash)
+{
+    ftrace_hash_rec_update_modify(ops, filter_hash, 0);
+}
+
+static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
+                                          int filter_hash)
+{
+    ftrace_hash_rec_update_modify(ops, filter_hash, 1);
 }
 
 static void print_ip_ins(const char *fmt, unsigned char *p)
···
     if (rec->flags & FTRACE_FL_TRAMP) {
         ops = ftrace_find_tramp_ops_new(rec);
         if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
-            pr_warning("Bad trampoline accounting at: %p (%pS)\n",
-                       (void *)rec->ip, (void *)rec->ip);
+            pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
+                    (void *)rec->ip, (void *)rec->ip, rec->flags);
             /* Ftrace is shutting down, return anything */
             return (unsigned long)FTRACE_ADDR;
         }
···
         return ftrace_make_call(rec, ftrace_addr);
 
     case FTRACE_UPDATE_MAKE_NOP:
-        return ftrace_make_nop(NULL, rec, ftrace_addr);
+        return ftrace_make_nop(NULL, rec, ftrace_old_addr);
 
     case FTRACE_UPDATE_MODIFY_CALL:
         return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
···
     } while_for_each_ftrace_rec();
 
     /* The number of recs in the hash must match nr_trampolines */
-    FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+    if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
+        pr_warn("count=%ld trampolines=%d\n",
+                ops->tramp_hash->count,
+                ops->nr_trampolines);
 
     return 0;
 }
···
      * Filter_hash being empty will default to trace module.
      * But notrace hash requires a test of individual module functions.
      */
-    return ftrace_hash_empty(ops->filter_hash) &&
-        ftrace_hash_empty(ops->notrace_hash);
+    return ftrace_hash_empty(ops->func_hash->filter_hash) &&
+        ftrace_hash_empty(ops->func_hash->notrace_hash);
 }
 
 /*
···
         return 0;
 
     /* The function must be in the filter */
-    if (!ftrace_hash_empty(ops->filter_hash) &&
-        !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+    if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
+        !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
         return 0;
 
     /* If in notrace hash, we ignore it too */
-    if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+    if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
         return 0;
 
     return 1;
···
     } else {
         rec = &iter->pg->records[iter->idx++];
         if (((iter->flags & FTRACE_ITER_FILTER) &&
-             !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
+             !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
 
             ((iter->flags & FTRACE_ITER_NOTRACE) &&
-             !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
+             !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
 
             ((iter->flags & FTRACE_ITER_ENABLED) &&
              !(rec->flags & FTRACE_FL_ENABLED))) {
···
      * functions are enabled.
      */
     if ((iter->flags & FTRACE_ITER_FILTER &&
-         ftrace_hash_empty(ops->filter_hash)) ||
+         ftrace_hash_empty(ops->func_hash->filter_hash)) ||
         (iter->flags & FTRACE_ITER_NOTRACE &&
-         ftrace_hash_empty(ops->notrace_hash))) {
+         ftrace_hash_empty(ops->func_hash->notrace_hash))) {
         if (*pos > 0)
             return t_hash_start(m, pos);
         iter->flags |= FTRACE_ITER_PRINTALL;
···
     iter->ops = ops;
     iter->flags = flag;
 
-    mutex_lock(&ops->regex_lock);
+    mutex_lock(&ops->func_hash->regex_lock);
 
     if (flag & FTRACE_ITER_NOTRACE)
-        hash = ops->notrace_hash;
+        hash = ops->func_hash->notrace_hash;
     else
-        hash = ops->filter_hash;
+        hash = ops->func_hash->filter_hash;
 
     if (file->f_mode & FMODE_WRITE) {
         const int size_bits = FTRACE_HASH_DEFAULT_BITS;
···
         file->private_data = iter;
 
  out_unlock:
-    mutex_unlock(&ops->regex_lock);
+    mutex_unlock(&ops->func_hash->regex_lock);
 
     return ret;
 }
···
 {
     .func       = function_trace_probe_call,
     .flags      = FTRACE_OPS_FL_INITIALIZED,
-    INIT_REGEX_LOCK(trace_probe_ops)
+    INIT_OPS_HASH(trace_probe_ops)
 };
 
 static int ftrace_probe_registered;
···
                           void *data)
 {
     struct ftrace_func_probe *entry;
-    struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+    struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
     struct ftrace_hash *hash;
     struct ftrace_page *pg;
     struct dyn_ftrace *rec;
···
     if (WARN_ON(not))
         return -EINVAL;
 
-    mutex_lock(&trace_probe_ops.regex_lock);
+    mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
     hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
     if (!hash) {
···
  out_unlock:
     mutex_unlock(&ftrace_lock);
  out:
-    mutex_unlock(&trace_probe_ops.regex_lock);
+    mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
     free_ftrace_hash(hash);
 
     return count;
···
     struct ftrace_func_entry *rec_entry;
     struct ftrace_func_probe *entry;
     struct ftrace_func_probe *p;
-    struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
+    struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
     struct list_head free_list;
     struct ftrace_hash *hash;
     struct hlist_node *tmp;
···
         return;
     }
 
-    mutex_lock(&trace_probe_ops.regex_lock);
+    mutex_lock(&trace_probe_ops.func_hash->regex_lock);
 
     hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
     if (!hash)
···
     mutex_unlock(&ftrace_lock);
 
  out_unlock:
-    mutex_unlock(&trace_probe_ops.regex_lock);
+    mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
     free_ftrace_hash(hash);
 }
···
     if (unlikely(ftrace_disabled))
         return -ENODEV;
 
-    mutex_lock(&ops->regex_lock);
+    mutex_lock(&ops->func_hash->regex_lock);
 
     if (enable)
-        orig_hash = &ops->filter_hash;
+        orig_hash = &ops->func_hash->filter_hash;
     else
-        orig_hash = &ops->notrace_hash;
+        orig_hash = &ops->func_hash->notrace_hash;
 
     if (reset)
         hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
···
     mutex_unlock(&ftrace_lock);
 
  out_regex_unlock:
-    mutex_unlock(&ops->regex_lock);
+    mutex_unlock(&ops->func_hash->regex_lock);
 
     free_ftrace_hash(hash);
     return ret;
···
 
     trace_parser_put(parser);
 
-    mutex_lock(&iter->ops->regex_lock);
+    mutex_lock(&iter->ops->func_hash->regex_lock);
 
     if (file->f_mode & FMODE_WRITE) {
         filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
 
         if (filter_hash)
-            orig_hash = &iter->ops->filter_hash;
+            orig_hash = &iter->ops->func_hash->filter_hash;
         else
-            orig_hash = &iter->ops->notrace_hash;
+            orig_hash = &iter->ops->func_hash->notrace_hash;
 
         mutex_lock(&ftrace_lock);
         ret = ftrace_hash_move(iter->ops, filter_hash,
···
         mutex_unlock(&ftrace_lock);
     }
 
-    mutex_unlock(&iter->ops->regex_lock);
+    mutex_unlock(&iter->ops->func_hash->regex_lock);
     free_ftrace_hash(iter->hash);
     kfree(iter);
···
 static struct ftrace_ops global_ops = {
     .func       = ftrace_stub,
     .flags      = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-    INIT_REGEX_LOCK(global_ops)
 };
 
 static int __init ftrace_nodyn_init(void)
···
 static struct ftrace_ops control_ops = {
     .func       = ftrace_ops_control_func,
     .flags      = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
-    INIT_REGEX_LOCK(control_ops)
+    INIT_OPS_HASH(control_ops)
 };
 
 static inline void
···
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
+static struct ftrace_ops graph_ops = {
+    .func           = ftrace_stub,
+    .flags          = FTRACE_OPS_FL_RECURSION_SAFE |
+                      FTRACE_OPS_FL_INITIALIZED |
+                      FTRACE_OPS_FL_STUB,
+#ifdef FTRACE_GRAPH_TRAMP_ADDR
+    .trampoline     = FTRACE_GRAPH_TRAMP_ADDR,
+#endif
+    ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
+};
+
 static int ftrace_graph_active;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
···
  */
 static void update_function_graph_func(void)
 {
-    if (ftrace_ops_list == &ftrace_list_end ||
-        (ftrace_ops_list == &global_ops &&
-         global_ops.next == &ftrace_list_end))
-        ftrace_graph_entry = __ftrace_graph_entry;
-    else
+    struct ftrace_ops *op;
+    bool do_test = false;
+
+    /*
+     * The graph and global ops share the same set of functions
+     * to test. If any other ops is on the list, then
+     * the graph tracing needs to test if its the function
+     * it should call.
+     */
+    do_for_each_ftrace_op(op, ftrace_ops_list) {
+        if (op != &global_ops && op != &graph_ops &&
+            op != &ftrace_list_end) {
+            do_test = true;
+            /* in double loop, break out with goto */
+            goto out;
+        }
+    } while_for_each_ftrace_op(op);
+ out:
+    if (do_test)
         ftrace_graph_entry = ftrace_graph_entry_test;
+    else
+        ftrace_graph_entry = __ftrace_graph_entry;
 }
 
 static struct notifier_block ftrace_suspend_notifier = {
···
     ftrace_graph_entry = ftrace_graph_entry_test;
     update_function_graph_func();
 
-    /* Function graph doesn't use the .func field of global_ops */
-    global_ops.flags |= FTRACE_OPS_FL_STUB;
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-    /* Optimize function graph calling (if implemented by arch) */
-    if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-        global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
-#endif
-
-    ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+    ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
 
 out:
     mutex_unlock(&ftrace_lock);
···
     ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
     ftrace_graph_entry = ftrace_graph_entry_stub;
     __ftrace_graph_entry = ftrace_graph_entry_stub;
-    ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
-    global_ops.flags &= ~FTRACE_OPS_FL_STUB;
-#ifdef CONFIG_DYNAMIC_FTRACE
-    if (FTRACE_GRAPH_TRAMP_ADDR != 0)
-        global_ops.trampoline = 0;
-#endif
+    ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
     unregister_pm_notifier(&ftrace_suspend_notifier);
     unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
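
To make the shared-hash update semantics concrete: a self-contained
toy model (hypothetical types and names; the enabled-ops bookkeeping
is omitted) of the propagation that ftrace_hash_rec_update_modify()
performs in the hunk above:

#include <stdio.h>

#define MAX_OPS     4
#define GLOBAL_HASH 0

struct ops {
    const char *name;
    int         hash_id;   /* stand-in for ops->func_hash */
    int         recs;      /* stand-in for per-record ref counts */
};

static struct ops list[MAX_OPS] = {
    { "global_ops", GLOBAL_HASH, 0 },
    { "graph_ops",  GLOBAL_HASH, 0 },  /* shares the global hash */
    { "probe_ops",  1,           0 },  /* private hash */
};

static void hash_rec_update_modify(struct ops *ops, int inc)
{
    ops->recs += inc;
    if (ops->hash_id != GLOBAL_HASH)
        return;
    /* Mirrors the do_for_each_ftrace_op() loop: every other ops
     * using the global hash must be updated too. */
    for (int i = 0; i < MAX_OPS; i++) {
        struct ops *op = &list[i];

        if (!op->name || op == ops)
            continue;
        if (op->hash_id == GLOBAL_HASH)
            op->recs += inc;
    }
}

int main(void)
{
    hash_rec_update_modify(&list[0], 1);   /* enable via global_ops */
    for (int i = 0; i < 3; i++)
        printf("%s recs=%d\n", list[i].name, list[i].recs);
    /* graph_ops is updated alongside global_ops; probe_ops is not. */
    return 0;
}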