Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'probes-v7.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull probes fixes from Masami Hiramatsu:
"fprobe bug fixes:

- Prevent re-registration

Add an earlier check to reject re-registering an already active
fprobe before its state is modified during the initialization phase

- Robustness in failure paths:
- Ensure fprobes are correctly removed from all internal tables
and properly RCU-freed during registration failure
- Make unregister_fprobe() proceed with unregistration even if
temporary memory allocation fails

- RCU safety in module unloading

Avoid a potential "sleep in RCU" warning by removing a kcalloc()
call in the module notifier path. This also tries to remove
fprobe_hash_node even if memory allocation fails.

- Type-aware unregistration

Fix a bug where unregistering an fprobe did not account for
different types (entry-only vs entry-exit) at the same address,
which previously left "junk" entries in the underlying
ftrace/fgraph ops

- Unregistration of empty ftrace_ops

Avoid unneeded performance overhead caused by leaving a registered
ftrace_ops with an empty filter - which means 'trace all functions'. This
counts the remaining entries and unregisters the ftrace_ops when it becomes empty.

Two new selftests to check the above fixes:

- Module Unloading Test:

Specifically verifies that fprobe events on a module are correctly
cleaned up and do not trigger 'trace-all' behavior when the module
is removed.

- Multiple Fprobe Events Test:

Ensures that having multiple fprobes on the same function correctly
manages the ftrace hash map during removal"

* tag 'probes-v7.1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
selftests/ftrace: Add a testcase for multiple fprobe events
selftests/ftrace: Add a testcase for fprobe events on module
tracing/fprobe: Fix to unregister ftrace_ops if it is empty on module unloading
tracing/fprobe: Check the same type fprobe on table as the unregistered one
tracing/fprobe: Avoid kcalloc() in rcu_read_lock section
tracing/fprobe: Remove fprobe from hash in failure path
tracing/fprobe: Unregister fprobe even if memory allocation fails
tracing/fprobe: Reject registration of a registered fprobe before init

+471 -167
+315 -167
kernel/trace/fprobe.c
··· 4 4 */ 5 5 #define pr_fmt(fmt) "fprobe: " fmt 6 6 7 + #include <linux/cleanup.h> 7 8 #include <linux/err.h> 8 9 #include <linux/fprobe.h> 9 10 #include <linux/kallsyms.h> ··· 79 78 }; 80 79 81 80 /* Node insertion and deletion requires the fprobe_mutex */ 82 - static int insert_fprobe_node(struct fprobe_hlist_node *node) 81 + static int __insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp) 83 82 { 83 + int ret; 84 + 84 85 lockdep_assert_held(&fprobe_mutex); 85 86 86 - return rhltable_insert(&fprobe_ip_table, &node->hlist, fprobe_rht_params); 87 + ret = rhltable_insert(&fprobe_ip_table, &node->hlist, fprobe_rht_params); 88 + /* Set the fprobe pointer if insertion was successful. */ 89 + if (!ret) 90 + WRITE_ONCE(node->fp, fp); 91 + return ret; 87 92 } 88 93 89 - /* Return true if there are synonims */ 90 - static bool delete_fprobe_node(struct fprobe_hlist_node *node) 94 + static void __delete_fprobe_node(struct fprobe_hlist_node *node) 91 95 { 92 96 lockdep_assert_held(&fprobe_mutex); 93 - bool ret; 94 97 95 - /* Avoid double deleting */ 98 + /* Avoid double deleting and non-inserted nodes */ 96 99 if (READ_ONCE(node->fp) != NULL) { 97 100 WRITE_ONCE(node->fp, NULL); 98 101 rhltable_remove(&fprobe_ip_table, &node->hlist, 99 102 fprobe_rht_params); 100 103 } 101 - 102 - rcu_read_lock(); 103 - ret = !!rhltable_lookup(&fprobe_ip_table, &node->addr, 104 - fprobe_rht_params); 105 - rcu_read_unlock(); 106 - 107 - return ret; 108 104 } 109 105 110 106 /* Check existence of the fprobe */ 111 - static bool is_fprobe_still_exist(struct fprobe *fp) 107 + static bool fprobe_registered(struct fprobe *fp) 112 108 { 113 109 struct hlist_head *head; 114 110 struct fprobe_hlist *fph; ··· 118 120 } 119 121 return false; 120 122 } 121 - NOKPROBE_SYMBOL(is_fprobe_still_exist); 123 + NOKPROBE_SYMBOL(fprobe_registered); 122 124 123 125 static int add_fprobe_hash(struct fprobe *fp) 124 126 { ··· 129 131 130 132 if (WARN_ON_ONCE(!fph)) 131 133 return -EINVAL; 132 - 
133 - if (is_fprobe_still_exist(fp)) 134 - return -EEXIST; 135 134 136 135 head = &fprobe_table[hash_ptr(fp, FPROBE_HASH_BITS)]; 137 136 hlist_add_head_rcu(&fp->hlist_array->hlist, head); ··· 144 149 if (WARN_ON_ONCE(!fph)) 145 150 return -EINVAL; 146 151 147 - if (!is_fprobe_still_exist(fp)) 152 + if (!fprobe_registered(fp)) 148 153 return -ENOENT; 149 154 150 155 fph->fp = NULL; ··· 250 255 return ret; 251 256 } 252 257 258 + static int fprobe_fgraph_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops, 259 + struct ftrace_regs *fregs); 260 + static void fprobe_return(struct ftrace_graph_ret *trace, 261 + struct fgraph_ops *gops, 262 + struct ftrace_regs *fregs); 263 + 264 + static struct fgraph_ops fprobe_graph_ops = { 265 + .entryfunc = fprobe_fgraph_entry, 266 + .retfunc = fprobe_return, 267 + }; 268 + /* Number of fgraph fprobe nodes */ 269 + static int nr_fgraph_fprobes; 270 + /* Is fprobe_graph_ops registered? */ 271 + static bool fprobe_graph_registered; 272 + 273 + /* Add @addrs to the ftrace filter and register fgraph if needed. */ 274 + static int fprobe_graph_add_ips(unsigned long *addrs, int num) 275 + { 276 + int ret; 277 + 278 + lockdep_assert_held(&fprobe_mutex); 279 + 280 + ret = ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 0, 0); 281 + if (ret) 282 + return ret; 283 + 284 + if (!fprobe_graph_registered) { 285 + ret = register_ftrace_graph(&fprobe_graph_ops); 286 + if (WARN_ON_ONCE(ret)) { 287 + ftrace_free_filter(&fprobe_graph_ops.ops); 288 + return ret; 289 + } 290 + fprobe_graph_registered = true; 291 + } 292 + return 0; 293 + } 294 + 295 + static void __fprobe_graph_unregister(void) 296 + { 297 + if (fprobe_graph_registered) { 298 + unregister_ftrace_graph(&fprobe_graph_ops); 299 + ftrace_free_filter(&fprobe_graph_ops.ops); 300 + fprobe_graph_registered = false; 301 + } 302 + } 303 + 304 + /* Remove @addrs from the ftrace filter and unregister fgraph if possible. 
*/ 305 + static void fprobe_graph_remove_ips(unsigned long *addrs, int num) 306 + { 307 + lockdep_assert_held(&fprobe_mutex); 308 + 309 + if (!nr_fgraph_fprobes) 310 + __fprobe_graph_unregister(); 311 + else if (num) 312 + ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0); 313 + } 314 + 253 315 #if defined(CONFIG_DYNAMIC_FTRACE_WITH_ARGS) || defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) 316 + 254 317 /* ftrace_ops callback, this processes fprobes which have only entry_handler. */ 255 318 static void fprobe_ftrace_entry(unsigned long ip, unsigned long parent_ip, 256 319 struct ftrace_ops *ops, struct ftrace_regs *fregs) ··· 351 298 .func = fprobe_ftrace_entry, 352 299 .flags = FTRACE_OPS_FL_SAVE_ARGS, 353 300 }; 354 - static int fprobe_ftrace_active; 301 + /* Number of ftrace fprobe nodes */ 302 + static int nr_ftrace_fprobes; 303 + /* Is fprobe_ftrace_ops registered? */ 304 + static bool fprobe_ftrace_registered; 355 305 356 306 static int fprobe_ftrace_add_ips(unsigned long *addrs, int num) 357 307 { ··· 366 310 if (ret) 367 311 return ret; 368 312 369 - if (!fprobe_ftrace_active) { 313 + if (!fprobe_ftrace_registered) { 370 314 ret = register_ftrace_function(&fprobe_ftrace_ops); 371 315 if (ret) { 372 316 ftrace_free_filter(&fprobe_ftrace_ops); 373 317 return ret; 374 318 } 319 + fprobe_ftrace_registered = true; 375 320 } 376 - fprobe_ftrace_active++; 377 321 return 0; 322 + } 323 + 324 + static void __fprobe_ftrace_unregister(void) 325 + { 326 + if (fprobe_ftrace_registered) { 327 + unregister_ftrace_function(&fprobe_ftrace_ops); 328 + ftrace_free_filter(&fprobe_ftrace_ops); 329 + fprobe_ftrace_registered = false; 330 + } 378 331 } 379 332 380 333 static void fprobe_ftrace_remove_ips(unsigned long *addrs, int num) 381 334 { 382 335 lockdep_assert_held(&fprobe_mutex); 383 336 384 - fprobe_ftrace_active--; 385 - if (!fprobe_ftrace_active) 386 - unregister_ftrace_function(&fprobe_ftrace_ops); 387 - if (num) 337 + if (!nr_ftrace_fprobes) 338 + 
__fprobe_ftrace_unregister(); 339 + else if (num) 388 340 ftrace_set_filter_ips(&fprobe_ftrace_ops, addrs, num, 1, 0); 389 341 } 390 342 ··· 401 337 return !fp->exit_handler; 402 338 } 403 339 404 - #ifdef CONFIG_MODULES 405 - static void fprobe_set_ips(unsigned long *ips, unsigned int cnt, int remove, 406 - int reset) 340 + /* Node insertion and deletion requires the fprobe_mutex */ 341 + static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp) 407 342 { 408 - ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, remove, reset); 409 - ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, remove, reset); 343 + int ret; 344 + 345 + lockdep_assert_held(&fprobe_mutex); 346 + 347 + ret = __insert_fprobe_node(node, fp); 348 + if (!ret) { 349 + if (fprobe_is_ftrace(fp)) 350 + nr_ftrace_fprobes++; 351 + else 352 + nr_fgraph_fprobes++; 353 + } 354 + 355 + return ret; 356 + } 357 + 358 + static void delete_fprobe_node(struct fprobe_hlist_node *node) 359 + { 360 + struct fprobe *fp; 361 + 362 + lockdep_assert_held(&fprobe_mutex); 363 + 364 + fp = READ_ONCE(node->fp); 365 + if (fp) { 366 + if (fprobe_is_ftrace(fp)) 367 + nr_ftrace_fprobes--; 368 + else 369 + nr_fgraph_fprobes--; 370 + } 371 + __delete_fprobe_node(node); 372 + } 373 + 374 + static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace) 375 + { 376 + struct rhlist_head *head, *pos; 377 + struct fprobe_hlist_node *node; 378 + struct fprobe *fp; 379 + 380 + guard(rcu)(); 381 + head = rhltable_lookup(&fprobe_ip_table, &ip, 382 + fprobe_rht_params); 383 + if (!head) 384 + return false; 385 + /* We have to check the same type on the list. 
*/ 386 + rhl_for_each_entry_rcu(node, pos, head, hlist) { 387 + if (node->addr != ip) 388 + break; 389 + fp = READ_ONCE(node->fp); 390 + if (likely(fp)) { 391 + if ((!ftrace && fp->exit_handler) || 392 + (ftrace && !fp->exit_handler)) 393 + return true; 394 + } 395 + } 396 + 397 + return false; 398 + } 399 + 400 + #ifdef CONFIG_MODULES 401 + static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt) 402 + { 403 + if (!nr_fgraph_fprobes) 404 + __fprobe_graph_unregister(); 405 + else if (cnt) 406 + ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0); 407 + 408 + if (!nr_ftrace_fprobes) 409 + __fprobe_ftrace_unregister(); 410 + else if (cnt) 411 + ftrace_set_filter_ips(&fprobe_ftrace_ops, ips, cnt, 1, 0); 410 412 } 411 413 #endif 412 414 #else ··· 490 360 return false; 491 361 } 492 362 493 - #ifdef CONFIG_MODULES 494 - static void fprobe_set_ips(unsigned long *ips, unsigned int cnt, int remove, 495 - int reset) 363 + /* Node insertion and deletion requires the fprobe_mutex */ 364 + static int insert_fprobe_node(struct fprobe_hlist_node *node, struct fprobe *fp) 496 365 { 497 - ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, remove, reset); 366 + int ret; 367 + 368 + lockdep_assert_held(&fprobe_mutex); 369 + 370 + ret = __insert_fprobe_node(node, fp); 371 + if (!ret) 372 + nr_fgraph_fprobes++; 373 + 374 + return ret; 375 + } 376 + 377 + static void delete_fprobe_node(struct fprobe_hlist_node *node) 378 + { 379 + struct fprobe *fp; 380 + 381 + lockdep_assert_held(&fprobe_mutex); 382 + 383 + fp = READ_ONCE(node->fp); 384 + if (fp) 385 + nr_fgraph_fprobes--; 386 + __delete_fprobe_node(node); 387 + } 388 + 389 + static bool fprobe_exists_on_hash(unsigned long ip, bool ftrace __maybe_unused) 390 + { 391 + struct rhlist_head *head, *pos; 392 + struct fprobe_hlist_node *node; 393 + struct fprobe *fp; 394 + 395 + guard(rcu)(); 396 + head = rhltable_lookup(&fprobe_ip_table, &ip, 397 + fprobe_rht_params); 398 + if (!head) 399 + return false; 400 + /* We 
only need to check fp is there. */ 401 + rhl_for_each_entry_rcu(node, pos, head, hlist) { 402 + if (node->addr != ip) 403 + break; 404 + fp = READ_ONCE(node->fp); 405 + if (likely(fp)) 406 + return true; 407 + } 408 + 409 + return false; 410 + } 411 + 412 + #ifdef CONFIG_MODULES 413 + static void fprobe_remove_ips(unsigned long *ips, unsigned int cnt) 414 + { 415 + if (!nr_fgraph_fprobes) 416 + __fprobe_graph_unregister(); 417 + else if (cnt) 418 + ftrace_set_filter_ips(&fprobe_graph_ops.ops, ips, cnt, 1, 0); 498 419 } 499 420 #endif 500 421 #endif /* !CONFIG_DYNAMIC_FTRACE_WITH_ARGS && !CONFIG_DYNAMIC_FTRACE_WITH_REGS */ ··· 661 480 if (!fp) 662 481 break; 663 482 curr += FPROBE_HEADER_SIZE_IN_LONG; 664 - if (is_fprobe_still_exist(fp) && !fprobe_disabled(fp)) { 483 + if (fprobe_registered(fp) && !fprobe_disabled(fp)) { 665 484 if (WARN_ON_ONCE(curr + size > size_words)) 666 485 break; 667 486 fp->exit_handler(fp, trace->func, ret_ip, fregs, ··· 673 492 } 674 493 NOKPROBE_SYMBOL(fprobe_return); 675 494 676 - static struct fgraph_ops fprobe_graph_ops = { 677 - .entryfunc = fprobe_fgraph_entry, 678 - .retfunc = fprobe_return, 679 - }; 680 - static int fprobe_graph_active; 681 - 682 - /* Add @addrs to the ftrace filter and register fgraph if needed. */ 683 - static int fprobe_graph_add_ips(unsigned long *addrs, int num) 684 - { 685 - int ret; 686 - 687 - lockdep_assert_held(&fprobe_mutex); 688 - 689 - ret = ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 0, 0); 690 - if (ret) 691 - return ret; 692 - 693 - if (!fprobe_graph_active) { 694 - ret = register_ftrace_graph(&fprobe_graph_ops); 695 - if (WARN_ON_ONCE(ret)) { 696 - ftrace_free_filter(&fprobe_graph_ops.ops); 697 - return ret; 698 - } 699 - } 700 - fprobe_graph_active++; 701 - return 0; 702 - } 703 - 704 - /* Remove @addrs from the ftrace filter and unregister fgraph if possible. 
*/ 705 - static void fprobe_graph_remove_ips(unsigned long *addrs, int num) 706 - { 707 - lockdep_assert_held(&fprobe_mutex); 708 - 709 - fprobe_graph_active--; 710 - /* Q: should we unregister it ? */ 711 - if (!fprobe_graph_active) 712 - unregister_ftrace_graph(&fprobe_graph_ops); 713 - 714 - if (num) 715 - ftrace_set_filter_ips(&fprobe_graph_ops.ops, addrs, num, 1, 0); 716 - } 717 - 718 495 #ifdef CONFIG_MODULES 719 496 720 - #define FPROBE_IPS_BATCH_INIT 8 497 + #define FPROBE_IPS_BATCH_INIT 128 721 498 /* instruction pointer address list */ 722 499 struct fprobe_addr_list { 723 500 int index; ··· 683 544 unsigned long *addrs; 684 545 }; 685 546 686 - static int fprobe_addr_list_add(struct fprobe_addr_list *alist, unsigned long addr) 687 - { 688 - unsigned long *addrs; 689 - 690 - /* Previously we failed to expand the list. */ 691 - if (alist->index == alist->size) 692 - return -ENOSPC; 693 - 694 - alist->addrs[alist->index++] = addr; 695 - if (alist->index < alist->size) 696 - return 0; 697 - 698 - /* Expand the address list */ 699 - addrs = kcalloc(alist->size * 2, sizeof(*addrs), GFP_KERNEL); 700 - if (!addrs) 701 - return -ENOMEM; 702 - 703 - memcpy(addrs, alist->addrs, alist->size * sizeof(*addrs)); 704 - alist->size *= 2; 705 - kfree(alist->addrs); 706 - alist->addrs = addrs; 707 - 708 - return 0; 709 - } 710 - 711 - static void fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node, 547 + static int fprobe_remove_node_in_module(struct module *mod, struct fprobe_hlist_node *node, 712 548 struct fprobe_addr_list *alist) 713 549 { 550 + lockdep_assert_in_rcu_read_lock(); 551 + 714 552 if (!within_module(node->addr, mod)) 715 - return; 716 - if (delete_fprobe_node(node)) 717 - return; 553 + return 0; 554 + 555 + delete_fprobe_node(node); 556 + /* If no address list is available, we can't track this address. */ 557 + if (!alist->addrs) 558 + return 0; 718 559 /* 719 - * If failed to update alist, just continue to update hlist. 
720 - * Therefore, at list user handler will not hit anymore. 560 + * Don't care the type here, because all fprobes on the same 561 + * address must be removed eventually. 721 562 */ 722 - fprobe_addr_list_add(alist, node->addr); 563 + if (!rhltable_lookup(&fprobe_ip_table, &node->addr, fprobe_rht_params)) { 564 + alist->addrs[alist->index++] = node->addr; 565 + if (alist->index == alist->size) 566 + return -ENOSPC; 567 + } 568 + 569 + return 0; 723 570 } 724 571 725 572 /* Handle module unloading to manage fprobe_ip_table. */ ··· 716 591 struct fprobe_hlist_node *node; 717 592 struct rhashtable_iter iter; 718 593 struct module *mod = data; 594 + bool retry; 719 595 720 596 if (val != MODULE_STATE_GOING) 721 597 return NOTIFY_DONE; 722 598 723 599 alist.addrs = kcalloc(alist.size, sizeof(*alist.addrs), GFP_KERNEL); 724 - /* If failed to alloc memory, we can not remove ips from hash. */ 725 - if (!alist.addrs) 726 - return NOTIFY_DONE; 600 + /* 601 + * If failed to alloc memory, ftrace_ops will not be able to remove ips from 602 + * hash, but we can still remove nodes from fprobe_ip_table, so we can avoid 603 + * the potential wrong callback. So just print a warning here and try to 604 + * continue without address list. 
605 + */ 606 + WARN_ONCE(!alist.addrs, 607 + "Failed to allocate memory for fprobe_addr_list, ftrace_ops will not be updated"); 727 608 728 609 mutex_lock(&fprobe_mutex); 610 + again: 611 + retry = false; 612 + alist.index = 0; 729 613 rhltable_walk_enter(&fprobe_ip_table, &iter); 730 614 do { 731 615 rhashtable_walk_start(&iter); 732 616 733 617 while ((node = rhashtable_walk_next(&iter)) && !IS_ERR(node)) 734 - fprobe_remove_node_in_module(mod, node, &alist); 618 + if (fprobe_remove_node_in_module(mod, node, &alist) < 0) { 619 + retry = true; 620 + break; 621 + } 735 622 736 623 rhashtable_walk_stop(&iter); 737 - } while (node == ERR_PTR(-EAGAIN)); 624 + } while (node == ERR_PTR(-EAGAIN) && !retry); 738 625 rhashtable_walk_exit(&iter); 626 + /* Remove any ips from hash table(s) */ 627 + fprobe_remove_ips(alist.addrs, alist.index); 628 + /* 629 + * If we break rhashtable walk loop except for -EAGAIN, we need 630 + * to restart looping from start for safety. Anyway, this is 631 + * not a hotpath. 632 + */ 633 + if (retry) 634 + goto again; 739 635 740 - if (alist.index > 0) 741 - fprobe_set_ips(alist.addrs, alist.index, 1, 0); 742 636 mutex_unlock(&fprobe_mutex); 743 637 744 638 kfree(alist.addrs); ··· 901 757 fp->hlist_array = hlist_array; 902 758 hlist_array->fp = fp; 903 759 for (i = 0; i < num; i++) { 904 - hlist_array->array[i].fp = fp; 905 760 addr = ftrace_location(addrs[i]); 906 761 if (!addr) { 907 762 fprobe_fail_cleanup(fp); ··· 964 821 } 965 822 EXPORT_SYMBOL_GPL(register_fprobe); 966 823 824 + static int unregister_fprobe_nolock(struct fprobe *fp); 825 + 967 826 /** 968 827 * register_fprobe_ips() - Register fprobe to ftrace by address. 969 828 * @fp: A fprobe data structure to be registered. 
··· 984 839 struct fprobe_hlist *hlist_array; 985 840 int ret, i; 986 841 842 + guard(mutex)(&fprobe_mutex); 843 + if (fprobe_registered(fp)) 844 + return -EEXIST; 845 + 987 846 ret = fprobe_init(fp, addrs, num); 988 847 if (ret) 989 848 return ret; 990 849 991 - mutex_lock(&fprobe_mutex); 992 - 993 - hlist_array = fp->hlist_array; 994 850 if (fprobe_is_ftrace(fp)) 995 851 ret = fprobe_ftrace_add_ips(addrs, num); 996 852 else 997 853 ret = fprobe_graph_add_ips(addrs, num); 998 - 999 - if (!ret) { 1000 - add_fprobe_hash(fp); 1001 - for (i = 0; i < hlist_array->size; i++) { 1002 - ret = insert_fprobe_node(&hlist_array->array[i]); 1003 - if (ret) 1004 - break; 1005 - } 1006 - /* fallback on insert error */ 1007 - if (ret) { 1008 - for (i--; i >= 0; i--) 1009 - delete_fprobe_node(&hlist_array->array[i]); 1010 - } 1011 - } 1012 - mutex_unlock(&fprobe_mutex); 1013 - 1014 - if (ret) 854 + if (ret) { 1015 855 fprobe_fail_cleanup(fp); 856 + return ret; 857 + } 858 + 859 + hlist_array = fp->hlist_array; 860 + ret = add_fprobe_hash(fp); 861 + for (i = 0; i < hlist_array->size && !ret; i++) 862 + ret = insert_fprobe_node(&hlist_array->array[i], fp); 863 + 864 + if (ret) { 865 + unregister_fprobe_nolock(fp); 866 + /* In error case, wait for clean up safely. */ 867 + synchronize_rcu(); 868 + } 1016 869 1017 870 return ret; 1018 871 } ··· 1054 911 return true; 1055 912 } 1056 913 1057 - /** 1058 - * unregister_fprobe() - Unregister fprobe. 1059 - * @fp: A fprobe data structure to be unregistered. 1060 - * 1061 - * Unregister fprobe (and remove ftrace hooks from the function entries). 1062 - * 1063 - * Return 0 if @fp is unregistered successfully, -errno if not. 
1064 - */ 1065 - int unregister_fprobe(struct fprobe *fp) 914 + static int unregister_fprobe_nolock(struct fprobe *fp) 1066 915 { 1067 - struct fprobe_hlist *hlist_array; 916 + struct fprobe_hlist *hlist_array = fp->hlist_array; 1068 917 unsigned long *addrs = NULL; 1069 - int ret = 0, i, count; 918 + int i, count; 1070 919 1071 - mutex_lock(&fprobe_mutex); 1072 - if (!fp || !is_fprobe_still_exist(fp)) { 1073 - ret = -EINVAL; 1074 - goto out; 1075 - } 1076 - 1077 - hlist_array = fp->hlist_array; 1078 920 addrs = kcalloc(hlist_array->size, sizeof(unsigned long), GFP_KERNEL); 1079 - if (!addrs) { 1080 - ret = -ENOMEM; /* TODO: Fallback to one-by-one loop */ 1081 - goto out; 1082 - } 921 + /* 922 + * This will remove fprobe_hash_node from the hash table even if 923 + * memory allocation fails. However, ftrace_ops will not be updated. 924 + * Anyway, when the last fprobe is unregistered, ftrace_ops is also 925 + * unregistered. 926 + */ 927 + if (!addrs) 928 + pr_warn("Failed to allocate working array. ftrace_ops may not sync.\n"); 1083 929 1084 930 /* Remove non-synonim ips from table and hash */ 1085 931 count = 0; 1086 932 for (i = 0; i < hlist_array->size; i++) { 1087 - if (!delete_fprobe_node(&hlist_array->array[i])) 933 + delete_fprobe_node(&hlist_array->array[i]); 934 + if (addrs && !fprobe_exists_on_hash(hlist_array->array[i].addr, 935 + fprobe_is_ftrace(fp))) 1088 936 addrs[count++] = hlist_array->array[i].addr; 1089 937 } 1090 938 del_fprobe_hash(fp); ··· 1087 953 1088 954 kfree_rcu(hlist_array, rcu); 1089 955 fp->hlist_array = NULL; 1090 - 1091 - out: 1092 - mutex_unlock(&fprobe_mutex); 1093 - 1094 956 kfree(addrs); 1095 - return ret; 957 + 958 + return 0; 959 + } 960 + 961 + /** 962 + * unregister_fprobe() - Unregister fprobe. 963 + * @fp: A fprobe data structure to be unregistered. 964 + * 965 + * Unregister fprobe (and remove ftrace hooks from the function entries). 966 + * 967 + * Return 0 if @fp is unregistered successfully, -errno if not. 
968 + */ 969 + int unregister_fprobe(struct fprobe *fp) 970 + { 971 + guard(mutex)(&fprobe_mutex); 972 + if (!fp || !fprobe_registered(fp)) 973 + return -EINVAL; 974 + 975 + return unregister_fprobe_nolock(fp); 1096 976 } 1097 977 EXPORT_SYMBOL_GPL(unregister_fprobe); 1098 978
+87
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_fprobe_module.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: Generic dynamic event - add/remove fprobe events on module 4 + # requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README enabled_functions 5 + 6 + rmmod trace-events-sample ||: 7 + if ! modprobe trace-events-sample ; then 8 + echo "No trace-events sample module - please make CONFIG_SAMPLE_TRACE_EVENTS=m" 9 + exit_unresolved; 10 + fi 11 + trap "lsmod | grep -q trace_events_sample && rmmod trace-events-sample" EXIT 12 + 13 + echo 0 > events/enable 14 + echo > dynamic_events 15 + 16 + FUNC1='foo_bar*' 17 + FUNC2='vfs_read' 18 + 19 + :;: "Add an event on the test module" ;: 20 + echo "f:test1 $FUNC1" >> dynamic_events 21 + echo 1 > events/fprobes/test1/enable 22 + 23 + :;: "Ensure it is enabled" ;: 24 + funcs=`cat enabled_functions | wc -l` 25 + test $funcs -ne 0 26 + 27 + :;: "Check the enabled_functions is cleared on unloading" ;: 28 + rmmod trace-events-sample 29 + funcs=`cat enabled_functions | wc -l` 30 + test $funcs -eq 0 31 + 32 + :;: "Check it is kept clean" ;: 33 + modprobe trace-events-sample 34 + echo 1 > events/fprobes/test1/enable || echo "OK" 35 + funcs=`cat enabled_functions | wc -l` 36 + test $funcs -eq 0 37 + 38 + :;: "Add another event not on the test module" ;: 39 + echo "f:test2 $FUNC2" >> dynamic_events 40 + echo 1 > events/fprobes/test2/enable 41 + 42 + :;: "Ensure it is enabled" ;: 43 + ofuncs=`cat enabled_functions | wc -l` 44 + test $ofuncs -ne 0 45 + 46 + :;: "Disable and remove the first event" 47 + echo 0 > events/fprobes/test1/enable 48 + echo "-:fprobes/test1" >> dynamic_events 49 + funcs=`cat enabled_functions | wc -l` 50 + test $ofuncs -eq $funcs 51 + 52 + :;: "Disable and remove other events" ;: 53 + echo 0 > events/fprobes/enable 54 + echo > dynamic_events 55 + funcs=`cat enabled_functions | wc -l` 56 + test $funcs -eq 0 57 + 58 + rmmod trace-events-sample 59 + 60 + :;: "Add events on kernel and test module" ;: 61 + modprobe 
trace-events-sample 62 + echo "f:test1 $FUNC1" >> dynamic_events 63 + echo 1 > events/fprobes/test1/enable 64 + echo "f:test2 $FUNC2" >> dynamic_events 65 + echo 1 > events/fprobes/test2/enable 66 + ofuncs=`cat enabled_functions | wc -l` 67 + test $ofuncs -ne 0 68 + 69 + :;: "Unload module (ftrace entry should be removed)" ;: 70 + rmmod trace-events-sample 71 + funcs=`cat enabled_functions | wc -l` 72 + test $funcs -ne 0 73 + test $ofuncs -ne $funcs 74 + 75 + :;: "Disable and remove core-kernel fprobe event" ;: 76 + echo 0 > events/fprobes/test2/enable 77 + echo "-:fprobes/test2" >> dynamic_events 78 + 79 + :;: "Ensure ftrace is disabled." ;: 80 + funcs=`cat enabled_functions | wc -l` 81 + test $funcs -eq 0 82 + 83 + echo 0 > events/fprobes/enable 84 + echo > dynamic_events 85 + 86 + trap "" EXIT 87 + clear_trace
+69
tools/testing/selftests/ftrace/test.d/dynevent/add_remove_multiple_fprobe.tc
··· 1 + #!/bin/sh 2 + # SPDX-License-Identifier: GPL-2.0 3 + # description: Generic dynamic event - add/remove multiple fprobe events on the same function 4 + # requires: dynamic_events "f[:[<group>/][<event>]] <func-name>[%return] [<args>]":README enabled_functions 5 + 6 + echo 0 > events/enable 7 + echo > dynamic_events 8 + 9 + PLACE=vfs_read 10 + PLACE2=vfs_open 11 + 12 + :;: 'Ensure no other ftrace user' ;: 13 + test `cat enabled_functions | wc -l` -eq 0 || exit_unresolved 14 + 15 + :;: 'Test case 1: leave entry event' ;: 16 + :;: 'Add entry and exit events on the same place' ;: 17 + echo "f:event1 ${PLACE}" >> dynamic_events 18 + echo "f:event2 ${PLACE}%return" >> dynamic_events 19 + 20 + :;: 'Enable both of them' ;: 21 + echo 1 > events/fprobes/enable 22 + test `cat enabled_functions | wc -l` -eq 1 23 + 24 + :;: 'Disable and remove exit event' ;: 25 + echo 0 > events/fprobes/event2/enable 26 + echo -:event2 >> dynamic_events 27 + 28 + :;: 'Disable and remove all events' ;: 29 + echo 0 > events/fprobes/enable 30 + echo > dynamic_events 31 + 32 + :;: 'Add another event' ;: 33 + echo "f:event3 ${PLACE2}%return" > dynamic_events 34 + echo 1 > events/fprobes/enable 35 + test `cat enabled_functions | wc -l` -eq 1 36 + 37 + :;: 'No other ftrace user' ;: 38 + echo 0 > events/fprobes/enable 39 + echo > dynamic_events 40 + test `cat enabled_functions | wc -l` -eq 0 41 + 42 + :;: 'Test case 2: leave exit event' ;: 43 + :;: 'Add entry and exit events on the same place' ;: 44 + echo "f:event1 ${PLACE}" >> dynamic_events 45 + echo "f:event2 ${PLACE}%return" >> dynamic_events 46 + 47 + :;: 'Enable both of them' ;: 48 + echo 1 > events/fprobes/enable 49 + test `cat enabled_functions | wc -l` -eq 1 50 + 51 + :;: 'Disable and remove entry event' ;: 52 + echo 0 > events/fprobes/event1/enable 53 + echo -:event1 >> dynamic_events 54 + 55 + :;: 'Disable and remove all events' ;: 56 + echo 0 > events/fprobes/enable 57 + echo > dynamic_events 58 + 59 + :;: 'Add another event' ;: 60 
+ echo "f:event3 ${PLACE2}" > dynamic_events 61 + echo 1 > events/fprobes/enable 62 + test `cat enabled_functions | wc -l` -eq 1 63 + 64 + :;: 'No other ftrace user' ;: 65 + echo 0 > events/fprobes/enable 66 + echo > dynamic_events 67 + test `cat enabled_functions | wc -l` -eq 0 68 + 69 + clear_trace