Diffstat (limited to 'kernel/trace/trace_kprobe.c')
| -rw-r--r-- | kernel/trace/trace_kprobe.c | 332 |
1 file changed, 233 insertions, 99 deletions
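This patch reworks kprobe event registration (adding a TP_FLAG_REGISTERED state and common enable/disable helpers) and allows probes to be set on module symbols with a [MOD:]KSYM specifier; a new module notifier re-registers such probes when the target module is loaded. As a rough illustration of the resulting workflow (a sketch only: the event, module, and function names below are hypothetical, not taken from this commit):

    # Define a kprobe event on a symbol in a module that is not loaded yet.
    # After this patch the definition is accepted with a warning and the
    # probe is registered later by the module notifier.
    echo 'p:myprobe my_module:my_driver_open' >> /sys/kernel/debug/tracing/kprobe_events
    # The event can be enabled in advance; it only arms once registered.
    echo 1 > /sys/kernel/debug/tracing/events/kprobes/myprobe/enable
    # Loading the module triggers trace_probe_module_callback(), which
    # registers the probe and clears its disabled flag.
    modprobe my_module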
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f925c45f0afa..5fb3697bf0e5 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
| @@ -343,6 +343,14 @@ DEFINE_BASIC_FETCH_FUNCS(deref) | |||
| 343 | DEFINE_FETCH_deref(string) | 343 | DEFINE_FETCH_deref(string) |
| 344 | DEFINE_FETCH_deref(string_size) | 344 | DEFINE_FETCH_deref(string_size) |
| 345 | 345 | ||
| 346 | static __kprobes void update_deref_fetch_param(struct deref_fetch_param *data) | ||
| 347 | { | ||
| 348 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) | ||
| 349 | update_deref_fetch_param(data->orig.data); | ||
| 350 | else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) | ||
| 351 | update_symbol_cache(data->orig.data); | ||
| 352 | } | ||
| 353 | |||
| 346 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) | 354 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) |
| 347 | { | 355 | { |
| 348 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) | 356 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) |
| @@ -377,6 +385,19 @@ DEFINE_BASIC_FETCH_FUNCS(bitfield) | |||
| 377 | #define fetch_bitfield_string_size NULL | 385 | #define fetch_bitfield_string_size NULL |
| 378 | 386 | ||
| 379 | static __kprobes void | 387 | static __kprobes void |
| 388 | update_bitfield_fetch_param(struct bitfield_fetch_param *data) | ||
| 389 | { | ||
| 390 | /* | ||
| 391 | * Don't check the bitfield itself, because this must be the | ||
| 392 | * last fetch function. | ||
| 393 | */ | ||
| 394 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) | ||
| 395 | update_deref_fetch_param(data->orig.data); | ||
| 396 | else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) | ||
| 397 | update_symbol_cache(data->orig.data); | ||
| 398 | } | ||
| 399 | |||
| 400 | static __kprobes void | ||
| 380 | free_bitfield_fetch_param(struct bitfield_fetch_param *data) | 401 | free_bitfield_fetch_param(struct bitfield_fetch_param *data) |
| 381 | { | 402 | { |
| 382 | /* | 403 | /* |
| @@ -389,6 +410,7 @@ free_bitfield_fetch_param(struct bitfield_fetch_param *data) | |||
| 389 | free_symbol_cache(data->orig.data); | 410 | free_symbol_cache(data->orig.data); |
| 390 | kfree(data); | 411 | kfree(data); |
| 391 | } | 412 | } |
| 413 | |||
| 392 | /* Default (unsigned long) fetch type */ | 414 | /* Default (unsigned long) fetch type */ |
| 393 | #define __DEFAULT_FETCH_TYPE(t) u##t | 415 | #define __DEFAULT_FETCH_TYPE(t) u##t |
| 394 | #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) | 416 | #define _DEFAULT_FETCH_TYPE(t) __DEFAULT_FETCH_TYPE(t) |
| @@ -536,6 +558,7 @@ struct probe_arg { | |||
| 536 | /* Flags for trace_probe */ | 558 | /* Flags for trace_probe */ |
| 537 | #define TP_FLAG_TRACE 1 | 559 | #define TP_FLAG_TRACE 1 |
| 538 | #define TP_FLAG_PROFILE 2 | 560 | #define TP_FLAG_PROFILE 2 |
| 561 | #define TP_FLAG_REGISTERED 4 | ||
| 539 | 562 | ||
| 540 | struct trace_probe { | 563 | struct trace_probe { |
| 541 | struct list_head list; | 564 | struct list_head list; |
| @@ -555,16 +578,49 @@ struct trace_probe { | |||
| 555 | (sizeof(struct probe_arg) * (n))) | 578 | (sizeof(struct probe_arg) * (n))) |
| 556 | 579 | ||
| 557 | 580 | ||
| 558 | static __kprobes int probe_is_return(struct trace_probe *tp) | 581 | static __kprobes int trace_probe_is_return(struct trace_probe *tp) |
| 559 | { | 582 | { |
| 560 | return tp->rp.handler != NULL; | 583 | return tp->rp.handler != NULL; |
| 561 | } | 584 | } |
| 562 | 585 | ||
| 563 | static __kprobes const char *probe_symbol(struct trace_probe *tp) | 586 | static __kprobes const char *trace_probe_symbol(struct trace_probe *tp) |
| 564 | { | 587 | { |
| 565 | return tp->symbol ? tp->symbol : "unknown"; | 588 | return tp->symbol ? tp->symbol : "unknown"; |
| 566 | } | 589 | } |
| 567 | 590 | ||
| 591 | static __kprobes unsigned long trace_probe_offset(struct trace_probe *tp) | ||
| 592 | { | ||
| 593 | return tp->rp.kp.offset; | ||
| 594 | } | ||
| 595 | |||
| 596 | static __kprobes bool trace_probe_is_enabled(struct trace_probe *tp) | ||
| 597 | { | ||
| 598 | return !!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE)); | ||
| 599 | } | ||
| 600 | |||
| 601 | static __kprobes bool trace_probe_is_registered(struct trace_probe *tp) | ||
| 602 | { | ||
| 603 | return !!(tp->flags & TP_FLAG_REGISTERED); | ||
| 604 | } | ||
| 605 | |||
| 606 | static __kprobes bool trace_probe_has_gone(struct trace_probe *tp) | ||
| 607 | { | ||
| 608 | return !!(kprobe_gone(&tp->rp.kp)); | ||
| 609 | } | ||
| 610 | |||
| 611 | static __kprobes bool trace_probe_within_module(struct trace_probe *tp, | ||
| 612 | struct module *mod) | ||
| 613 | { | ||
| 614 | int len = strlen(mod->name); | ||
| 615 | const char *name = trace_probe_symbol(tp); | ||
| 616 | return strncmp(mod->name, name, len) == 0 && name[len] == ':'; | ||
| 617 | } | ||
| 618 | |||
| 619 | static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) | ||
| 620 | { | ||
| 621 | return !!strchr(trace_probe_symbol(tp), ':'); | ||
| 622 | } | ||
| 623 | |||
| 568 | static int register_probe_event(struct trace_probe *tp); | 624 | static int register_probe_event(struct trace_probe *tp); |
| 569 | static void unregister_probe_event(struct trace_probe *tp); | 625 | static void unregister_probe_event(struct trace_probe *tp); |
| 570 | 626 | ||
| @@ -646,6 +702,16 @@ error: | |||
| 646 | return ERR_PTR(ret); | 702 | return ERR_PTR(ret); |
| 647 | } | 703 | } |
| 648 | 704 | ||
| 705 | static void update_probe_arg(struct probe_arg *arg) | ||
| 706 | { | ||
| 707 | if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) | ||
| 708 | update_bitfield_fetch_param(arg->fetch.data); | ||
| 709 | else if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) | ||
| 710 | update_deref_fetch_param(arg->fetch.data); | ||
| 711 | else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) | ||
| 712 | update_symbol_cache(arg->fetch.data); | ||
| 713 | } | ||
| 714 | |||
| 649 | static void free_probe_arg(struct probe_arg *arg) | 715 | static void free_probe_arg(struct probe_arg *arg) |
| 650 | { | 716 | { |
| 651 | if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) | 717 | if (CHECK_FETCH_FUNCS(bitfield, arg->fetch.fn)) |
| @@ -671,7 +737,7 @@ static void free_trace_probe(struct trace_probe *tp) | |||
| 671 | kfree(tp); | 737 | kfree(tp); |
| 672 | } | 738 | } |
| 673 | 739 | ||
| 674 | static struct trace_probe *find_probe_event(const char *event, | 740 | static struct trace_probe *find_trace_probe(const char *event, |
| 675 | const char *group) | 741 | const char *group) |
| 676 | { | 742 | { |
| 677 | struct trace_probe *tp; | 743 | struct trace_probe *tp; |
| @@ -683,13 +749,96 @@ static struct trace_probe *find_probe_event(const char *event, | |||
| 683 | return NULL; | 749 | return NULL; |
| 684 | } | 750 | } |
| 685 | 751 | ||
| 752 | /* Enable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ | ||
| 753 | static int enable_trace_probe(struct trace_probe *tp, int flag) | ||
| 754 | { | ||
| 755 | int ret = 0; | ||
| 756 | |||
| 757 | tp->flags |= flag; | ||
| 758 | if (trace_probe_is_enabled(tp) && trace_probe_is_registered(tp) && | ||
| 759 | !trace_probe_has_gone(tp)) { | ||
| 760 | if (trace_probe_is_return(tp)) | ||
| 761 | ret = enable_kretprobe(&tp->rp); | ||
| 762 | else | ||
| 763 | ret = enable_kprobe(&tp->rp.kp); | ||
| 764 | } | ||
| 765 | |||
| 766 | return ret; | ||
| 767 | } | ||
| 768 | |||
| 769 | /* Disable trace_probe - @flag must be TP_FLAG_TRACE or TP_FLAG_PROFILE */ | ||
| 770 | static void disable_trace_probe(struct trace_probe *tp, int flag) | ||
| 771 | { | ||
| 772 | tp->flags &= ~flag; | ||
| 773 | if (!trace_probe_is_enabled(tp) && trace_probe_is_registered(tp)) { | ||
| 774 | if (trace_probe_is_return(tp)) | ||
| 775 | disable_kretprobe(&tp->rp); | ||
| 776 | else | ||
| 777 | disable_kprobe(&tp->rp.kp); | ||
| 778 | } | ||
| 779 | } | ||
| 780 | |||
| 781 | /* Internal register function - just handle k*probes and flags */ | ||
| 782 | static int __register_trace_probe(struct trace_probe *tp) | ||
| 783 | { | ||
| 784 | int i, ret; | ||
| 785 | |||
| 786 | if (trace_probe_is_registered(tp)) | ||
| 787 | return -EINVAL; | ||
| 788 | |||
| 789 | for (i = 0; i < tp->nr_args; i++) | ||
| 790 | update_probe_arg(&tp->args[i]); | ||
| 791 | |||
| 792 | /* Set/clear disabled flag according to tp->flag */ | ||
| 793 | if (trace_probe_is_enabled(tp)) | ||
| 794 | tp->rp.kp.flags &= ~KPROBE_FLAG_DISABLED; | ||
| 795 | else | ||
| 796 | tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; | ||
| 797 | |||
| 798 | if (trace_probe_is_return(tp)) | ||
| 799 | ret = register_kretprobe(&tp->rp); | ||
| 800 | else | ||
| 801 | ret = register_kprobe(&tp->rp.kp); | ||
| 802 | |||
| 803 | if (ret == 0) | ||
| 804 | tp->flags |= TP_FLAG_REGISTERED; | ||
| 805 | else { | ||
| 806 | pr_warning("Could not insert probe at %s+%lu: %d\n", | ||
| 807 | trace_probe_symbol(tp), trace_probe_offset(tp), ret); | ||
| 808 | if (ret == -ENOENT && trace_probe_is_on_module(tp)) { | ||
| 809 | pr_warning("This probe might be able to register after " | ||
| 810 | "target module is loaded. Continue.\n"); | ||
| 811 | ret = 0; | ||
| 812 | } else if (ret == -EILSEQ) { | ||
| 813 | pr_warning("Probing address(0x%p) is not an " | ||
| 814 | "instruction boundary.\n", | ||
| 815 | tp->rp.kp.addr); | ||
| 816 | ret = -EINVAL; | ||
| 817 | } | ||
| 818 | } | ||
| 819 | |||
| 820 | return ret; | ||
| 821 | } | ||
| 822 | |||
| 823 | /* Internal unregister function - just handle k*probes and flags */ | ||
| 824 | static void __unregister_trace_probe(struct trace_probe *tp) | ||
| 825 | { | ||
| 826 | if (trace_probe_is_registered(tp)) { | ||
| 827 | if (trace_probe_is_return(tp)) | ||
| 828 | unregister_kretprobe(&tp->rp); | ||
| 829 | else | ||
| 830 | unregister_kprobe(&tp->rp.kp); | ||
| 831 | tp->flags &= ~TP_FLAG_REGISTERED; | ||
| 832 | /* Cleanup kprobe for reuse */ | ||
| 833 | if (tp->rp.kp.symbol_name) | ||
| 834 | tp->rp.kp.addr = NULL; | ||
| 835 | } | ||
| 836 | } | ||
| 837 | |||
| 686 | /* Unregister a trace_probe and probe_event: call with locking probe_lock */ | 838 | /* Unregister a trace_probe and probe_event: call with locking probe_lock */ |
| 687 | static void unregister_trace_probe(struct trace_probe *tp) | 839 | static void unregister_trace_probe(struct trace_probe *tp) |
| 688 | { | 840 | { |
| 689 | if (probe_is_return(tp)) | 841 | __unregister_trace_probe(tp); |
| 690 | unregister_kretprobe(&tp->rp); | ||
| 691 | else | ||
| 692 | unregister_kprobe(&tp->rp.kp); | ||
| 693 | list_del(&tp->list); | 842 | list_del(&tp->list); |
| 694 | unregister_probe_event(tp); | 843 | unregister_probe_event(tp); |
| 695 | } | 844 | } |
| @@ -702,41 +851,65 @@ static int register_trace_probe(struct trace_probe *tp) | |||
| 702 | 851 | ||
| 703 | mutex_lock(&probe_lock); | 852 | mutex_lock(&probe_lock); |
| 704 | 853 | ||
| 705 | /* register as an event */ | 854 | /* Delete old (same name) event if it exists */ |
| 706 | old_tp = find_probe_event(tp->call.name, tp->call.class->system); | 855 | old_tp = find_trace_probe(tp->call.name, tp->call.class->system); |
| 707 | if (old_tp) { | 856 | if (old_tp) { |
| 708 | /* delete old event */ | ||
| 709 | unregister_trace_probe(old_tp); | 857 | unregister_trace_probe(old_tp); |
| 710 | free_trace_probe(old_tp); | 858 | free_trace_probe(old_tp); |
| 711 | } | 859 | } |
| 860 | |||
| 861 | /* Register new event */ | ||
| 712 | ret = register_probe_event(tp); | 862 | ret = register_probe_event(tp); |
| 713 | if (ret) { | 863 | if (ret) { |
| 714 | pr_warning("Failed to register probe event(%d)\n", ret); | 864 | pr_warning("Failed to register probe event(%d)\n", ret); |
| 715 | goto end; | 865 | goto end; |
| 716 | } | 866 | } |
| 717 | 867 | ||
| 718 | tp->rp.kp.flags |= KPROBE_FLAG_DISABLED; | 868 | /* Register k*probe */ |
| 719 | if (probe_is_return(tp)) | 869 | ret = __register_trace_probe(tp); |
| 720 | ret = register_kretprobe(&tp->rp); | 870 | if (ret < 0) |
| 721 | else | ||
| 722 | ret = register_kprobe(&tp->rp.kp); | ||
| 723 | |||
| 724 | if (ret) { | ||
| 725 | pr_warning("Could not insert probe(%d)\n", ret); | ||
| 726 | if (ret == -EILSEQ) { | ||
| 727 | pr_warning("Probing address(0x%p) is not an " | ||
| 728 | "instruction boundary.\n", | ||
| 729 | tp->rp.kp.addr); | ||
| 730 | ret = -EINVAL; | ||
| 731 | } | ||
| 732 | unregister_probe_event(tp); | 871 | unregister_probe_event(tp); |
| 733 | } else | 872 | else |
| 734 | list_add_tail(&tp->list, &probe_list); | 873 | list_add_tail(&tp->list, &probe_list); |
| 874 | |||
| 735 | end: | 875 | end: |
| 736 | mutex_unlock(&probe_lock); | 876 | mutex_unlock(&probe_lock); |
| 737 | return ret; | 877 | return ret; |
| 738 | } | 878 | } |
| 739 | 879 | ||
| 880 | /* Module notifier callback, checking events on the module */ | ||
| 881 | static int trace_probe_module_callback(struct notifier_block *nb, | ||
| 882 | unsigned long val, void *data) | ||
| 883 | { | ||
| 884 | struct module *mod = data; | ||
| 885 | struct trace_probe *tp; | ||
| 886 | int ret; | ||
| 887 | |||
| 888 | if (val != MODULE_STATE_COMING) | ||
| 889 | return NOTIFY_DONE; | ||
| 890 | |||
| 891 | /* Update probes on coming module */ | ||
| 892 | mutex_lock(&probe_lock); | ||
| 893 | list_for_each_entry(tp, &probe_list, list) { | ||
| 894 | if (trace_probe_within_module(tp, mod)) { | ||
| 895 | __unregister_trace_probe(tp); | ||
| 896 | ret = __register_trace_probe(tp); | ||
| 897 | if (ret) | ||
| 898 | pr_warning("Failed to re-register probe %s on " | ||
| 899 | "%s: %d\n", | ||
| 900 | tp->call.name, mod->name, ret); | ||
| 901 | } | ||
| 902 | } | ||
| 903 | mutex_unlock(&probe_lock); | ||
| 904 | |||
| 905 | return NOTIFY_DONE; | ||
| 906 | } | ||
| 907 | |||
| 908 | static struct notifier_block trace_probe_module_nb = { | ||
| 909 | .notifier_call = trace_probe_module_callback, | ||
| 910 | .priority = 1 /* Invoked after kprobe module callback */ | ||
| 911 | }; | ||
| 912 | |||
| 740 | /* Split symbol and offset. */ | 913 | /* Split symbol and offset. */ |
| 741 | static int split_symbol_offset(char *symbol, unsigned long *offset) | 914 | static int split_symbol_offset(char *symbol, unsigned long *offset) |
| 742 | { | 915 | { |
| @@ -962,8 +1135,8 @@ static int create_trace_probe(int argc, char **argv) | |||
| 962 | { | 1135 | { |
| 963 | /* | 1136 | /* |
| 964 | * Argument syntax: | 1137 | * Argument syntax: |
| 965 | * - Add kprobe: p[:[GRP/]EVENT] KSYM[+OFFS]|KADDR [FETCHARGS] | 1138 | * - Add kprobe: p[:[GRP/]EVENT] [MOD:]KSYM[+OFFS]|KADDR [FETCHARGS] |
| 966 | * - Add kretprobe: r[:[GRP/]EVENT] KSYM[+0] [FETCHARGS] | 1139 | * - Add kretprobe: r[:[GRP/]EVENT] [MOD:]KSYM[+0] [FETCHARGS] |
| 967 | * Fetch args: | 1140 | * Fetch args: |
| 968 | * $retval : fetch return value | 1141 | * $retval : fetch return value |
| 969 | * $stack : fetch stack address | 1142 | * $stack : fetch stack address |
| @@ -1025,7 +1198,7 @@ static int create_trace_probe(int argc, char **argv) | |||
| 1025 | return -EINVAL; | 1198 | return -EINVAL; |
| 1026 | } | 1199 | } |
| 1027 | mutex_lock(&probe_lock); | 1200 | mutex_lock(&probe_lock); |
| 1028 | tp = find_probe_event(event, group); | 1201 | tp = find_trace_probe(event, group); |
| 1029 | if (!tp) { | 1202 | if (!tp) { |
| 1030 | mutex_unlock(&probe_lock); | 1203 | mutex_unlock(&probe_lock); |
| 1031 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 1204 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
| @@ -1144,7 +1317,7 @@ error: | |||
| 1144 | return ret; | 1317 | return ret; |
| 1145 | } | 1318 | } |
| 1146 | 1319 | ||
| 1147 | static void cleanup_all_probes(void) | 1320 | static void release_all_trace_probes(void) |
| 1148 | { | 1321 | { |
| 1149 | struct trace_probe *tp; | 1322 | struct trace_probe *tp; |
| 1150 | 1323 | ||
| @@ -1158,7 +1331,6 @@ static void cleanup_all_probes(void) | |||
| 1158 | mutex_unlock(&probe_lock); | 1331 | mutex_unlock(&probe_lock); |
| 1159 | } | 1332 | } |
| 1160 | 1333 | ||
| 1161 | |||
| 1162 | /* Probes listing interfaces */ | 1334 | /* Probes listing interfaces */ |
| 1163 | static void *probes_seq_start(struct seq_file *m, loff_t *pos) | 1335 | static void *probes_seq_start(struct seq_file *m, loff_t *pos) |
| 1164 | { | 1336 | { |
| @@ -1181,15 +1353,16 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 1181 | struct trace_probe *tp = v; | 1353 | struct trace_probe *tp = v; |
| 1182 | int i; | 1354 | int i; |
| 1183 | 1355 | ||
| 1184 | seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); | 1356 | seq_printf(m, "%c", trace_probe_is_return(tp) ? 'r' : 'p'); |
| 1185 | seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); | 1357 | seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name); |
| 1186 | 1358 | ||
| 1187 | if (!tp->symbol) | 1359 | if (!tp->symbol) |
| 1188 | seq_printf(m, " 0x%p", tp->rp.kp.addr); | 1360 | seq_printf(m, " 0x%p", tp->rp.kp.addr); |
| 1189 | else if (tp->rp.kp.offset) | 1361 | else if (tp->rp.kp.offset) |
| 1190 | seq_printf(m, " %s+%u", probe_symbol(tp), tp->rp.kp.offset); | 1362 | seq_printf(m, " %s+%u", trace_probe_symbol(tp), |
| 1363 | tp->rp.kp.offset); | ||
| 1191 | else | 1364 | else |
| 1192 | seq_printf(m, " %s", probe_symbol(tp)); | 1365 | seq_printf(m, " %s", trace_probe_symbol(tp)); |
| 1193 | 1366 | ||
| 1194 | for (i = 0; i < tp->nr_args; i++) | 1367 | for (i = 0; i < tp->nr_args; i++) |
| 1195 | seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); | 1368 | seq_printf(m, " %s=%s", tp->args[i].name, tp->args[i].comm); |
| @@ -1209,7 +1382,7 @@ static int probes_open(struct inode *inode, struct file *file) | |||
| 1209 | { | 1382 | { |
| 1210 | if ((file->f_mode & FMODE_WRITE) && | 1383 | if ((file->f_mode & FMODE_WRITE) && |
| 1211 | (file->f_flags & O_TRUNC)) | 1384 | (file->f_flags & O_TRUNC)) |
| 1212 | cleanup_all_probes(); | 1385 | release_all_trace_probes(); |
| 1213 | 1386 | ||
| 1214 | return seq_open(file, &probes_seq_op); | 1387 | return seq_open(file, &probes_seq_op); |
| 1215 | } | 1388 | } |
| @@ -1397,7 +1570,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
| 1397 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1570 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
| 1398 | 1571 | ||
| 1399 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1572 | if (!filter_current_check_discard(buffer, call, entry, event)) |
| 1400 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1573 | trace_nowake_buffer_unlock_commit_regs(buffer, event, |
| 1574 | irq_flags, pc, regs); | ||
| 1401 | } | 1575 | } |
| 1402 | 1576 | ||
| 1403 | /* Kretprobe handler */ | 1577 | /* Kretprobe handler */ |
| @@ -1429,7 +1603,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
| 1429 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 1603 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
| 1430 | 1604 | ||
| 1431 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1605 | if (!filter_current_check_discard(buffer, call, entry, event)) |
| 1432 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1606 | trace_nowake_buffer_unlock_commit_regs(buffer, event, |
| 1607 | irq_flags, pc, regs); | ||
| 1433 | } | 1608 | } |
| 1434 | 1609 | ||
| 1435 | /* Event entry printers */ | 1610 | /* Event entry printers */ |
| @@ -1511,30 +1686,6 @@ partial: | |||
| 1511 | return TRACE_TYPE_PARTIAL_LINE; | 1686 | return TRACE_TYPE_PARTIAL_LINE; |
| 1512 | } | 1687 | } |
| 1513 | 1688 | ||
| 1514 | static int probe_event_enable(struct ftrace_event_call *call) | ||
| 1515 | { | ||
| 1516 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
| 1517 | |||
| 1518 | tp->flags |= TP_FLAG_TRACE; | ||
| 1519 | if (probe_is_return(tp)) | ||
| 1520 | return enable_kretprobe(&tp->rp); | ||
| 1521 | else | ||
| 1522 | return enable_kprobe(&tp->rp.kp); | ||
| 1523 | } | ||
| 1524 | |||
| 1525 | static void probe_event_disable(struct ftrace_event_call *call) | ||
| 1526 | { | ||
| 1527 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
| 1528 | |||
| 1529 | tp->flags &= ~TP_FLAG_TRACE; | ||
| 1530 | if (!(tp->flags & (TP_FLAG_TRACE | TP_FLAG_PROFILE))) { | ||
| 1531 | if (probe_is_return(tp)) | ||
| 1532 | disable_kretprobe(&tp->rp); | ||
| 1533 | else | ||
| 1534 | disable_kprobe(&tp->rp.kp); | ||
| 1535 | } | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | #undef DEFINE_FIELD | 1689 | #undef DEFINE_FIELD |
| 1539 | #define DEFINE_FIELD(type, item, name, is_signed) \ | 1690 | #define DEFINE_FIELD(type, item, name, is_signed) \ |
| 1540 | do { \ | 1691 | do { \ |
| @@ -1596,7 +1747,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
| 1596 | 1747 | ||
| 1597 | const char *fmt, *arg; | 1748 | const char *fmt, *arg; |
| 1598 | 1749 | ||
| 1599 | if (!probe_is_return(tp)) { | 1750 | if (!trace_probe_is_return(tp)) { |
| 1600 | fmt = "(%lx)"; | 1751 | fmt = "(%lx)"; |
| 1601 | arg = "REC->" FIELD_STRING_IP; | 1752 | arg = "REC->" FIELD_STRING_IP; |
| 1602 | } else { | 1753 | } else { |
| @@ -1713,49 +1864,25 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
| 1713 | head = this_cpu_ptr(call->perf_events); | 1864 | head = this_cpu_ptr(call->perf_events); |
| 1714 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); | 1865 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); |
| 1715 | } | 1866 | } |
| 1716 | |||
| 1717 | static int probe_perf_enable(struct ftrace_event_call *call) | ||
| 1718 | { | ||
| 1719 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
| 1720 | |||
| 1721 | tp->flags |= TP_FLAG_PROFILE; | ||
| 1722 | |||
| 1723 | if (probe_is_return(tp)) | ||
| 1724 | return enable_kretprobe(&tp->rp); | ||
| 1725 | else | ||
| 1726 | return enable_kprobe(&tp->rp.kp); | ||
| 1727 | } | ||
| 1728 | |||
| 1729 | static void probe_perf_disable(struct ftrace_event_call *call) | ||
| 1730 | { | ||
| 1731 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
| 1732 | |||
| 1733 | tp->flags &= ~TP_FLAG_PROFILE; | ||
| 1734 | |||
| 1735 | if (!(tp->flags & TP_FLAG_TRACE)) { | ||
| 1736 | if (probe_is_return(tp)) | ||
| 1737 | disable_kretprobe(&tp->rp); | ||
| 1738 | else | ||
| 1739 | disable_kprobe(&tp->rp.kp); | ||
| 1740 | } | ||
| 1741 | } | ||
| 1742 | #endif /* CONFIG_PERF_EVENTS */ | 1867 | #endif /* CONFIG_PERF_EVENTS */ |
| 1743 | 1868 | ||
| 1744 | static __kprobes | 1869 | static __kprobes |
| 1745 | int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) | 1870 | int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) |
| 1746 | { | 1871 | { |
| 1872 | struct trace_probe *tp = (struct trace_probe *)event->data; | ||
| 1873 | |||
| 1747 | switch (type) { | 1874 | switch (type) { |
| 1748 | case TRACE_REG_REGISTER: | 1875 | case TRACE_REG_REGISTER: |
| 1749 | return probe_event_enable(event); | 1876 | return enable_trace_probe(tp, TP_FLAG_TRACE); |
| 1750 | case TRACE_REG_UNREGISTER: | 1877 | case TRACE_REG_UNREGISTER: |
| 1751 | probe_event_disable(event); | 1878 | disable_trace_probe(tp, TP_FLAG_TRACE); |
| 1752 | return 0; | 1879 | return 0; |
| 1753 | 1880 | ||
| 1754 | #ifdef CONFIG_PERF_EVENTS | 1881 | #ifdef CONFIG_PERF_EVENTS |
| 1755 | case TRACE_REG_PERF_REGISTER: | 1882 | case TRACE_REG_PERF_REGISTER: |
| 1756 | return probe_perf_enable(event); | 1883 | return enable_trace_probe(tp, TP_FLAG_PROFILE); |
| 1757 | case TRACE_REG_PERF_UNREGISTER: | 1884 | case TRACE_REG_PERF_UNREGISTER: |
| 1758 | probe_perf_disable(event); | 1885 | disable_trace_probe(tp, TP_FLAG_PROFILE); |
| 1759 | return 0; | 1886 | return 0; |
| 1760 | #endif | 1887 | #endif |
| 1761 | } | 1888 | } |
| @@ -1805,7 +1932,7 @@ static int register_probe_event(struct trace_probe *tp) | |||
| 1805 | 1932 | ||
| 1806 | /* Initialize ftrace_event_call */ | 1933 | /* Initialize ftrace_event_call */ |
| 1807 | INIT_LIST_HEAD(&call->class->fields); | 1934 | INIT_LIST_HEAD(&call->class->fields); |
| 1808 | if (probe_is_return(tp)) { | 1935 | if (trace_probe_is_return(tp)) { |
| 1809 | call->event.funcs = &kretprobe_funcs; | 1936 | call->event.funcs = &kretprobe_funcs; |
| 1810 | call->class->define_fields = kretprobe_event_define_fields; | 1937 | call->class->define_fields = kretprobe_event_define_fields; |
| 1811 | } else { | 1938 | } else { |
| @@ -1844,6 +1971,9 @@ static __init int init_kprobe_trace(void) | |||
| 1844 | struct dentry *d_tracer; | 1971 | struct dentry *d_tracer; |
| 1845 | struct dentry *entry; | 1972 | struct dentry *entry; |
| 1846 | 1973 | ||
| 1974 | if (register_module_notifier(&trace_probe_module_nb)) | ||
| 1975 | return -EINVAL; | ||
| 1976 | |||
| 1847 | d_tracer = tracing_init_dentry(); | 1977 | d_tracer = tracing_init_dentry(); |
| 1848 | if (!d_tracer) | 1978 | if (!d_tracer) |
| 1849 | return 0; | 1979 | return 0; |
| @@ -1870,8 +2000,12 @@ fs_initcall(init_kprobe_trace); | |||
| 1870 | 2000 | ||
| 1871 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 2001 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
| 1872 | 2002 | ||
| 1873 | static int kprobe_trace_selftest_target(int a1, int a2, int a3, | 2003 | /* |
| 1874 | int a4, int a5, int a6) | 2004 | * The "__used" keeps gcc from removing the function symbol |
| 2005 | * from the kallsyms table. | ||
| 2006 | */ | ||
| 2007 | static __used int kprobe_trace_selftest_target(int a1, int a2, int a3, | ||
| 2008 | int a4, int a5, int a6) | ||
| 1875 | { | 2009 | { |
| 1876 | return a1 + a2 + a3 + a4 + a5 + a6; | 2010 | return a1 + a2 + a3 + a4 + a5 + a6; |
| 1877 | } | 2011 | } |
| @@ -1893,12 +2027,12 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1893 | warn++; | 2027 | warn++; |
| 1894 | } else { | 2028 | } else { |
| 1895 | /* Enable trace point */ | 2029 | /* Enable trace point */ |
| 1896 | tp = find_probe_event("testprobe", KPROBE_EVENT_SYSTEM); | 2030 | tp = find_trace_probe("testprobe", KPROBE_EVENT_SYSTEM); |
| 1897 | if (WARN_ON_ONCE(tp == NULL)) { | 2031 | if (WARN_ON_ONCE(tp == NULL)) { |
| 1898 | pr_warning("error on getting new probe.\n"); | 2032 | pr_warning("error on getting new probe.\n"); |
| 1899 | warn++; | 2033 | warn++; |
| 1900 | } else | 2034 | } else |
| 1901 | probe_event_enable(&tp->call); | 2035 | enable_trace_probe(tp, TP_FLAG_TRACE); |
| 1902 | } | 2036 | } |
| 1903 | 2037 | ||
| 1904 | ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " | 2038 | ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " |
| @@ -1908,12 +2042,12 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1908 | warn++; | 2042 | warn++; |
| 1909 | } else { | 2043 | } else { |
| 1910 | /* Enable trace point */ | 2044 | /* Enable trace point */ |
| 1911 | tp = find_probe_event("testprobe2", KPROBE_EVENT_SYSTEM); | 2045 | tp = find_trace_probe("testprobe2", KPROBE_EVENT_SYSTEM); |
| 1912 | if (WARN_ON_ONCE(tp == NULL)) { | 2046 | if (WARN_ON_ONCE(tp == NULL)) { |
| 1913 | pr_warning("error on getting new probe.\n"); | 2047 | pr_warning("error on getting new probe.\n"); |
| 1914 | warn++; | 2048 | warn++; |
| 1915 | } else | 2049 | } else |
| 1916 | probe_event_enable(&tp->call); | 2050 | enable_trace_probe(tp, TP_FLAG_TRACE); |
| 1917 | } | 2051 | } |
| 1918 | 2052 | ||
| 1919 | if (warn) | 2053 | if (warn) |
| @@ -1934,7 +2068,7 @@ static __init int kprobe_trace_self_tests_init(void) | |||
| 1934 | } | 2068 | } |
| 1935 | 2069 | ||
| 1936 | end: | 2070 | end: |
| 1937 | cleanup_all_probes(); | 2071 | release_all_trace_probes(); |
| 1938 | if (warn) | 2072 | if (warn) |
| 1939 | pr_cont("NG: Some tests are failed. Please check them.\n"); | 2073 | pr_cont("NG: Some tests are failed. Please check them.\n"); |
| 1940 | else | 2074 | else |
