Diffstat (limited to 'kernel/trace')

 kernel/trace/blktrace.c              |  36
 kernel/trace/ftrace.c                | 161
 kernel/trace/trace.c                 |  85
 kernel/trace/trace.h                 |  51
 kernel/trace/trace_branch.c          |   2
 kernel/trace/trace_event_perf.c      |   2
 kernel/trace/trace_events.c          |  32
 kernel/trace/trace_events_filter.c   | 218
 kernel/trace/trace_export.c          |   2
 kernel/trace/trace_functions_graph.c |  82
 kernel/trace/trace_kprobe.c          |   4
 kernel/trace/trace_mmiotrace.c       |   4
 kernel/trace/trace_output.c          |  19
 kernel/trace/trace_sched_switch.c    |   4
 kernel/trace/trace_stat.c            |  41
 kernel/trace/trace_syscalls.c        |  42
 kernel/trace/trace_uprobe.c          |   3

17 files changed, 567 insertions(+), 221 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b8b8560bfb95..f785aef65799 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -26,6 +26,7 @@
 #include <linux/export.h>
 #include <linux/time.h>
 #include <linux/uaccess.h>
+#include <linux/list.h>

 #include <trace/events/block.h>

@@ -38,6 +39,9 @@ static unsigned int blktrace_seq __read_mostly = 1;
 static struct trace_array *blk_tr;
 static bool blk_tracer_enabled __read_mostly;

+static LIST_HEAD(running_trace_list);
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);
+
 /* Select an alternative, minimalistic output than the original one */
 #define TRACE_BLK_OPT_CLASSIC 0x1

@@ -107,10 +111,18 @@ record_it:
  * Send out a notify for this process, if we haven't done so since a trace
  * started
  */
-static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+static void trace_note_tsk(struct task_struct *tsk)
 {
+        unsigned long flags;
+        struct blk_trace *bt;
+
         tsk->btrace_seq = blktrace_seq;
-        trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
+        spin_lock_irqsave(&running_trace_lock, flags);
+        list_for_each_entry(bt, &running_trace_list, running_list) {
+                trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
+                           sizeof(tsk->comm));
+        }
+        spin_unlock_irqrestore(&running_trace_lock, flags);
 }

 static void trace_note_time(struct blk_trace *bt)
@@ -229,16 +241,15 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
                 goto record_it;
         }

+        if (unlikely(tsk->btrace_seq != blktrace_seq))
+                trace_note_tsk(tsk);
+
         /*
          * A word about the locking here - we disable interrupts to reserve
          * some space in the relay per-cpu buffer, to prevent an irq
          * from coming in and stepping on our toes.
          */
         local_irq_save(flags);
-
-        if (unlikely(tsk->btrace_seq != blktrace_seq))
-                trace_note_tsk(bt, tsk);
-
         t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
         if (t) {
                 sequence = per_cpu_ptr(bt->sequence, cpu);
@@ -477,6 +488,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
         bt->dir = dir;
         bt->dev = dev;
         atomic_set(&bt->dropped, 0);
+        INIT_LIST_HEAD(&bt->running_list);

         ret = -EIO;
         bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
@@ -567,13 +579,12 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
                 .end_lba = cbuts.end_lba,
                 .pid = cbuts.pid,
         };
-        memcpy(&buts.name, &cbuts.name, 32);

         ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
         if (ret)
                 return ret;

-        if (copy_to_user(arg, &buts.name, 32)) {
+        if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
                 blk_trace_remove(q);
                 return -EFAULT;
         }
@@ -601,6 +612,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
                         blktrace_seq++;
                         smp_mb();
                         bt->trace_state = Blktrace_running;
+                        spin_lock_irq(&running_trace_lock);
+                        list_add(&bt->running_list, &running_trace_list);
+                        spin_unlock_irq(&running_trace_lock);

                         trace_note_time(bt);
                         ret = 0;
@@ -608,6 +622,9 @@ int blk_trace_startstop(struct request_queue *q, int start)
         } else {
                 if (bt->trace_state == Blktrace_running) {
                         bt->trace_state = Blktrace_stopped;
+                        spin_lock_irq(&running_trace_lock);
+                        list_del_init(&bt->running_list);
+                        spin_unlock_irq(&running_trace_lock);
                         relay_flush(bt->rchan);
                         ret = 0;
                 }
@@ -1472,6 +1489,9 @@ static int blk_trace_remove_queue(struct request_queue *q)
         if (atomic_dec_and_test(&blk_probes_ref))
                 blk_unregister_tracepoints();

+        spin_lock_irq(&running_trace_lock);
+        list_del(&bt->running_list);
+        spin_unlock_irq(&running_trace_lock);
         blk_trace_free(bt);
         return 0;
 }
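The blktrace hunks above replace the single-tracer notification with a walk over a global list of running traces under running_trace_lock, so one process note reaches every active trace. A minimal userspace sketch of that registry pattern, assuming illustrative names and a pthread mutex in place of the kernel spinlock:

    /* Stand-alone model: tracers register on a global list; one event
     * fans out to every registered tracer under a lock. */
    #include <pthread.h>
    #include <stdio.h>
    #include <sys/types.h>

    struct tracer {
            const char *name;
            struct tracer *next;
    };

    static struct tracer *running_list;
    static pthread_mutex_t running_lock = PTHREAD_MUTEX_INITIALIZER;

    static void tracer_start(struct tracer *t)
    {
            pthread_mutex_lock(&running_lock);
            t->next = running_list;         /* add to the running list */
            running_list = t;
            pthread_mutex_unlock(&running_lock);
    }

    /* Like trace_note_tsk(): notify every running tracer about a task. */
    static void note_process(pid_t pid, const char *comm)
    {
            struct tracer *t;

            pthread_mutex_lock(&running_lock);
            for (t = running_list; t; t = t->next)
                    printf("%s: note pid %d comm %s\n", t->name, (int)pid, comm);
            pthread_mutex_unlock(&running_lock);
    }

    int main(void)
    {
            struct tracer a = { .name = "trace-a" }, b = { .name = "trace-b" };

            tracer_start(&a);
            tracer_start(&b);
            note_process(42, "dd");
            return 0;
    }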
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 03cf44ac54d3..22fa55696760 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3307,7 +3307,11 @@ void unregister_ftrace_function_probe_all(char *glob)
 static LIST_HEAD(ftrace_commands);
 static DEFINE_MUTEX(ftrace_cmd_mutex);

-int register_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only register ftrace commands from __init, so mark this
+ * __init too.
+ */
+__init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
         struct ftrace_func_command *p;
         int ret = 0;
@@ -3326,7 +3330,11 @@ int register_ftrace_command(struct ftrace_func_command *cmd)
         return ret;
 }

-int unregister_ftrace_command(struct ftrace_func_command *cmd)
+/*
+ * Currently we only unregister ftrace commands from __init, so mark
+ * this __init too.
+ */
+__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
 {
         struct ftrace_func_command *p, *n;
         int ret = -ENODEV;
@@ -3641,7 +3649,7 @@ __setup("ftrace_filter=", set_ftrace_filter);

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);

 static int __init set_graph_function(char *str)
 {
@@ -3659,7 +3667,7 @@ static void __init set_ftrace_early_graph(char *buf)
                 func = strsep(&buf, ",");
                 /* we allow only one expression at a time */
                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                      func);
+                                      FTRACE_GRAPH_MAX_FUNCS, func);
                 if (ret)
                         printk(KERN_DEBUG "ftrace: function %s not "
                                "traceable\n", func);
@@ -3776,15 +3784,25 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);

 int ftrace_graph_count;
-int ftrace_graph_filter_enabled;
+int ftrace_graph_notrace_count;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+
+struct ftrace_graph_data {
+        unsigned long *table;
+        size_t size;
+        int *count;
+        const struct seq_operations *seq_ops;
+};

 static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
-        if (*pos >= ftrace_graph_count)
+        struct ftrace_graph_data *fgd = m->private;
+
+        if (*pos >= *fgd->count)
                 return NULL;
-        return &ftrace_graph_funcs[*pos];
+        return &fgd->table[*pos];
 }

 static void *
@@ -3796,10 +3814,12 @@ g_next(struct seq_file *m, void *v, loff_t *pos)

 static void *g_start(struct seq_file *m, loff_t *pos)
 {
+        struct ftrace_graph_data *fgd = m->private;
+
         mutex_lock(&graph_lock);

         /* Nothing, tell g_show to print all functions are enabled */
-        if (!ftrace_graph_filter_enabled && !*pos)
+        if (!*fgd->count && !*pos)
                 return (void *)1;

         return __g_next(m, pos);
@@ -3835,38 +3855,88 @@ static const struct seq_operations ftrace_graph_seq_ops = {
 };

 static int
-ftrace_graph_open(struct inode *inode, struct file *file)
+__ftrace_graph_open(struct inode *inode, struct file *file,
+                    struct ftrace_graph_data *fgd)
 {
         int ret = 0;

-        if (unlikely(ftrace_disabled))
-                return -ENODEV;
-
         mutex_lock(&graph_lock);
         if ((file->f_mode & FMODE_WRITE) &&
             (file->f_flags & O_TRUNC)) {
-                ftrace_graph_filter_enabled = 0;
-                ftrace_graph_count = 0;
-                memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
+                *fgd->count = 0;
+                memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
         }
         mutex_unlock(&graph_lock);

-        if (file->f_mode & FMODE_READ)
-                ret = seq_open(file, &ftrace_graph_seq_ops);
+        if (file->f_mode & FMODE_READ) {
+                ret = seq_open(file, fgd->seq_ops);
+                if (!ret) {
+                        struct seq_file *m = file->private_data;
+                        m->private = fgd;
+                }
+        } else
+                file->private_data = fgd;

         return ret;
 }

 static int
+ftrace_graph_open(struct inode *inode, struct file *file)
+{
+        struct ftrace_graph_data *fgd;
+
+        if (unlikely(ftrace_disabled))
+                return -ENODEV;
+
+        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+        if (fgd == NULL)
+                return -ENOMEM;
+
+        fgd->table = ftrace_graph_funcs;
+        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+        fgd->count = &ftrace_graph_count;
+        fgd->seq_ops = &ftrace_graph_seq_ops;
+
+        return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
+ftrace_graph_notrace_open(struct inode *inode, struct file *file)
+{
+        struct ftrace_graph_data *fgd;
+
+        if (unlikely(ftrace_disabled))
+                return -ENODEV;
+
+        fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
+        if (fgd == NULL)
+                return -ENOMEM;
+
+        fgd->table = ftrace_graph_notrace_funcs;
+        fgd->size = FTRACE_GRAPH_MAX_FUNCS;
+        fgd->count = &ftrace_graph_notrace_count;
+        fgd->seq_ops = &ftrace_graph_seq_ops;
+
+        return __ftrace_graph_open(inode, file, fgd);
+}
+
+static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
-        if (file->f_mode & FMODE_READ)
+        if (file->f_mode & FMODE_READ) {
+                struct seq_file *m = file->private_data;
+
+                kfree(m->private);
                 seq_release(inode, file);
+        } else {
+                kfree(file->private_data);
+        }
+
         return 0;
 }

 static int
-ftrace_set_func(unsigned long *array, int *idx, char *buffer)
+ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 {
         struct dyn_ftrace *rec;
         struct ftrace_page *pg;
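The new ftrace_graph_data descriptor is what lets a single set of open/seq/write routines back both set_graph_function and set_graph_notrace: each open stashes a descriptor (table, capacity, count pointer) instead of touching hard-coded globals. A stand-alone sketch of that parameterization, with simplified types and no seq_file machinery:

    #include <stdio.h>
    #include <stddef.h>

    #define MAX_FUNCS 32

    struct graph_data {                     /* models ftrace_graph_data */
            unsigned long *table;
            size_t size;
            int *count;
    };

    static unsigned long filter_funcs[MAX_FUNCS];
    static int filter_count;
    static unsigned long notrace_funcs[MAX_FUNCS];
    static int notrace_count;

    static int add_func(struct graph_data *d, unsigned long ip)
    {
            if (*d->count >= (int)d->size)
                    return -1;              /* table full, like -EBUSY */
            d->table[(*d->count)++] = ip;
            return 0;
    }

    int main(void)
    {
            struct graph_data filter  = { filter_funcs,  MAX_FUNCS, &filter_count };
            struct graph_data notrace = { notrace_funcs, MAX_FUNCS, &notrace_count };

            add_func(&filter, 0xc0de);      /* a set_graph_function entry */
            add_func(&notrace, 0xbeef);     /* a set_graph_notrace entry */
            printf("%d filtered, %d excluded\n", filter_count, notrace_count);
            return 0;
    }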
@@ -3879,7 +3949,7 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)

         /* decode regex */
         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-        if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+        if (!not && *idx >= size)
                 return -EBUSY;

         search_len = strlen(search);
@@ -3907,7 +3977,7 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
                         fail = 0;
                         if (!exists) {
                                 array[(*idx)++] = rec->ip;
-                                if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+                                if (*idx >= size)
                                         goto out;
                         }
                 } else {
@@ -3925,8 +3995,6 @@ out:
         if (fail)
                 return -EINVAL;

-        ftrace_graph_filter_enabled = !!(*idx);
-
         return 0;
 }

@@ -3935,36 +4003,33 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
 {
         struct trace_parser parser;
-        ssize_t read, ret;
+        ssize_t read, ret = 0;
+        struct ftrace_graph_data *fgd = file->private_data;

         if (!cnt)
                 return 0;

-        mutex_lock(&graph_lock);
-
-        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
-                ret = -ENOMEM;
-                goto out_unlock;
-        }
+        if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
+                return -ENOMEM;

         read = trace_get_user(&parser, ubuf, cnt, ppos);

         if (read >= 0 && trace_parser_loaded((&parser))) {
                 parser.buffer[parser.idx] = 0;

+                mutex_lock(&graph_lock);
+
                 /* we allow only one expression at a time */
-                ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                        parser.buffer);
-                if (ret)
-                        goto out_free;
+                ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
+                                      parser.buffer);
+
+                mutex_unlock(&graph_lock);
         }

-        ret = read;
+        if (!ret)
+                ret = read;

-out_free:
         trace_parser_put(&parser);
-out_unlock:
-        mutex_unlock(&graph_lock);

         return ret;
 }
@@ -3976,6 +4041,14 @@ static const struct file_operations ftrace_graph_fops = {
         .llseek         = ftrace_filter_lseek,
         .release        = ftrace_graph_release,
 };
+
+static const struct file_operations ftrace_graph_notrace_fops = {
+        .open           = ftrace_graph_notrace_open,
+        .read           = seq_read,
+        .write          = ftrace_graph_write,
+        .llseek         = ftrace_filter_lseek,
+        .release        = ftrace_graph_release,
+};
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */

 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
@@ -3997,6 +4070,9 @@ static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
         trace_create_file("set_graph_function", 0444, d_tracer,
                           NULL,
                           &ftrace_graph_fops);
+        trace_create_file("set_graph_notrace", 0444, d_tracer,
+                          NULL,
+                          &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */

         return 0;
@@ -4320,12 +4396,21 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
          */
         preempt_disable_notrace();
         trace_recursion_set(TRACE_CONTROL_BIT);
+
+        /*
+         * Control funcs (perf) uses RCU. Only trace if
+         * RCU is currently active.
+         */
+        if (!rcu_is_watching())
+                goto out;
+
         do_for_each_ftrace_op(op, ftrace_control_list) {
                 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
                     !ftrace_function_local_disabled(op) &&
                     ftrace_ops_test(op, ip, regs))
                         op->func(ip, parent_ip, op, regs);
         } while_for_each_ftrace_op(op);
+ out:
         trace_recursion_clear(TRACE_CONTROL_BIT);
         preempt_enable_notrace();
 }
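The rcu_is_watching() check added to ftrace_ops_control_func() skips the control (perf) callbacks whenever RCU is not watching the current CPU, e.g. in idle, because those callbacks depend on RCU. A toy model of the early-bailout shape, with a plain flag standing in for rcu_is_watching():

    #include <stdbool.h>
    #include <stdio.h>

    static bool rcu_watching;               /* stand-in for rcu_is_watching() */

    static void call_control_ops(void)
    {
            /* recursion guard and preempt-off would happen here */
            if (!rcu_watching)
                    goto out;               /* skip RCU-dependent callbacks */

            printf("invoking control ops (perf) under RCU protection\n");
    out:
            ;                               /* recursion cleared, preempt back on */
    }

    int main(void)
    {
            call_control_ops();             /* idle-like context: skipped */
            rcu_watching = true;
            call_control_ops();             /* normal context: runs */
            return 0;
    }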
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7974ba20557d..9d20cd9743ef 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -235,13 +235,33 @@ void trace_array_put(struct trace_array *this_tr)
         mutex_unlock(&trace_types_lock);
 }

-int filter_current_check_discard(struct ring_buffer *buffer,
-                                 struct ftrace_event_call *call, void *rec,
-                                 struct ring_buffer_event *event)
+int filter_check_discard(struct ftrace_event_file *file, void *rec,
+                         struct ring_buffer *buffer,
+                         struct ring_buffer_event *event)
 {
-        return filter_check_discard(call, rec, buffer, event);
+        if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
+            !filter_match_preds(file->filter, rec)) {
+                ring_buffer_discard_commit(buffer, event);
+                return 1;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(filter_check_discard);
+
+int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
+                              struct ring_buffer *buffer,
+                              struct ring_buffer_event *event)
+{
+        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
+            !filter_match_preds(call->filter, rec)) {
+                ring_buffer_discard_commit(buffer, event);
+                return 1;
+        }
+
+        return 0;
 }
-EXPORT_SYMBOL_GPL(filter_current_check_discard);
+EXPORT_SYMBOL_GPL(call_filter_check_discard);

 cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
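The split above gives two commit-time helpers with one shape: check a FILTERED flag and run the predicates, discarding the reserved ring-buffer event on a miss; they differ only in whether flag and filter hang off the per-instance event file or the event call. A condensed model with simplified stand-in types:

    #include <stdbool.h>
    #include <stdio.h>

    #define FL_FILTERED 0x1

    struct filter { bool (*match)(const void *rec); };

    struct holder {                         /* models event_file or event_call */
            unsigned int flags;
            struct filter *filter;
    };

    /* Returns true when the event should be dropped before commit. */
    static bool check_discard(const struct holder *h, const void *rec)
    {
            return (h->flags & FL_FILTERED) && !h->filter->match(rec);
    }

    static bool match_pid42(const void *rec) { return *(const int *)rec == 42; }

    int main(void)
    {
            struct filter f = { match_pid42 };
            struct holder file = { FL_FILTERED, &f };
            int rec = 7;

            printf("discard? %d\n", check_discard(&file, &rec)); /* 1: drop */
            rec = 42;
            printf("discard? %d\n", check_discard(&file, &rec)); /* 0: commit */
            return 0;
    }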
| 247 | { | 267 | { |
| @@ -843,9 +863,12 @@ int trace_get_user(struct trace_parser *parser, const char __user *ubuf, | |||
| 843 | if (isspace(ch)) { | 863 | if (isspace(ch)) { |
| 844 | parser->buffer[parser->idx] = 0; | 864 | parser->buffer[parser->idx] = 0; |
| 845 | parser->cont = false; | 865 | parser->cont = false; |
| 846 | } else { | 866 | } else if (parser->idx < parser->size - 1) { |
| 847 | parser->cont = true; | 867 | parser->cont = true; |
| 848 | parser->buffer[parser->idx++] = ch; | 868 | parser->buffer[parser->idx++] = ch; |
| 869 | } else { | ||
| 870 | ret = -EINVAL; | ||
| 871 | goto out; | ||
| 849 | } | 872 | } |
| 850 | 873 | ||
| 851 | *ppos += read; | 874 | *ppos += read; |
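This trace_get_user() change makes an overlong token fail with -EINVAL instead of writing past the parser buffer. A userspace sketch of the same guard, leaving room for the terminating NUL (function and sizes are illustrative):

    #include <stdio.h>

    #define PARSER_SIZE 8

    static int parse_token(char *dst, size_t size, const char *src)
    {
            size_t idx = 0;

            for (; *src && *src != ' '; src++) {
                    if (idx < size - 1)
                            dst[idx++] = *src;      /* still fits */
                    else
                            return -1;              /* would overflow: -EINVAL */
            }
            dst[idx] = '\0';
            return 0;
    }

    int main(void)
    {
            char buf[PARSER_SIZE];

            printf("%d\n", parse_token(buf, sizeof(buf), "short"));        /* 0 */
            printf("%d\n", parse_token(buf, sizeof(buf), "way_too_long")); /* -1 */
            return 0;
    }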
@@ -1261,21 +1284,6 @@ int is_tracing_stopped(void)
 }

 /**
- * ftrace_off_permanent - disable all ftrace code permanently
- *
- * This should only be called when a serious anomally has
- * been detected. This will turn off the function tracing,
- * ring buffers, and other tracing utilites. It takes no
- * locks and can be called from any context.
- */
-void ftrace_off_permanent(void)
-{
-        tracing_disabled = 1;
-        ftrace_stop();
-        tracing_off_permanent();
-}
-
-/**
  * tracing_start - quick start of the tracer
  *
  * If tracing is enabled but was stopped by tracing_stop,
@@ -1509,7 +1517,8 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 #endif
                 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
                 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
-                (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+                (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+                (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

@@ -1630,7 +1639,7 @@ trace_function(struct trace_array *tr,
         entry->ip = ip;
         entry->parent_ip = parent_ip;

-        if (!filter_check_discard(call, entry, buffer, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);
 }

@@ -1714,7 +1723,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,

         entry->size = trace.nr_entries;

-        if (!filter_check_discard(call, entry, buffer, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);

  out:
@@ -1816,7 +1825,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
         trace.entries = entry->caller;

         save_stack_trace_user(&trace);
-        if (!filter_check_discard(call, entry, buffer, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);

  out_drop_count:
@@ -2008,7 +2017,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
         entry->fmt = fmt;

         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
-        if (!filter_check_discard(call, entry, buffer, event)) {
+        if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
                 ftrace_trace_stack(buffer, flags, 6, pc);
         }
@@ -2063,7 +2072,7 @@ __trace_array_vprintk(struct ring_buffer *buffer,

         memcpy(&entry->buf, tbuffer, len);
         entry->buf[len] = '\0';
-        if (!filter_check_discard(call, entry, buffer, event)) {
+        if (!call_filter_check_discard(call, entry, buffer, event)) {
                 __buffer_unlock_commit(buffer, event);
                 ftrace_trace_stack(buffer, flags, 6, pc);
         }
@@ -2760,7 +2769,7 @@ static void show_snapshot_main_help(struct seq_file *m)
         seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
         seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
         seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
-        seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+        seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
         seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
         seq_printf(m, "#                       is not a '0' or '1')\n");
 }
@@ -2964,6 +2973,11 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
         return 0;
 }

+bool tracing_is_disabled(void)
+{
+        return (tracing_disabled) ? true: false;
+}
+
 /*
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
@@ -5454,12 +5468,12 @@ static struct ftrace_func_command ftrace_snapshot_cmd = {
         .func = ftrace_trace_snapshot_callback,
 };

-static int register_snapshot_cmd(void)
+static __init int register_snapshot_cmd(void)
 {
         return register_ftrace_command(&ftrace_snapshot_cmd);
 }
 #else
-static inline int register_snapshot_cmd(void) { return 0; }
+static inline __init int register_snapshot_cmd(void) { return 0; }
 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

 struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
@@ -6253,6 +6267,17 @@ void trace_init_global_iter(struct trace_iterator *iter)
         iter->trace = iter->tr->current_trace;
         iter->cpu_file = RING_BUFFER_ALL_CPUS;
         iter->trace_buffer = &global_trace.trace_buffer;
+
+        if (iter->trace && iter->trace->open)
+                iter->trace->open(iter);
+
+        /* Annotate start of buffers if we had overruns */
+        if (ring_buffer_overruns(iter->trace_buffer->buffer))
+                iter->iter_flags |= TRACE_FILE_ANNOTATE;
+
+        /* Output in nanoseconds only if we are using a clock in nanoseconds. */
+        if (trace_clocks[iter->tr->clock_id].in_ns)
+                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 }

 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 10c86fb7a2b4..ea189e027b80 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -124,6 +124,7 @@ enum trace_flag_type {
         TRACE_FLAG_NEED_RESCHED         = 0x04,
         TRACE_FLAG_HARDIRQ              = 0x08,
         TRACE_FLAG_SOFTIRQ              = 0x10,
+        TRACE_FLAG_PREEMPT_RESCHED      = 0x20,
 };

 #define TRACE_BUF_SIZE          1024
@@ -192,8 +193,8 @@ struct trace_array {
 #ifdef CONFIG_FTRACE_SYSCALLS
         int                     sys_refcount_enter;
         int                     sys_refcount_exit;
-        DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-        DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+        struct ftrace_event_file __rcu *enter_syscall_files[NR_syscalls];
+        struct ftrace_event_file __rcu *exit_syscall_files[NR_syscalls];
 #endif
         int                     stop_count;
         int                     clock_id;
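Swapping the per-syscall bitmaps for RCU-protected ftrace_event_file pointers lets the syscall tracepoint test "enabled?" and reach the per-instance file in one dereference. A userspace model using C11 atomics where the kernel would use rcu_assign_pointer()/rcu_dereference_sched() (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_SYSCALLS 4

    struct event_file { const char *name; };

    /* models tr->enter_syscall_files[] */
    static _Atomic(struct event_file *) enter_files[NR_SYSCALLS];

    static void enable(int nr, struct event_file *f)
    {
            atomic_store_explicit(&enter_files[nr], f, memory_order_release);
    }

    static void syscall_enter(int nr)
    {
            struct event_file *f =
                    atomic_load_explicit(&enter_files[nr], memory_order_acquire);

            if (!f)
                    return;                 /* tracing disabled for this nr */
            printf("trace enter of %s\n", f->name);
    }

    int main(void)
    {
            struct event_file openat_file = { "sys_enter_openat" };

            syscall_enter(1);               /* pointer is NULL: nothing */
            enable(1, &openat_file);
            syscall_enter(1);               /* traced */
            return 0;
    }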
@@ -514,6 +515,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+bool tracing_is_disabled(void);
 struct dentry *trace_create_file(const char *name,
                                  umode_t mode,
                                  struct dentry *parent,
@@ -711,6 +713,8 @@ extern unsigned long trace_flags;
 #define TRACE_GRAPH_PRINT_PROC          0x8
 #define TRACE_GRAPH_PRINT_DURATION      0x10
 #define TRACE_GRAPH_PRINT_ABS_TIME      0x20
+#define TRACE_GRAPH_PRINT_FILL_SHIFT    28
+#define TRACE_GRAPH_PRINT_FILL_MASK     (0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

 extern enum print_line_t
 print_graph_function_flags(struct trace_iterator *iter, u32 flags);
@@ -730,15 +734,16 @@ extern void __trace_graph_return(struct trace_array *tr,
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
 #define FTRACE_GRAPH_MAX_FUNCS          32
-extern int ftrace_graph_filter_enabled;
 extern int ftrace_graph_count;
 extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern int ftrace_graph_notrace_count;
+extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];

 static inline int ftrace_graph_addr(unsigned long addr)
 {
         int i;

-        if (!ftrace_graph_filter_enabled)
+        if (!ftrace_graph_count)
                 return 1;

         for (i = 0; i < ftrace_graph_count; i++) {
@@ -758,11 +763,31 @@ static inline int ftrace_graph_addr(unsigned long addr)

         return 0;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+        int i;
+
+        if (!ftrace_graph_notrace_count)
+                return 0;
+
+        for (i = 0; i < ftrace_graph_notrace_count; i++) {
+                if (addr == ftrace_graph_notrace_funcs[i])
+                        return 1;
+        }
+
+        return 0;
+}
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
 {
         return 1;
 }
+
+static inline int ftrace_graph_notrace_addr(unsigned long addr)
+{
+        return 0;
+}
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #else /* CONFIG_FUNCTION_GRAPH_TRACER */
 static inline enum print_line_t
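ftrace_graph_notrace_addr() mirrors ftrace_graph_addr() with inverted defaults: an empty table excludes nothing, and a hit means the graph tracer should skip the function. A stand-alone version of the lookup (table contents made up for illustration):

    #include <stdio.h>

    #define MAX_FUNCS 32

    static unsigned long notrace_funcs[MAX_FUNCS] = { 0xdead, 0xbeef };
    static int notrace_count = 2;

    static int graph_notrace_addr(unsigned long addr)
    {
            if (!notrace_count)
                    return 0;               /* nothing excluded */

            for (int i = 0; i < notrace_count; i++)
                    if (addr == notrace_funcs[i])
                            return 1;       /* excluded from graph trace */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", graph_notrace_addr(0xbeef), graph_notrace_addr(0x1));
            return 0;
    }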
@@ -986,9 +1011,9 @@ struct filter_pred {

 extern enum regex_type
 filter_parse_regex(char *buff, int len, char **search, int *not);
-extern void print_event_filter(struct ftrace_event_call *call,
+extern void print_event_filter(struct ftrace_event_file *file,
                                struct trace_seq *s);
-extern int apply_event_filter(struct ftrace_event_call *call,
+extern int apply_event_filter(struct ftrace_event_file *file,
                               char *filter_string);
 extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
                                         char *filter_string);
@@ -999,20 +1024,6 @@ extern int filter_assign_type(const char *type);
 struct ftrace_event_field *
 trace_find_event_field(struct ftrace_event_call *call, char *name);

-static inline int
-filter_check_discard(struct ftrace_event_call *call, void *rec,
-                     struct ring_buffer *buffer,
-                     struct ring_buffer_event *event)
-{
-        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
-            !filter_match_preds(call->filter, rec)) {
-                ring_buffer_discard_commit(buffer, event);
-                return 1;
-        }
-
-        return 0;
-}
-
 extern void trace_event_enable_cmd_record(bool enable);
 extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
 extern int event_trace_del_tracer(struct trace_array *tr);
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index d594da0dc03c..697fb9bac8f0 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -78,7 +78,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
         entry->line = f->line;
         entry->correct = val == expect;

-        if (!filter_check_discard(call, entry, buffer, event))
+        if (!call_filter_check_discard(call, entry, buffer, event))
                 __buffer_unlock_commit(buffer, event);

  out:
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 80c36bcf66e8..78e27e3b52ac 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -26,7 +26,7 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
 {
         /* The ftrace function trace is allowed only for root. */
         if (ftrace_event_is_function(tp_event) &&
-            perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+            perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
                 return -EPERM;

         /* No tracing, just counting, so no obvious leak */
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 368a4d50cc30..f919a2e21bf3 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -989,7 +989,7 @@ static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
 {
-        struct ftrace_event_call *call;
+        struct ftrace_event_file *file;
         struct trace_seq *s;
         int r = -ENODEV;

@@ -1004,12 +1004,12 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
         trace_seq_init(s);

         mutex_lock(&event_mutex);
-        call = event_file_data(filp);
-        if (call)
-                print_event_filter(call, s);
+        file = event_file_data(filp);
+        if (file)
+                print_event_filter(file, s);
         mutex_unlock(&event_mutex);

-        if (call)
+        if (file)
                 r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);

         kfree(s);
@@ -1021,7 +1021,7 @@ static ssize_t
 event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
 {
-        struct ftrace_event_call *call;
+        struct ftrace_event_file *file;
         char *buf;
         int err = -ENODEV;

@@ -1039,9 +1039,9 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
         buf[cnt] = '\0';

         mutex_lock(&event_mutex);
-        call = event_file_data(filp);
-        if (call)
-                err = apply_event_filter(call, buf);
+        file = event_file_data(filp);
+        if (file)
+                err = apply_event_filter(file, buf);
         mutex_unlock(&event_mutex);

         free_page((unsigned long) buf);
@@ -1062,6 +1062,9 @@ static int subsystem_open(struct inode *inode, struct file *filp)
         struct trace_array *tr;
         int ret;

+        if (tracing_is_disabled())
+                return -ENODEV;
+
         /* Make sure the system still exists */
         mutex_lock(&trace_types_lock);
         mutex_lock(&event_mutex);
@@ -1108,6 +1111,9 @@ static int system_tr_open(struct inode *inode, struct file *filp)
         struct trace_array *tr = inode->i_private;
         int ret;

+        if (tracing_is_disabled())
+                return -ENODEV;
+
         if (trace_array_get(tr) < 0)
                 return -ENODEV;

@@ -1124,11 +1130,12 @@ static int system_tr_open(struct inode *inode, struct file *filp)
         if (ret < 0) {
                 trace_array_put(tr);
                 kfree(dir);
+                return ret;
         }

         filp->private_data = dir;

-        return ret;
+        return 0;
 }

 static int subsystem_release(struct inode *inode, struct file *file)
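The system_tr_open() fix above matters because the old error path freed dir and then fell through, still storing the freed pointer in filp->private_data; the added return exits right after cleanup. A minimal illustration of the corrected shape (hypothetical helper, not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    struct dir { int dummy; };

    static int do_open(struct dir **out, int ret_from_setup)
    {
            struct dir *dir = malloc(sizeof(*dir));

            if (!dir)
                    return -1;

            if (ret_from_setup < 0) {
                    free(dir);
                    return ret_from_setup;  /* the formerly missing return */
            }

            *out = dir;                     /* only publish on success */
            return 0;
    }

    int main(void)
    {
            struct dir *d = NULL;

            printf("%d\n", do_open(&d, -5)); /* error path: d stays NULL */
            printf("%d\n", do_open(&d, 0));  /* success: d published */
            free(d);
            return 0;
    }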
@@ -1539,7 +1546,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
                         return -1;
                 }
         }
-        trace_create_file("filter", 0644, file->dir, call,
+        trace_create_file("filter", 0644, file->dir, file,
                           &ftrace_event_filter_fops);

         trace_create_file("format", 0444, file->dir, call,
@@ -1577,6 +1584,7 @@ static void event_remove(struct ftrace_event_call *call)
                 if (file->event_call != call)
                         continue;
                 ftrace_event_enable_disable(file, 0);
+                destroy_preds(file);
                 /*
                  * The do_for_each_event_file() is
                  * a double loop. After finding the call for this
@@ -1700,7 +1708,7 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
         event_remove(call);
         trace_destroy_fields(call);
-        destroy_preds(call);
+        destroy_call_preds(call);
 }

 static int probe_remove_event_call(struct ftrace_event_call *call)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 97daa8cf958d..2468f56dc5db 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,10 +637,18 @@ static void append_filter_err(struct filter_parse_state *ps,
         free_page((unsigned long) buf);
 }

+static inline struct event_filter *event_filter(struct ftrace_event_file *file)
+{
+        if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                return file->event_call->filter;
+        else
+                return file->filter;
+}
+
 /* caller must hold event_mutex */
-void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
+void print_event_filter(struct ftrace_event_file *file, struct trace_seq *s)
 {
-        struct event_filter *filter = call->filter;
+        struct event_filter *filter = event_filter(file);

         if (filter && filter->filter_string)
                 trace_seq_printf(s, "%s\n", filter->filter_string);
@@ -766,11 +774,21 @@ static void __free_preds(struct event_filter *filter)
         filter->n_preds = 0;
 }

-static void filter_disable(struct ftrace_event_call *call)
+static void call_filter_disable(struct ftrace_event_call *call)
 {
         call->flags &= ~TRACE_EVENT_FL_FILTERED;
 }

+static void filter_disable(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                call_filter_disable(call);
+        else
+                file->flags &= ~FTRACE_EVENT_FL_FILTERED;
+}
+
 static void __free_filter(struct event_filter *filter)
 {
         if (!filter)
@@ -781,16 +799,30 @@ static void __free_filter(struct event_filter *filter)
         kfree(filter);
 }

+void destroy_call_preds(struct ftrace_event_call *call)
+{
+        __free_filter(call->filter);
+        call->filter = NULL;
+}
+
+static void destroy_file_preds(struct ftrace_event_file *file)
+{
+        __free_filter(file->filter);
+        file->filter = NULL;
+}
+
 /*
- * Called when destroying the ftrace_event_call.
- * The call is being freed, so we do not need to worry about
- * the call being currently used. This is for module code removing
+ * Called when destroying the ftrace_event_file.
+ * The file is being freed, so we do not need to worry about
+ * the file being currently used. This is for module code removing
  * the tracepoints from within it.
  */
-void destroy_preds(struct ftrace_event_call *call)
+void destroy_preds(struct ftrace_event_file *file)
 {
-        __free_filter(call->filter);
-        call->filter = NULL;
+        if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                destroy_call_preds(file->event_call);
+        else
+                destroy_file_preds(file);
 }

 static struct event_filter *__alloc_filter(void)
@@ -825,28 +857,56 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
         return 0;
 }

-static void filter_free_subsystem_preds(struct event_subsystem *system)
+static inline void __remove_filter(struct ftrace_event_file *file)
 {
+        struct ftrace_event_call *call = file->event_call;
+
+        filter_disable(file);
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                remove_filter_string(call->filter);
+        else
+                remove_filter_string(file->filter);
+}
+
+static void filter_free_subsystem_preds(struct event_subsystem *system,
+                                        struct trace_array *tr)
+{
+        struct ftrace_event_file *file;
         struct ftrace_event_call *call;

-        list_for_each_entry(call, &ftrace_events, list) {
+        list_for_each_entry(file, &tr->events, list) {
+                call = file->event_call;
                 if (strcmp(call->class->system, system->name) != 0)
                         continue;

-                filter_disable(call);
-                remove_filter_string(call->filter);
+                __remove_filter(file);
         }
 }

-static void filter_free_subsystem_filters(struct event_subsystem *system)
+static inline void __free_subsystem_filter(struct ftrace_event_file *file)
 {
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) {
+                __free_filter(call->filter);
+                call->filter = NULL;
+        } else {
+                __free_filter(file->filter);
+                file->filter = NULL;
+        }
+}
+
+static void filter_free_subsystem_filters(struct event_subsystem *system,
+                                          struct trace_array *tr)
+{
+        struct ftrace_event_file *file;
         struct ftrace_event_call *call;

-        list_for_each_entry(call, &ftrace_events, list) {
+        list_for_each_entry(file, &tr->events, list) {
+                call = file->event_call;
                 if (strcmp(call->class->system, system->name) != 0)
                         continue;
-                __free_filter(call->filter);
-                call->filter = NULL;
+                __free_subsystem_filter(file);
         }
 }

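Nearly every helper added in this file repeats one dispatch: test TRACE_EVENT_FL_USE_CALL_FILTER, then operate on call->filter or file->filter. A condensed stand-alone model of that selection (simplified types, illustrative flag value):

    #include <stdio.h>

    #define USE_CALL_FILTER 0x1

    struct event_call { unsigned int flags; void *filter; };
    struct event_file { struct event_call *event_call; void *filter; };

    /* Pick the slot the filter lives in for this file. */
    static void **filter_slot(struct event_file *file)
    {
            if (file->event_call->flags & USE_CALL_FILTER)
                    return &file->event_call->filter;
            return &file->filter;
    }

    int main(void)
    {
            struct event_call call = { 0, NULL };
            struct event_file file = { &call, NULL };
            int f = 1;

            *filter_slot(&file) = &f;       /* lands in file->filter */
            printf("file=%p call=%p\n", file.filter, call.filter);

            call.flags |= USE_CALL_FILTER;
            *filter_slot(&file) = &f;       /* now lands in call->filter */
            printf("file=%p call=%p\n", file.filter, call.filter);
            return 0;
    }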
@@ -1617,15 +1677,85 @@ fail:
         return err;
 }

+static inline void event_set_filtered_flag(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                call->flags |= TRACE_EVENT_FL_FILTERED;
+        else
+                file->flags |= FTRACE_EVENT_FL_FILTERED;
+}
+
+static inline void event_set_filter(struct ftrace_event_file *file,
+                                    struct event_filter *filter)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                rcu_assign_pointer(call->filter, filter);
+        else
+                rcu_assign_pointer(file->filter, filter);
+}
+
+static inline void event_clear_filter(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                RCU_INIT_POINTER(call->filter, NULL);
+        else
+                RCU_INIT_POINTER(file->filter, NULL);
+}
+
+static inline void
+event_set_no_set_filter_flag(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+        else
+                file->flags |= FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline void
+event_clear_no_set_filter_flag(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
+                call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+        else
+                file->flags &= ~FTRACE_EVENT_FL_NO_SET_FILTER;
+}
+
+static inline bool
+event_no_set_filter_flag(struct ftrace_event_file *file)
+{
+        struct ftrace_event_call *call = file->event_call;
+
+        if (file->flags & FTRACE_EVENT_FL_NO_SET_FILTER)
+                return true;
+
+        if ((call->flags & TRACE_EVENT_FL_USE_CALL_FILTER) &&
+            (call->flags & TRACE_EVENT_FL_NO_SET_FILTER))
+                return true;
+
+        return false;
+}
+
 struct filter_list {
         struct list_head        list;
         struct event_filter     *filter;
 };

 static int replace_system_preds(struct event_subsystem *system,
+                                struct trace_array *tr,
                                 struct filter_parse_state *ps,
                                 char *filter_string)
 {
+        struct ftrace_event_file *file;
         struct ftrace_event_call *call;
         struct filter_list *filter_item;
         struct filter_list *tmp;
@@ -1633,8 +1763,8 @@ static int replace_system_preds(struct event_subsystem *system,
         bool fail = true;
         int err;

-        list_for_each_entry(call, &ftrace_events, list) {
-
+        list_for_each_entry(file, &tr->events, list) {
+                call = file->event_call;
                 if (strcmp(call->class->system, system->name) != 0)
                         continue;

@@ -1644,18 +1774,20 @@ static int replace_system_preds(struct event_subsystem *system,
                  */
                 err = replace_preds(call, NULL, ps, filter_string, true);
                 if (err)
-                        call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+                        event_set_no_set_filter_flag(file);
                 else
-                        call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
+                        event_clear_no_set_filter_flag(file);
         }

-        list_for_each_entry(call, &ftrace_events, list) {
+        list_for_each_entry(file, &tr->events, list) {
                 struct event_filter *filter;

+                call = file->event_call;
+
                 if (strcmp(call->class->system, system->name) != 0)
| 1656 | continue; | 1788 | continue; |
| 1657 | 1789 | ||
| 1658 | if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER) | 1790 | if (event_no_set_filter_flag(file)) |
| 1659 | continue; | 1791 | continue; |
| 1660 | 1792 | ||
| 1661 | filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); | 1793 | filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL); |
| @@ -1676,17 +1808,17 @@ static int replace_system_preds(struct event_subsystem *system, | |||
| 1676 | 1808 | ||
| 1677 | err = replace_preds(call, filter, ps, filter_string, false); | 1809 | err = replace_preds(call, filter, ps, filter_string, false); |
| 1678 | if (err) { | 1810 | if (err) { |
| 1679 | filter_disable(call); | 1811 | filter_disable(file); |
| 1680 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); | 1812 | parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0); |
| 1681 | append_filter_err(ps, filter); | 1813 | append_filter_err(ps, filter); |
| 1682 | } else | 1814 | } else |
| 1683 | call->flags |= TRACE_EVENT_FL_FILTERED; | 1815 | event_set_filtered_flag(file); |
| 1684 | /* | 1816 | /* |
| 1685 | * Regardless of whether this returned an error, we still | 1817 | * Regardless of whether this returned an error, we still |
| 1686 | * replace the filter for the call. | 1818 | * replace the filter for the call. |
| 1687 | */ | 1819 | */ |
| 1688 | filter = call->filter; | 1820 | filter = event_filter(file); |
| 1689 | rcu_assign_pointer(call->filter, filter_item->filter); | 1821 | event_set_filter(file, filter_item->filter); |
| 1690 | filter_item->filter = filter; | 1822 | filter_item->filter = filter; |
| 1691 | 1823 | ||
| 1692 | fail = false; | 1824 | fail = false; |
| @@ -1816,6 +1948,7 @@ static int create_filter(struct ftrace_event_call *call, | |||
| 1816 | * and always remembers @filter_str. | 1948 | * and always remembers @filter_str. |
| 1817 | */ | 1949 | */ |
| 1818 | static int create_system_filter(struct event_subsystem *system, | 1950 | static int create_system_filter(struct event_subsystem *system, |
| 1951 | struct trace_array *tr, | ||
| 1819 | char *filter_str, struct event_filter **filterp) | 1952 | char *filter_str, struct event_filter **filterp) |
| 1820 | { | 1953 | { |
| 1821 | struct event_filter *filter = NULL; | 1954 | struct event_filter *filter = NULL; |
| @@ -1824,7 +1957,7 @@ static int create_system_filter(struct event_subsystem *system, | |||
| 1824 | 1957 | ||
| 1825 | err = create_filter_start(filter_str, true, &ps, &filter); | 1958 | err = create_filter_start(filter_str, true, &ps, &filter); |
| 1826 | if (!err) { | 1959 | if (!err) { |
| 1827 | err = replace_system_preds(system, ps, filter_str); | 1960 | err = replace_system_preds(system, tr, ps, filter_str); |
| 1828 | if (!err) { | 1961 | if (!err) { |
| 1829 | /* System filters just show a default message */ | 1962 | /* System filters just show a default message */ |
| 1830 | kfree(filter->filter_string); | 1963 | kfree(filter->filter_string); |
| @@ -1840,20 +1973,25 @@ static int create_system_filter(struct event_subsystem *system, | |||
| 1840 | } | 1973 | } |
| 1841 | 1974 | ||
| 1842 | /* caller must hold event_mutex */ | 1975 | /* caller must hold event_mutex */ |
| 1843 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | 1976 | int apply_event_filter(struct ftrace_event_file *file, char *filter_string) |
| 1844 | { | 1977 | { |
| 1978 | struct ftrace_event_call *call = file->event_call; | ||
| 1845 | struct event_filter *filter; | 1979 | struct event_filter *filter; |
| 1846 | int err; | 1980 | int err; |
| 1847 | 1981 | ||
| 1848 | if (!strcmp(strstrip(filter_string), "0")) { | 1982 | if (!strcmp(strstrip(filter_string), "0")) { |
| 1849 | filter_disable(call); | 1983 | filter_disable(file); |
| 1850 | filter = call->filter; | 1984 | filter = event_filter(file); |
| 1985 | |||
| 1851 | if (!filter) | 1986 | if (!filter) |
| 1852 | return 0; | 1987 | return 0; |
| 1853 | RCU_INIT_POINTER(call->filter, NULL); | 1988 | |
| 1989 | event_clear_filter(file); | ||
| 1990 | |||
| 1854 | /* Make sure the filter is not being used */ | 1991 | /* Make sure the filter is not being used */ |
| 1855 | synchronize_sched(); | 1992 | synchronize_sched(); |
| 1856 | __free_filter(filter); | 1993 | __free_filter(filter); |
| 1994 | |||
| 1857 | return 0; | 1995 | return 0; |
| 1858 | } | 1996 | } |
| 1859 | 1997 | ||
| @@ -1866,14 +2004,15 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
| 1866 | * string | 2004 | * string |
| 1867 | */ | 2005 | */ |
| 1868 | if (filter) { | 2006 | if (filter) { |
| 1869 | struct event_filter *tmp = call->filter; | 2007 | struct event_filter *tmp; |
| 1870 | 2008 | ||
| 2009 | tmp = event_filter(file); | ||
| 1871 | if (!err) | 2010 | if (!err) |
| 1872 | call->flags |= TRACE_EVENT_FL_FILTERED; | 2011 | event_set_filtered_flag(file); |
| 1873 | else | 2012 | else |
| 1874 | filter_disable(call); | 2013 | filter_disable(file); |
| 1875 | 2014 | ||
| 1876 | rcu_assign_pointer(call->filter, filter); | 2015 | event_set_filter(file, filter); |
| 1877 | 2016 | ||
| 1878 | if (tmp) { | 2017 | if (tmp) { |
| 1879 | /* Make sure the call is done with the filter */ | 2018 | /* Make sure the call is done with the filter */ |
| @@ -1889,6 +2028,7 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, | |||
| 1889 | char *filter_string) | 2028 | char *filter_string) |
| 1890 | { | 2029 | { |
| 1891 | struct event_subsystem *system = dir->subsystem; | 2030 | struct event_subsystem *system = dir->subsystem; |
| 2031 | struct trace_array *tr = dir->tr; | ||
| 1892 | struct event_filter *filter; | 2032 | struct event_filter *filter; |
| 1893 | int err = 0; | 2033 | int err = 0; |
| 1894 | 2034 | ||
| @@ -1901,18 +2041,18 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir, | |||
| 1901 | } | 2041 | } |
| 1902 | 2042 | ||
| 1903 | if (!strcmp(strstrip(filter_string), "0")) { | 2043 | if (!strcmp(strstrip(filter_string), "0")) { |
| 1904 | filter_free_subsystem_preds(system); | 2044 | filter_free_subsystem_preds(system, tr); |
| 1905 | remove_filter_string(system->filter); | 2045 | remove_filter_string(system->filter); |
| 1906 | filter = system->filter; | 2046 | filter = system->filter; |
| 1907 | system->filter = NULL; | 2047 | system->filter = NULL; |
| 1908 | /* Ensure all filters are no longer used */ | 2048 | /* Ensure all filters are no longer used */ |
| 1909 | synchronize_sched(); | 2049 | synchronize_sched(); |
| 1910 | filter_free_subsystem_filters(system); | 2050 | filter_free_subsystem_filters(system, tr); |
| 1911 | __free_filter(filter); | 2051 | __free_filter(filter); |
| 1912 | goto out_unlock; | 2052 | goto out_unlock; |
| 1913 | } | 2053 | } |
| 1914 | 2054 | ||
| 1915 | err = create_system_filter(system, filter_string, &filter); | 2055 | err = create_system_filter(system, tr, filter_string, &filter); |
| 1916 | if (filter) { | 2056 | if (filter) { |
| 1917 | /* | 2057 | /* |
| 1918 | * No event actually uses the system filter | 2058 | * No event actually uses the system filter |
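
event_filter() is used by apply_event_filter() and replace_system_preds() above but is not shown in this diff; under the same USE_CALL_FILTER convention as the new helpers, it presumably reduces to:

	/* sketch, assuming the same flag convention as the helpers above */
	static struct event_filter *event_filter(struct ftrace_event_file *file)
	{
		struct ftrace_event_call *call = file->event_call;

		if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
			return call->filter;

		return file->filter;
	}
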
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index d21a74670088..7c3e3e72e2b6 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
| @@ -180,7 +180,7 @@ struct ftrace_event_call __used event_##call = { \ | |||
| 180 | .event.type = etype, \ | 180 | .event.type = etype, \ |
| 181 | .class = &event_class_ftrace_##call, \ | 181 | .class = &event_class_ftrace_##call, \ |
| 182 | .print_fmt = print, \ | 182 | .print_fmt = print, \ |
| 183 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ | 183 | .flags = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \ |
| 184 | }; \ | 184 | }; \ |
| 185 | struct ftrace_event_call __used \ | 185 | struct ftrace_event_call __used \ |
| 186 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; | 186 | __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index b5c09242683d..0b99120d395c 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -82,9 +82,9 @@ static struct trace_array *graph_array; | |||
| 82 | * to fill in space into DURATION column. | 82 | * to fill in space into DURATION column. |
| 83 | */ | 83 | */ |
| 84 | enum { | 84 | enum { |
| 85 | DURATION_FILL_FULL = -1, | 85 | FLAGS_FILL_FULL = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT, |
| 86 | DURATION_FILL_START = -2, | 86 | FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT, |
| 87 | DURATION_FILL_END = -3, | 87 | FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, |
| 88 | }; | 88 | }; |
| 89 | 89 | ||
| 90 | static enum print_line_t | 90 | static enum print_line_t |
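
The old enum overloaded impossible negative durations to request column fill; the new values carry the fill kind in two otherwise unused high bits of the flags word. Assuming the kernel/trace/trace.h definitions this series pairs with (a 2-bit field at bit 28):

	/* assumed from kernel/trace/trace.h in this series */
	#define TRACE_GRAPH_PRINT_FILL_SHIFT	28
	#define TRACE_GRAPH_PRINT_FILL_MASK	(0x3 << TRACE_GRAPH_PRINT_FILL_SHIFT)

	/*
	 * A caller requests fill with, e.g.:
	 *	print_graph_duration(0, s, flags | FLAGS_FILL_START);
	 * and print_graph_duration() decodes the kind with:
	 *	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { ... }
	 */
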
| @@ -114,16 +114,37 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth, | |||
| 114 | return -EBUSY; | 114 | return -EBUSY; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | /* | ||
| 118 | * The curr_ret_stack is an index into the ftrace return stack | ||
| 119 | * of the current task. Its value should be in the range | ||
| 120 | * [0, FTRACE_RETFUNC_DEPTH) while the function graph tracer is | ||
| 121 | * in use. To support filtering out specific functions, the | ||
| 122 | * index is made negative by subtracting a huge value | ||
| 123 | * (FTRACE_NOTRACE_DEPTH), so that ftrace ignores the record | ||
| 124 | * whenever it sees a negative index. The index is recovered on | ||
| 125 | * return from the filtered function by adding FTRACE_NOTRACE_DEPTH | ||
| 126 | * back, after which functions are recorded normally again. | ||
| 127 | * | ||
| 128 | * The curr_ret_stack is initialized to -1 and gets increased | ||
| 129 | * in this function. So it can be less than -1 only if the | ||
| 130 | * function was filtered out via ftrace_graph_notrace_addr(), | ||
| 131 | * which the user can set via the set_graph_notrace debugfs file. | ||
| 132 | */ | ||
| 133 | if (current->curr_ret_stack < -1) | ||
| 134 | return -EBUSY; | ||
| 135 | |||
| 117 | calltime = trace_clock_local(); | 136 | calltime = trace_clock_local(); |
| 118 | 137 | ||
| 119 | index = ++current->curr_ret_stack; | 138 | index = ++current->curr_ret_stack; |
| 139 | if (ftrace_graph_notrace_addr(func)) | ||
| 140 | current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH; | ||
| 120 | barrier(); | 141 | barrier(); |
| 121 | current->ret_stack[index].ret = ret; | 142 | current->ret_stack[index].ret = ret; |
| 122 | current->ret_stack[index].func = func; | 143 | current->ret_stack[index].func = func; |
| 123 | current->ret_stack[index].calltime = calltime; | 144 | current->ret_stack[index].calltime = calltime; |
| 124 | current->ret_stack[index].subtime = 0; | 145 | current->ret_stack[index].subtime = 0; |
| 125 | current->ret_stack[index].fp = frame_pointer; | 146 | current->ret_stack[index].fp = frame_pointer; |
| 126 | *depth = index; | 147 | *depth = current->curr_ret_stack; |
| 127 | 148 | ||
| 128 | return 0; | 149 | return 0; |
| 129 | } | 150 | } |
| @@ -137,7 +158,17 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, | |||
| 137 | 158 | ||
| 138 | index = current->curr_ret_stack; | 159 | index = current->curr_ret_stack; |
| 139 | 160 | ||
| 140 | if (unlikely(index < 0)) { | 161 | /* |
| 162 | * A negative index here means that we have just returned from | ||
| 163 | * a notrace'd function. Recover the index to get the original | ||
| 164 | * return address. See ftrace_push_return_trace(). | ||
| 165 | * | ||
| 166 | * TODO: Need to check whether the stack gets corrupted. | ||
| 167 | */ | ||
| 168 | if (index < 0) | ||
| 169 | index += FTRACE_NOTRACE_DEPTH; | ||
| 170 | |||
| 171 | if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) { | ||
| 141 | ftrace_graph_stop(); | 172 | ftrace_graph_stop(); |
| 142 | WARN_ON(1); | 173 | WARN_ON(1); |
| 143 | /* Might as well panic, otherwise we have nowhere to go */ | 174 | /* Might as well panic, otherwise we have nowhere to go */ |
| @@ -193,6 +224,15 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer) | |||
| 193 | trace.rettime = trace_clock_local(); | 224 | trace.rettime = trace_clock_local(); |
| 194 | barrier(); | 225 | barrier(); |
| 195 | current->curr_ret_stack--; | 226 | current->curr_ret_stack--; |
| 227 | /* | ||
| 228 | * The curr_ret_stack can be less than -1 only if the function | ||
| 229 | * was filtered out and we are about to return from it. | ||
| 230 | * Recover the index and continue tracing functions normally. | ||
| 231 | */ | ||
| 232 | if (current->curr_ret_stack < -1) { | ||
| 233 | current->curr_ret_stack += FTRACE_NOTRACE_DEPTH; | ||
| 234 | return ret; | ||
| 235 | } | ||
| 196 | 236 | ||
| 197 | /* | 237 | /* |
| 198 | * The trace should run after decrementing the ret counter | 238 | * The trace should run after decrementing the ret counter |
| @@ -230,7 +270,7 @@ int __trace_graph_entry(struct trace_array *tr, | |||
| 230 | return 0; | 270 | return 0; |
| 231 | entry = ring_buffer_event_data(event); | 271 | entry = ring_buffer_event_data(event); |
| 232 | entry->graph_ent = *trace; | 272 | entry->graph_ent = *trace; |
| 233 | if (!filter_current_check_discard(buffer, call, entry, event)) | 273 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 234 | __buffer_unlock_commit(buffer, event); | 274 | __buffer_unlock_commit(buffer, event); |
| 235 | 275 | ||
| 236 | return 1; | 276 | return 1; |
| @@ -259,10 +299,20 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
| 259 | 299 | ||
| 260 | /* trace it when it is nested in an enabled function or is itself enabled. */ | 300 | /* trace it when it is nested in an enabled function or is itself enabled. */ |
| 261 | if ((!(trace->depth || ftrace_graph_addr(trace->func)) || | 301 | if ((!(trace->depth || ftrace_graph_addr(trace->func)) || |
| 262 | ftrace_graph_ignore_irqs()) || | 302 | ftrace_graph_ignore_irqs()) || (trace->depth < 0) || |
| 263 | (max_depth && trace->depth >= max_depth)) | 303 | (max_depth && trace->depth >= max_depth)) |
| 264 | return 0; | 304 | return 0; |
| 265 | 305 | ||
| 306 | /* | ||
| 307 | * Do not trace a function if it's filtered by set_graph_notrace. | ||
| 308 | * Make the index of ret stack negative to indicate that it should | ||
| 309 | * ignore further functions. But it needs its own ret stack entry | ||
| 310 | * to recover the original index in order to continue tracing after | ||
| 311 | * returning from the function. | ||
| 312 | */ | ||
| 313 | if (ftrace_graph_notrace_addr(trace->func)) | ||
| 314 | return 1; | ||
| 315 | |||
| 266 | local_irq_save(flags); | 316 | local_irq_save(flags); |
| 267 | cpu = raw_smp_processor_id(); | 317 | cpu = raw_smp_processor_id(); |
| 268 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); | 318 | data = per_cpu_ptr(tr->trace_buffer.data, cpu); |
| @@ -335,7 +385,7 @@ void __trace_graph_return(struct trace_array *tr, | |||
| 335 | return; | 385 | return; |
| 336 | entry = ring_buffer_event_data(event); | 386 | entry = ring_buffer_event_data(event); |
| 337 | entry->ret = *trace; | 387 | entry->ret = *trace; |
| 338 | if (!filter_current_check_discard(buffer, call, entry, event)) | 388 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 339 | __buffer_unlock_commit(buffer, event); | 389 | __buffer_unlock_commit(buffer, event); |
| 340 | } | 390 | } |
| 341 | 391 | ||
| @@ -652,7 +702,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
| 652 | } | 702 | } |
| 653 | 703 | ||
| 654 | /* No overhead */ | 704 | /* No overhead */ |
| 655 | ret = print_graph_duration(DURATION_FILL_START, s, flags); | 705 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_START); |
| 656 | if (ret != TRACE_TYPE_HANDLED) | 706 | if (ret != TRACE_TYPE_HANDLED) |
| 657 | return ret; | 707 | return ret; |
| 658 | 708 | ||
| @@ -664,7 +714,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
| 664 | if (!ret) | 714 | if (!ret) |
| 665 | return TRACE_TYPE_PARTIAL_LINE; | 715 | return TRACE_TYPE_PARTIAL_LINE; |
| 666 | 716 | ||
| 667 | ret = print_graph_duration(DURATION_FILL_END, s, flags); | 717 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_END); |
| 668 | if (ret != TRACE_TYPE_HANDLED) | 718 | if (ret != TRACE_TYPE_HANDLED) |
| 669 | return ret; | 719 | return ret; |
| 670 | 720 | ||
| @@ -729,14 +779,14 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
| 729 | return TRACE_TYPE_HANDLED; | 779 | return TRACE_TYPE_HANDLED; |
| 730 | 780 | ||
| 731 | /* No real data, just filling the column with spaces */ | 781 | /* No real data, just filling the column with spaces */ |
| 732 | switch (duration) { | 782 | switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { |
| 733 | case DURATION_FILL_FULL: | 783 | case FLAGS_FILL_FULL: |
| 734 | ret = trace_seq_puts(s, " | "); | 784 | ret = trace_seq_puts(s, " | "); |
| 735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 785 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
| 736 | case DURATION_FILL_START: | 786 | case FLAGS_FILL_START: |
| 737 | ret = trace_seq_puts(s, " "); | 787 | ret = trace_seq_puts(s, " "); |
| 738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 788 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
| 739 | case DURATION_FILL_END: | 789 | case FLAGS_FILL_END: |
| 740 | ret = trace_seq_puts(s, " |"); | 790 | ret = trace_seq_puts(s, " |"); |
| 741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 791 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
| 742 | } | 792 | } |
| @@ -852,7 +902,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 852 | } | 902 | } |
| 853 | 903 | ||
| 854 | /* No time */ | 904 | /* No time */ |
| 855 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 905 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
| 856 | if (ret != TRACE_TYPE_HANDLED) | 906 | if (ret != TRACE_TYPE_HANDLED) |
| 857 | return ret; | 907 | return ret; |
| 858 | 908 | ||
| @@ -1172,7 +1222,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 1172 | return TRACE_TYPE_PARTIAL_LINE; | 1222 | return TRACE_TYPE_PARTIAL_LINE; |
| 1173 | 1223 | ||
| 1174 | /* No time */ | 1224 | /* No time */ |
| 1175 | ret = print_graph_duration(DURATION_FILL_FULL, s, flags); | 1225 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
| 1176 | if (ret != TRACE_TYPE_HANDLED) | 1226 | if (ret != TRACE_TYPE_HANDLED) |
| 1177 | return ret; | 1227 | return ret; |
| 1178 | 1228 | ||
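
The whole set_graph_notrace scheme rests on curr_ret_stack arithmetic. A userspace toy model of the index lifecycle, assuming only that FTRACE_NOTRACE_DEPTH is a constant far larger than any real stack depth (the headers this series touches use 65536):

	#include <assert.h>

	#define FTRACE_NOTRACE_DEPTH	65536	/* assumed value */

	int main(void)
	{
		int curr_ret_stack = 3;	/* depth inside normally traced code */

		/* entry to a notrace'd function: push a slot, then mark */
		int index = ++curr_ret_stack;		/* slot 4 saves its state */
		curr_ret_stack -= FTRACE_NOTRACE_DEPTH;	/* now -65532, i.e. < -1 */

		/* while negative, the entry paths bail out and record nothing */
		assert(curr_ret_stack < -1);

		/* exit: the pop path recovers the slot to read the saved state */
		assert(curr_ret_stack + FTRACE_NOTRACE_DEPTH == index);

		/* the return handler pops, then restores the real depth */
		curr_ret_stack--;			/* -65533 */
		curr_ret_stack += FTRACE_NOTRACE_DEPTH;	/* back to 3 */
		assert(curr_ret_stack == 3);		/* tracing resumes normally */
		return 0;
	}
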
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 243f6834d026..dae9541ada9e 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -835,7 +835,7 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs, | |||
| 835 | entry->ip = (unsigned long)tp->rp.kp.addr; | 835 | entry->ip = (unsigned long)tp->rp.kp.addr; |
| 836 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 836 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
| 837 | 837 | ||
| 838 | if (!filter_current_check_discard(buffer, call, entry, event)) | 838 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 839 | trace_buffer_unlock_commit_regs(buffer, event, | 839 | trace_buffer_unlock_commit_regs(buffer, event, |
| 840 | irq_flags, pc, regs); | 840 | irq_flags, pc, regs); |
| 841 | } | 841 | } |
| @@ -884,7 +884,7 @@ __kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
| 884 | entry->ret_ip = (unsigned long)ri->ret_addr; | 884 | entry->ret_ip = (unsigned long)ri->ret_addr; |
| 885 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); | 885 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
| 886 | 886 | ||
| 887 | if (!filter_current_check_discard(buffer, call, entry, event)) | 887 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 888 | trace_buffer_unlock_commit_regs(buffer, event, | 888 | trace_buffer_unlock_commit_regs(buffer, event, |
| 889 | irq_flags, pc, regs); | 889 | irq_flags, pc, regs); |
| 890 | } | 890 | } |
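
Neither discard helper's body appears in this diff. A plausible shape for the per-file variant, shown as a sketch only: check the file's FILTERED flag, run the predicates, and discard the half-committed ring-buffer event on a miss. The call_filter_check_discard() twin would test call->flags and call->filter instead.

	/* sketch of the per-file discard check */
	int filter_check_discard(struct ftrace_event_file *file, void *rec,
				 struct ring_buffer *buffer,
				 struct ring_buffer_event *event)
	{
		if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
		    !filter_match_preds(file->filter, rec)) {
			ring_buffer_discard_commit(buffer, event);
			return 1;
		}

		return 0;
	}
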
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index b3dcfb2f0fef..0abd9b863474 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
| @@ -323,7 +323,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
| 323 | entry = ring_buffer_event_data(event); | 323 | entry = ring_buffer_event_data(event); |
| 324 | entry->rw = *rw; | 324 | entry->rw = *rw; |
| 325 | 325 | ||
| 326 | if (!filter_check_discard(call, entry, buffer, event)) | 326 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 327 | trace_buffer_unlock_commit(buffer, event, 0, pc); | 327 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| @@ -353,7 +353,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
| 353 | entry = ring_buffer_event_data(event); | 353 | entry = ring_buffer_event_data(event); |
| 354 | entry->map = *map; | 354 | entry->map = *map; |
| 355 | 355 | ||
| 356 | if (!filter_check_discard(call, entry, buffer, event)) | 356 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 357 | trace_buffer_unlock_commit(buffer, event, 0, pc); | 357 | trace_buffer_unlock_commit(buffer, event, 0, pc); |
| 358 | } | 358 | } |
| 359 | 359 | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 34e7cbac0c9c..ed32284fbe32 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -618,8 +618,23 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
| 618 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | 618 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : |
| 619 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : | 619 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : |
| 620 | '.'; | 620 | '.'; |
| 621 | need_resched = | 621 | |
| 622 | (entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'; | 622 | switch (entry->flags & (TRACE_FLAG_NEED_RESCHED | |
| 623 | TRACE_FLAG_PREEMPT_RESCHED)) { | ||
| 624 | case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED: | ||
| 625 | need_resched = 'N'; | ||
| 626 | break; | ||
| 627 | case TRACE_FLAG_NEED_RESCHED: | ||
| 628 | need_resched = 'n'; | ||
| 629 | break; | ||
| 630 | case TRACE_FLAG_PREEMPT_RESCHED: | ||
| 631 | need_resched = 'p'; | ||
| 632 | break; | ||
| 633 | default: | ||
| 634 | need_resched = '.'; | ||
| 635 | break; | ||
| 636 | } | ||
| 637 | |||
| 623 | hardsoft_irq = | 638 | hardsoft_irq = |
| 624 | (hardirq && softirq) ? 'H' : | 639 | (hardirq && softirq) ? 'H' : |
| 625 | hardirq ? 'h' : | 640 | hardirq ? 'h' : |
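
The new switch distinguishes lazy preemption requests from an explicit need-resched. A hypothetical helper (need_resched_char() is not part of the patch) states the four-way mapping compactly:

	/* hypothetical helper, equivalent to the switch above */
	static char need_resched_char(unsigned long flags)
	{
		bool need = flags & TRACE_FLAG_NEED_RESCHED;
		bool preempt = flags & TRACE_FLAG_PREEMPT_RESCHED;

		if (need && preempt)
			return 'N';	/* both flags set */
		if (need)
			return 'n';	/* need-resched only */
		if (preempt)
			return 'p';	/* preempt-resched only */
		return '.';		/* neither */
	}
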
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 4e98e3b257a3..3f34dc9b40f3 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -45,7 +45,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
| 45 | entry->next_state = next->state; | 45 | entry->next_state = next->state; |
| 46 | entry->next_cpu = task_cpu(next); | 46 | entry->next_cpu = task_cpu(next); |
| 47 | 47 | ||
| 48 | if (!filter_check_discard(call, entry, buffer, event)) | 48 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 49 | trace_buffer_unlock_commit(buffer, event, flags, pc); | 49 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| @@ -101,7 +101,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
| 101 | entry->next_state = wakee->state; | 101 | entry->next_state = wakee->state; |
| 102 | entry->next_cpu = task_cpu(wakee); | 102 | entry->next_cpu = task_cpu(wakee); |
| 103 | 103 | ||
| 104 | if (!filter_check_discard(call, entry, buffer, event)) | 104 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 105 | trace_buffer_unlock_commit(buffer, event, flags, pc); | 105 | trace_buffer_unlock_commit(buffer, event, flags, pc); |
| 106 | } | 106 | } |
| 107 | 107 | ||
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 847f88a6194b..7af67360b330 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c | |||
| @@ -43,46 +43,15 @@ static DEFINE_MUTEX(all_stat_sessions_mutex); | |||
| 43 | /* The root directory for all stat files */ | 43 | /* The root directory for all stat files */ |
| 44 | static struct dentry *stat_dir; | 44 | static struct dentry *stat_dir; |
| 45 | 45 | ||
| 46 | /* | 46 | static void __reset_stat_session(struct stat_session *session) |
| 47 | * Iterate through the rbtree using a post-order traversal path | ||
| 48 | * to release the next node. | ||
| 49 | * It won't necessarily release one at each iteration | ||
| 50 | * but it will at least advance closer to the next one | ||
| 51 | * to be released. | ||
| 52 | */ | ||
| 53 | static struct rb_node *release_next(struct tracer_stat *ts, | ||
| 54 | struct rb_node *node) | ||
| 55 | { | 47 | { |
| 56 | struct stat_node *snode; | 48 | struct stat_node *snode, *n; |
| 57 | struct rb_node *parent = rb_parent(node); | ||
| 58 | |||
| 59 | if (node->rb_left) | ||
| 60 | return node->rb_left; | ||
| 61 | else if (node->rb_right) | ||
| 62 | return node->rb_right; | ||
| 63 | else { | ||
| 64 | if (!parent) | ||
| 65 | ; | ||
| 66 | else if (parent->rb_left == node) | ||
| 67 | parent->rb_left = NULL; | ||
| 68 | else | ||
| 69 | parent->rb_right = NULL; | ||
| 70 | 49 | ||
| 71 | snode = container_of(node, struct stat_node, node); | 50 | rbtree_postorder_for_each_entry_safe(snode, n, &session->stat_root, node) { |
| 72 | if (ts->stat_release) | 51 | if (session->ts->stat_release) |
| 73 | ts->stat_release(snode->stat); | 52 | session->ts->stat_release(snode->stat); |
| 74 | kfree(snode); | 53 | kfree(snode); |
| 75 | |||
| 76 | return parent; | ||
| 77 | } | 54 | } |
| 78 | } | ||
| 79 | |||
| 80 | static void __reset_stat_session(struct stat_session *session) | ||
| 81 | { | ||
| 82 | struct rb_node *node = session->stat_root.rb_node; | ||
| 83 | |||
| 84 | while (node) | ||
| 85 | node = release_next(session->ts, node); | ||
| 86 | 55 | ||
| 87 | session->stat_root = RB_ROOT; | 56 | session->stat_root = RB_ROOT; |
| 88 | } | 57 | } |
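
rbtree_postorder_for_each_entry_safe() visits children before their parent and fetches the next node before the loop body runs, which is what makes freeing inside the loop safe. A generic usage sketch with a hypothetical node type:

	#include <linux/rbtree.h>
	#include <linux/slab.h>

	struct item {			/* hypothetical node type */
		struct rb_node node;
	};

	static void free_tree(struct rb_root *root)
	{
		struct item *pos, *n;

		/* 'n' is read before 'pos' is freed, so the walk stays valid */
		rbtree_postorder_for_each_entry_safe(pos, n, root, node)
			kfree(pos);

		*root = RB_ROOT;	/* the tree is now empty */
	}
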
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 559329d9bd2f..e4b6d11bdf78 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call) | |||
| 302 | static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | 302 | static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) |
| 303 | { | 303 | { |
| 304 | struct trace_array *tr = data; | 304 | struct trace_array *tr = data; |
| 305 | struct ftrace_event_file *ftrace_file; | ||
| 305 | struct syscall_trace_enter *entry; | 306 | struct syscall_trace_enter *entry; |
| 306 | struct syscall_metadata *sys_data; | 307 | struct syscall_metadata *sys_data; |
| 307 | struct ring_buffer_event *event; | 308 | struct ring_buffer_event *event; |
| @@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 314 | syscall_nr = trace_get_syscall_nr(current, regs); | 315 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 315 | if (syscall_nr < 0) | 316 | if (syscall_nr < 0) |
| 316 | return; | 317 | return; |
| 317 | if (!test_bit(syscall_nr, tr->enabled_enter_syscalls)) | 318 | |
| 319 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ | ||
| 320 | ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]); | ||
| 321 | if (!ftrace_file) | ||
| 322 | return; | ||
| 323 | |||
| 324 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | ||
| 318 | return; | 325 | return; |
| 319 | 326 | ||
| 320 | sys_data = syscall_nr_to_meta(syscall_nr); | 327 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 336 | entry->nr = syscall_nr; | 343 | entry->nr = syscall_nr; |
| 337 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); | 344 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); |
| 338 | 345 | ||
| 339 | if (!filter_current_check_discard(buffer, sys_data->enter_event, | 346 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 340 | entry, event)) | ||
| 341 | trace_current_buffer_unlock_commit(buffer, event, | 347 | trace_current_buffer_unlock_commit(buffer, event, |
| 342 | irq_flags, pc); | 348 | irq_flags, pc); |
| 343 | } | 349 | } |
| @@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 345 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | 351 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) |
| 346 | { | 352 | { |
| 347 | struct trace_array *tr = data; | 353 | struct trace_array *tr = data; |
| 354 | struct ftrace_event_file *ftrace_file; | ||
| 348 | struct syscall_trace_exit *entry; | 355 | struct syscall_trace_exit *entry; |
| 349 | struct syscall_metadata *sys_data; | 356 | struct syscall_metadata *sys_data; |
| 350 | struct ring_buffer_event *event; | 357 | struct ring_buffer_event *event; |
| @@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
| 356 | syscall_nr = trace_get_syscall_nr(current, regs); | 363 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 357 | if (syscall_nr < 0) | 364 | if (syscall_nr < 0) |
| 358 | return; | 365 | return; |
| 359 | if (!test_bit(syscall_nr, tr->enabled_exit_syscalls)) | 366 | |
| 367 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ | ||
| 368 | ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]); | ||
| 369 | if (!ftrace_file) | ||
| 370 | return; | ||
| 371 | |||
| 372 | if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags)) | ||
| 360 | return; | 373 | return; |
| 361 | 374 | ||
| 362 | sys_data = syscall_nr_to_meta(syscall_nr); | 375 | sys_data = syscall_nr_to_meta(syscall_nr); |
| @@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
| 377 | entry->nr = syscall_nr; | 390 | entry->nr = syscall_nr; |
| 378 | entry->ret = syscall_get_return_value(current, regs); | 391 | entry->ret = syscall_get_return_value(current, regs); |
| 379 | 392 | ||
| 380 | if (!filter_current_check_discard(buffer, sys_data->exit_event, | 393 | if (!filter_check_discard(ftrace_file, entry, buffer, event)) |
| 381 | entry, event)) | ||
| 382 | trace_current_buffer_unlock_commit(buffer, event, | 394 | trace_current_buffer_unlock_commit(buffer, event, |
| 383 | irq_flags, pc); | 395 | irq_flags, pc); |
| 384 | } | 396 | } |
| @@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file, | |||
| 397 | if (!tr->sys_refcount_enter) | 409 | if (!tr->sys_refcount_enter) |
| 398 | ret = register_trace_sys_enter(ftrace_syscall_enter, tr); | 410 | ret = register_trace_sys_enter(ftrace_syscall_enter, tr); |
| 399 | if (!ret) { | 411 | if (!ret) { |
| 400 | set_bit(num, tr->enabled_enter_syscalls); | 412 | rcu_assign_pointer(tr->enter_syscall_files[num], file); |
| 401 | tr->sys_refcount_enter++; | 413 | tr->sys_refcount_enter++; |
| 402 | } | 414 | } |
| 403 | mutex_unlock(&syscall_trace_lock); | 415 | mutex_unlock(&syscall_trace_lock); |
| @@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file, | |||
| 415 | return; | 427 | return; |
| 416 | mutex_lock(&syscall_trace_lock); | 428 | mutex_lock(&syscall_trace_lock); |
| 417 | tr->sys_refcount_enter--; | 429 | tr->sys_refcount_enter--; |
| 418 | clear_bit(num, tr->enabled_enter_syscalls); | 430 | rcu_assign_pointer(tr->enter_syscall_files[num], NULL); |
| 419 | if (!tr->sys_refcount_enter) | 431 | if (!tr->sys_refcount_enter) |
| 420 | unregister_trace_sys_enter(ftrace_syscall_enter, tr); | 432 | unregister_trace_sys_enter(ftrace_syscall_enter, tr); |
| 421 | mutex_unlock(&syscall_trace_lock); | 433 | mutex_unlock(&syscall_trace_lock); |
| 434 | /* | ||
| 435 | * Callers expect the event to be completely disabled on | ||
| 436 | * return, so wait for current handlers to finish. | ||
| 437 | */ | ||
| 438 | synchronize_sched(); | ||
| 422 | } | 439 | } |
| 423 | 440 | ||
| 424 | static int reg_event_syscall_exit(struct ftrace_event_file *file, | 441 | static int reg_event_syscall_exit(struct ftrace_event_file *file, |
| @@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file, | |||
| 435 | if (!tr->sys_refcount_exit) | 452 | if (!tr->sys_refcount_exit) |
| 436 | ret = register_trace_sys_exit(ftrace_syscall_exit, tr); | 453 | ret = register_trace_sys_exit(ftrace_syscall_exit, tr); |
| 437 | if (!ret) { | 454 | if (!ret) { |
| 438 | set_bit(num, tr->enabled_exit_syscalls); | 455 | rcu_assign_pointer(tr->exit_syscall_files[num], file); |
| 439 | tr->sys_refcount_exit++; | 456 | tr->sys_refcount_exit++; |
| 440 | } | 457 | } |
| 441 | mutex_unlock(&syscall_trace_lock); | 458 | mutex_unlock(&syscall_trace_lock); |
| @@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file, | |||
| 453 | return; | 470 | return; |
| 454 | mutex_lock(&syscall_trace_lock); | 471 | mutex_lock(&syscall_trace_lock); |
| 455 | tr->sys_refcount_exit--; | 472 | tr->sys_refcount_exit--; |
| 456 | clear_bit(num, tr->enabled_exit_syscalls); | 473 | rcu_assign_pointer(tr->exit_syscall_files[num], NULL); |
| 457 | if (!tr->sys_refcount_exit) | 474 | if (!tr->sys_refcount_exit) |
| 458 | unregister_trace_sys_exit(ftrace_syscall_exit, tr); | 475 | unregister_trace_sys_exit(ftrace_syscall_exit, tr); |
| 459 | mutex_unlock(&syscall_trace_lock); | 476 | mutex_unlock(&syscall_trace_lock); |
| 477 | /* | ||
| 478 | * Callers expect the event to be completely disabled on | ||
| 479 | * return, so wait for current handlers to finish. | ||
| 480 | */ | ||
| 481 | synchronize_sched(); | ||
| 460 | } | 482 | } |
| 461 | 483 | ||
| 462 | static int __init init_syscall_trace(struct ftrace_event_call *call) | 484 | static int __init init_syscall_trace(struct ftrace_event_call *call) |
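
The bitmap-to-pointer conversion above follows the usual RCU publish/retire pattern, condensed here from the hunks (names and locking context as in the diff):

	/* register, under syscall_trace_lock: publish the file pointer */
	rcu_assign_pointer(tr->enter_syscall_files[num], file);

	/* handler: read side, already inside rcu_read_lock_sched() via
	 * the tracepoint's __DO_TRACE() */
	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;

	/* unregister: unpublish, then wait out every running handler so
	 * the caller may free or reuse the file safely */
	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
	synchronize_sched();
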
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 272261b5f94f..b6dcc42ef7f5 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -128,6 +128,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) | |||
| 128 | if (is_ret) | 128 | if (is_ret) |
| 129 | tu->consumer.ret_handler = uretprobe_dispatcher; | 129 | tu->consumer.ret_handler = uretprobe_dispatcher; |
| 130 | init_trace_uprobe_filter(&tu->filter); | 130 | init_trace_uprobe_filter(&tu->filter); |
| 131 | tu->call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER; | ||
| 131 | return tu; | 132 | return tu; |
| 132 | 133 | ||
| 133 | error: | 134 | error: |
| @@ -561,7 +562,7 @@ static void uprobe_trace_print(struct trace_uprobe *tu, | |||
| 561 | for (i = 0; i < tu->nr_args; i++) | 562 | for (i = 0; i < tu->nr_args; i++) |
| 562 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); | 563 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); |
| 563 | 564 | ||
| 564 | if (!filter_current_check_discard(buffer, call, entry, event)) | 565 | if (!call_filter_check_discard(call, entry, buffer, event)) |
| 565 | trace_buffer_unlock_commit(buffer, event, 0, 0); | 566 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
| 566 | } | 567 | } |
| 567 | 568 | ||
