path: root/kernel/trace
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                 | 313
-rw-r--r--  kernel/trace/trace.c                  |   1
-rw-r--r--  kernel/trace/trace.h                  |  15
-rw-r--r--  kernel/trace/trace_functions.c        |   2
-rw-r--r--  kernel/trace/trace_functions_graph.c  |   2
5 files changed, 148 insertions(+), 185 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 900dbb1efff2..8b488f4dd8e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -89,16 +89,16 @@ struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
 static struct ftrace_ops *set_function_trace_op;
 
-/* List for set_ftrace_pid's pids. */
-LIST_HEAD(ftrace_pids);
-struct ftrace_pid {
-        struct list_head list;
-        struct pid *pid;
-};
-
-static bool ftrace_pids_enabled(void)
+static bool ftrace_pids_enabled(struct ftrace_ops *ops)
 {
-        return !list_empty(&ftrace_pids);
+        struct trace_array *tr;
+
+        if (!(ops->flags & FTRACE_OPS_FL_PID) || !ops->private)
+                return false;
+
+        tr = ops->private;
+
+        return tr->function_pids != NULL;
 }
 
 static void ftrace_update_trampoline(struct ftrace_ops *ops);
@@ -179,7 +179,9 @@ int ftrace_nr_registered_ops(void)
 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
                             struct ftrace_ops *op, struct pt_regs *regs)
 {
-        if (!test_tsk_trace_trace(current))
+        struct trace_array *tr = op->private;
+
+        if (tr && this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid))
                 return;
 
         op->saved_func(ip, parent_ip, op, regs);
@@ -417,7 +419,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
         /* Always save the function, and reset at unregistering */
         ops->saved_func = ops->func;
 
-        if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
+        if (ftrace_pids_enabled(ops))
                 ops->func = ftrace_pid_func;
 
         ftrace_update_trampoline(ops);
@@ -450,7 +452,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 
 static void ftrace_update_pid_func(void)
 {
-        bool enabled = ftrace_pids_enabled();
         struct ftrace_ops *op;
 
         /* Only do something if we are tracing something */
@@ -459,8 +460,8 @@ static void ftrace_update_pid_func(void)
 
         do_for_each_ftrace_op(op, ftrace_ops_list) {
                 if (op->flags & FTRACE_OPS_FL_PID) {
-                        op->func = enabled ? ftrace_pid_func :
-                                op->saved_func;
+                        op->func = ftrace_pids_enabled(op) ?
+                                ftrace_pid_func : op->saved_func;
                         ftrace_update_trampoline(op);
                 }
         } while_for_each_ftrace_op(op);
@@ -5324,179 +5325,99 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
         return ops->func;
 }
 
-static void clear_ftrace_swapper(void)
+static void
+ftrace_filter_pid_sched_switch_probe(void *data, bool preempt,
+                    struct task_struct *prev, struct task_struct *next)
 {
-        struct task_struct *p;
-        int cpu;
+        struct trace_array *tr = data;
+        struct trace_pid_list *pid_list;
 
-        get_online_cpus();
-        for_each_online_cpu(cpu) {
-                p = idle_task(cpu);
-                clear_tsk_trace_trace(p);
-        }
-        put_online_cpus();
-}
-
-static void set_ftrace_swapper(void)
-{
-        struct task_struct *p;
-        int cpu;
+        pid_list = rcu_dereference_sched(tr->function_pids);
 
-        get_online_cpus();
-        for_each_online_cpu(cpu) {
-                p = idle_task(cpu);
-                set_tsk_trace_trace(p);
-        }
-        put_online_cpus();
+        this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+                       trace_ignore_this_task(pid_list, next));
 }
 
-static void clear_ftrace_pid(struct pid *pid)
+static void clear_ftrace_pids(struct trace_array *tr)
 {
-        struct task_struct *p;
+        struct trace_pid_list *pid_list;
+        int cpu;
 
-        rcu_read_lock();
-        do_each_pid_task(pid, PIDTYPE_PID, p) {
-                clear_tsk_trace_trace(p);
-        } while_each_pid_task(pid, PIDTYPE_PID, p);
-        rcu_read_unlock();
+        pid_list = rcu_dereference_protected(tr->function_pids,
+                                             lockdep_is_held(&ftrace_lock));
+        if (!pid_list)
+                return;
 
-        put_pid(pid);
-}
+        unregister_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
 
-static void set_ftrace_pid(struct pid *pid)
-{
-        struct task_struct *p;
+        for_each_possible_cpu(cpu)
+                per_cpu_ptr(tr->trace_buffer.data, cpu)->ftrace_ignore_pid = false;
 
-        rcu_read_lock();
-        do_each_pid_task(pid, PIDTYPE_PID, p) {
-                set_tsk_trace_trace(p);
-        } while_each_pid_task(pid, PIDTYPE_PID, p);
-        rcu_read_unlock();
-}
+        rcu_assign_pointer(tr->function_pids, NULL);
 
-static void clear_ftrace_pid_task(struct pid *pid)
-{
-        if (pid == ftrace_swapper_pid)
-                clear_ftrace_swapper();
-        else
-                clear_ftrace_pid(pid);
-}
+        /* Wait till all users are no longer using pid filtering */
+        synchronize_sched();
 
-static void set_ftrace_pid_task(struct pid *pid)
-{
-        if (pid == ftrace_swapper_pid)
-                set_ftrace_swapper();
-        else
-                set_ftrace_pid(pid);
-}
+        trace_free_pid_list(pid_list);
 }
 
-static int ftrace_pid_add(int p)
+static void ftrace_pid_reset(struct trace_array *tr)
 {
-        struct pid *pid;
-        struct ftrace_pid *fpid;
-        int ret = -EINVAL;
-
         mutex_lock(&ftrace_lock);
-
-        if (!p)
-                pid = ftrace_swapper_pid;
-        else
-                pid = find_get_pid(p);
-
-        if (!pid)
-                goto out;
-
-        ret = 0;
-
-        list_for_each_entry(fpid, &ftrace_pids, list)
-                if (fpid->pid == pid)
-                        goto out_put;
-
-        ret = -ENOMEM;
-
-        fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
-        if (!fpid)
-                goto out_put;
-
-        list_add(&fpid->list, &ftrace_pids);
-        fpid->pid = pid;
-
-        set_ftrace_pid_task(pid);
+        clear_ftrace_pids(tr);
 
         ftrace_update_pid_func();
-
         ftrace_startup_all(0);
 
         mutex_unlock(&ftrace_lock);
-        return 0;
-
-out_put:
-        if (pid != ftrace_swapper_pid)
-                put_pid(pid);
-
-out:
-        mutex_unlock(&ftrace_lock);
-        return ret;
 }
 
-static void ftrace_pid_reset(void)
-{
-        struct ftrace_pid *fpid, *safe;
-
-        mutex_lock(&ftrace_lock);
-        list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
-                struct pid *pid = fpid->pid;
-
-                clear_ftrace_pid_task(pid);
-
-                list_del(&fpid->list);
-                kfree(fpid);
-        }
-
-        ftrace_update_pid_func();
-        ftrace_startup_all(0);
-
-        mutex_unlock(&ftrace_lock);
-}
+/* Greater than any max PID */
+#define FTRACE_NO_PIDS          (void *)(PID_MAX_LIMIT + 1)
 
 static void *fpid_start(struct seq_file *m, loff_t *pos)
+        __acquires(RCU)
 {
+        struct trace_pid_list *pid_list;
+        struct trace_array *tr = m->private;
+
         mutex_lock(&ftrace_lock);
+        rcu_read_lock_sched();
+
+        pid_list = rcu_dereference_sched(tr->function_pids);
 
-        if (!ftrace_pids_enabled() && (!*pos))
-                return (void *) 1;
+        if (!pid_list)
+                return !(*pos) ? FTRACE_NO_PIDS : NULL;
 
-        return seq_list_start(&ftrace_pids, *pos);
+        return trace_pid_start(pid_list, pos);
 }
 
 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        if (v == (void *)1)
+        struct trace_array *tr = m->private;
+        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->function_pids);
+
+        if (v == FTRACE_NO_PIDS)
                 return NULL;
 
-        return seq_list_next(v, &ftrace_pids, pos);
+        return trace_pid_next(pid_list, v, pos);
 }
 
 static void fpid_stop(struct seq_file *m, void *p)
+        __releases(RCU)
 {
+        rcu_read_unlock_sched();
         mutex_unlock(&ftrace_lock);
 }
 
 static int fpid_show(struct seq_file *m, void *v)
 {
-        const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
-
-        if (v == (void *)1) {
+        if (v == FTRACE_NO_PIDS) {
                 seq_puts(m, "no pid\n");
                 return 0;
         }
 
-        if (fpid->pid == ftrace_swapper_pid)
-                seq_puts(m, "swapper tasks\n");
-        else
-                seq_printf(m, "%u\n", pid_vnr(fpid->pid));
-
-        return 0;
+        return trace_pid_show(m, v);
 }
 
 static const struct seq_operations ftrace_pid_sops = {
@@ -5509,58 +5430,103 @@ static const struct seq_operations ftrace_pid_sops = {
 static int
 ftrace_pid_open(struct inode *inode, struct file *file)
 {
+        struct trace_array *tr = inode->i_private;
+        struct seq_file *m;
         int ret = 0;
 
+        if (trace_array_get(tr) < 0)
+                return -ENODEV;
+
         if ((file->f_mode & FMODE_WRITE) &&
             (file->f_flags & O_TRUNC))
-                ftrace_pid_reset();
+                ftrace_pid_reset(tr);
 
-        if (file->f_mode & FMODE_READ)
-                ret = seq_open(file, &ftrace_pid_sops);
+        ret = seq_open(file, &ftrace_pid_sops);
+        if (ret < 0) {
+                trace_array_put(tr);
+        } else {
+                m = file->private_data;
+                /* copy tr over to seq ops */
+                m->private = tr;
+        }
 
         return ret;
 }
 
+static void ignore_task_cpu(void *data)
+{
+        struct trace_array *tr = data;
+        struct trace_pid_list *pid_list;
+
+        /*
+         * This function is called by on_each_cpu() while the
+         * event_mutex is held.
+         */
+        pid_list = rcu_dereference_protected(tr->function_pids,
+                                             mutex_is_locked(&ftrace_lock));
+
+        this_cpu_write(tr->trace_buffer.data->ftrace_ignore_pid,
+                       trace_ignore_this_task(pid_list, current));
+}
+
 static ssize_t
 ftrace_pid_write(struct file *filp, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
 {
-        char buf[64], *tmp;
-        long val;
-        int ret;
+        struct seq_file *m = filp->private_data;
+        struct trace_array *tr = m->private;
+        struct trace_pid_list *filtered_pids = NULL;
+        struct trace_pid_list *pid_list;
+        ssize_t ret;
 
-        if (cnt >= sizeof(buf))
-                return -EINVAL;
+        if (!cnt)
+                return 0;
+
+        mutex_lock(&ftrace_lock);
+
+        filtered_pids = rcu_dereference_protected(tr->function_pids,
+                                             lockdep_is_held(&ftrace_lock));
+
+        ret = trace_pid_write(filtered_pids, &pid_list, ubuf, cnt);
+        if (ret < 0)
+                goto out;
 
-        if (copy_from_user(&buf, ubuf, cnt))
-                return -EFAULT;
+        rcu_assign_pointer(tr->function_pids, pid_list);
 
-        buf[cnt] = 0;
+        if (filtered_pids) {
+                synchronize_sched();
+                trace_free_pid_list(filtered_pids);
+        } else if (pid_list) {
+                /* Register a probe to set whether to ignore the tracing of a task */
+                register_trace_sched_switch(ftrace_filter_pid_sched_switch_probe, tr);
+        }
 
         /*
-         * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
-         * to clean the filter quietly.
+         * Ignoring of pids is done at task switch. But we have to
+         * check for those tasks that are currently running.
+         * Always do this in case a pid was appended or removed.
          */
-        tmp = strstrip(buf);
-        if (strlen(tmp) == 0)
-                return 1;
+        on_each_cpu(ignore_task_cpu, tr, 1);
 
-        ret = kstrtol(tmp, 10, &val);
-        if (ret < 0)
-                return ret;
+        ftrace_update_pid_func();
+        ftrace_startup_all(0);
+ out:
+        mutex_unlock(&ftrace_lock);
 
-        ret = ftrace_pid_add(val);
+        if (ret > 0)
+                *ppos += ret;
 
-        return ret ? ret : cnt;
+        return ret;
 }
 
 static int
 ftrace_pid_release(struct inode *inode, struct file *file)
 {
-        if (file->f_mode & FMODE_READ)
-                seq_release(inode, file);
+        struct trace_array *tr = inode->i_private;
 
-        return 0;
+        trace_array_put(tr);
+
+        return seq_release(inode, file);
 }
 
 static const struct file_operations ftrace_pid_fops = {
@@ -5571,24 +5537,17 @@ static const struct file_operations ftrace_pid_fops = {
         .release = ftrace_pid_release,
 };
 
-static __init int ftrace_init_tracefs(void)
+void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer)
 {
-        struct dentry *d_tracer;
-
-        d_tracer = tracing_init_dentry();
-        if (IS_ERR(d_tracer))
-                return 0;
-
-        ftrace_init_dyn_tracefs(d_tracer);
+        /* Only the top level directory has the dyn_tracefs and profile */
+        if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
+                ftrace_init_dyn_tracefs(d_tracer);
+                ftrace_profile_tracefs(d_tracer);
+        }
 
         trace_create_file("set_ftrace_pid", 0644, d_tracer,
-                            NULL, &ftrace_pid_fops);
-
-        ftrace_profile_tracefs(d_tracer);
-
-        return 0;
+                            tr, &ftrace_pid_fops);
 }
-fs_initcall(ftrace_init_tracefs);
 
 /**
  * ftrace_kill - kill ftrace
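
The heart of the ftrace.c changes above: instead of marking every task with set_tsk_trace_trace(), the PID filter now lives in an RCU-protected trace_pid_list hung off each trace_array, and a sched_switch probe recomputes a per-CPU ftrace_ignore_pid flag for the incoming task. The function-entry hot path is then a single per-CPU read. Below is a minimal userspace sketch of that pattern; every name and data structure in it is an illustrative stand-in, not a kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS   4
#define PID_LIMIT 32768

static bool pid_filtered[PID_LIMIT];   /* stand-in for trace_pid_list   */
static bool ignore_pid[NR_CPUS];       /* stand-in for the per-CPU flag */

/* Analogue of ftrace_filter_pid_sched_switch_probe(): runs at switch time. */
static void sched_switch_probe(int cpu, int next_pid)
{
        ignore_pid[cpu] = !pid_filtered[next_pid];
}

/* Analogue of ftrace_pid_func(): the per-event hot path is one flag read. */
static void trace_function(int cpu, const char *fn)
{
        if (ignore_pid[cpu])
                return;
        printf("cpu%d: %s\n", cpu, fn);
}

int main(void)
{
        pid_filtered[42] = true;        /* like: echo 42 > set_ftrace_pid */

        sched_switch_probe(0, 42);      /* cpu0 switches to pid 42 */
        sched_switch_probe(1, 7);       /* cpu1 switches to pid 7  */

        trace_function(0, "do_sys_open");   /* printed: pid 42 is in the list */
        trace_function(1, "do_sys_open");   /* suppressed: pid 7 is not      */
        return 0;
}

The design point: the filter lookup cost is paid once per context switch rather than once per traced function call, so tracing overhead stays flat regardless of how many PIDs are being filtered.
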
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index a8bb7485fd1d..aa240551fc5d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -7233,6 +7233,7 @@ init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
         for_each_tracing_cpu(cpu)
                 tracing_init_tracefs_percpu(tr, cpu);
 
+        ftrace_init_tracefs(tr, d_tracer);
 }
 
 static struct vfsmount *trace_automount(void *ingore)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index a4dce1ef9e03..eaee458755a4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -156,6 +156,9 @@ struct trace_array_cpu {
         char comm[TASK_COMM_LEN];
 
         bool ignore_pid;
+#ifdef CONFIG_FUNCTION_TRACER
+        bool ftrace_ignore_pid;
+#endif
 };
 
 struct tracer;
@@ -247,6 +250,7 @@ struct trace_array {
         int                     ref;
 #ifdef CONFIG_FUNCTION_TRACER
         struct ftrace_ops       *ops;
+        struct trace_pid_list   __rcu *function_pids;
         /* function tracing enabled */
         int                     function_enabled;
 #endif
@@ -840,12 +844,9 @@ extern struct list_head ftrace_pids;
 
 #ifdef CONFIG_FUNCTION_TRACER
 extern bool ftrace_filter_param __initdata;
-static inline int ftrace_trace_task(struct task_struct *task)
+static inline int ftrace_trace_task(struct trace_array *tr)
 {
-        if (list_empty(&ftrace_pids))
-                return 1;
-
-        return test_tsk_trace_trace(task);
+        return !this_cpu_read(tr->trace_buffer.data->ftrace_ignore_pid);
 }
 extern int ftrace_is_dead(void);
 int ftrace_create_function_files(struct trace_array *tr,
@@ -855,8 +856,9 @@ void ftrace_init_global_array_ops(struct trace_array *tr);
 void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func);
 void ftrace_reset_array_ops(struct trace_array *tr);
 int using_ftrace_ops_list_func(void);
+void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer);
 #else
-static inline int ftrace_trace_task(struct task_struct *task)
+static inline int ftrace_trace_task(struct trace_array *tr)
 {
         return 1;
 }
@@ -871,6 +873,7 @@ static inline void ftrace_destroy_function_files(struct trace_array *tr) { }
 static inline __init void
 ftrace_init_global_array_ops(struct trace_array *tr) { }
 static inline void ftrace_reset_array_ops(struct trace_array *tr) { }
+static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }
 /* ftace_func_t type is not defined, use macro instead of static inline */
 #define ftrace_init_array_ops(tr, func) do { } while (0)
 #endif /* CONFIG_FUNCTION_TRACER */
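
The pid-list update in ftrace_pid_write() above follows the classic RCU publish-then-retire shape: install the new list with rcu_assign_pointer(), wait out in-flight readers with synchronize_sched(), then free the old list with trace_free_pid_list(). A compressed userspace sketch of that lifecycle follows, with the grace-period wait stubbed out; it is illustrative only, and real code would use liburcu or the kernel's RCU API.

#include <stdatomic.h>
#include <stdlib.h>

struct pid_list { int *pids; size_t nr; };

static _Atomic(struct pid_list *) function_pids;

/* Grace-period wait elided: stands in for synchronize_sched(). */
static void synchronize_readers(void) { }

/* Mirrors the shape of ftrace_pid_write(): publish new list, retire old. */
static void update_pid_list(struct pid_list *new_list)
{
        /* Like rcu_assign_pointer(): make the new list visible to readers. */
        struct pid_list *old = atomic_exchange(&function_pids, new_list);

        if (old) {
                synchronize_readers();  /* wait for readers of 'old' */
                free(old->pids);        /* like trace_free_pid_list() */
                free(old);
        }
}

int main(void)
{
        struct pid_list *pl = calloc(1, sizeof(*pl));

        update_pid_list(pl);    /* first write: publish, nothing to retire */
        update_pid_list(NULL);  /* clear: old list waited on, then freed   */
        return 0;
}

Because readers only ever see either the old or the new pointer, the writer never has to stop tracing while swapping filters; it only pays one grace period before reclaiming the old list.
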
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 5a095c2e4b69..0efa00d80623 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -43,7 +43,7 @@ static int allocate_ftrace_ops(struct trace_array *tr)
 
         /* Currently only the non stack verision is supported */
         ops->func = function_trace_call;
-        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE;
+        ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;
 
         tr->ops = ops;
         ops->private = tr;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3a0244ff7ea8..67cce7896aeb 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -319,7 +319,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
         int cpu;
         int pc;
 
-        if (!ftrace_trace_task(current))
+        if (!ftrace_trace_task(tr))
                 return 0;
 
         /* trace it when it is-nested-in or is a function enabled. */
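
A practical consequence of wiring set_ftrace_pid into init_tracer_tracefs(): each tracing instance now gets its own copy of the file, so function-tracer PID filtering can be configured per instance. A small illustrative snippet follows; the tracefs mount point and the instance name "foo" are assumptions, not part of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Per-instance file created by ftrace_init_tracefs() in this patch. */
        const char *path = "/sys/kernel/tracing/instances/foo/set_ftrace_pid";
        const char *pid = "42\n";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, pid, strlen(pid)) < 0)
                perror("write");
        close(fd);
        return 0;
}

This is equivalent to `echo 42 > .../instances/foo/set_ftrace_pid`; filtering in one instance leaves the top-level tracer and other instances untouched.
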