 arch/x86/include/asm/ftrace.h      |  1
 arch/x86/include/asm/thread_info.h | 29
 arch/x86/kernel/ftrace.c           | 29
 include/linux/ftrace.h             |  5
 include/linux/sched.h              | 23
 kernel/exit.c                      |  5
 kernel/fork.c                      |  4
 kernel/sched.c                     |  3
 kernel/trace/ftrace.c              | 96
 9 files changed, 137 insertions(+), 58 deletions(-)
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 2bb43b433e07..754a3e082f94 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -29,7 +29,6 @@ struct dyn_arch_ftrace {
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
-#define FTRACE_RET_STACK_SIZE 20
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index e90e81ef6ab9..0921b4018c11 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -40,36 +40,8 @@ struct thread_info {
 	 */
 	__u8 supervisor_stack[0];
 #endif
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/* Index of current stored adress in ret_stack */
-	int		curr_ret_stack;
-	/* Stack of return addresses for return function tracing */
-	struct ftrace_ret_stack	ret_stack[FTRACE_RET_STACK_SIZE];
-	/*
-	 * Number of functions that haven't been traced
-	 * because of depth overrun.
-	 */
-	atomic_t	trace_overrun;
-#endif
 };
 
-#ifdef CONFIG_FUNCTION_RET_TRACER
-#define INIT_THREAD_INFO(tsk)			\
-{						\
-	.task		= &tsk,			\
-	.exec_domain	= &default_exec_domain,	\
-	.flags		= 0,			\
-	.cpu		= 0,			\
-	.preempt_count	= 1,			\
-	.addr_limit	= KERNEL_DS,		\
-	.restart_block	= {			\
-		.fn = do_no_restart_syscall,	\
-	},					\
-	.curr_ret_stack = -1,			\
-	.trace_overrun	= ATOMIC_INIT(0)	\
-}
-#else
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.task		= &tsk,			\
@@ -82,7 +54,6 @@ struct thread_info {
 		.fn = do_no_restart_syscall,	\
 	},					\
 }
-#endif
 
 #define init_thread_info	(init_thread_union.thread_info)
 #define init_stack		(init_thread_union.stack)
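
Context for the two thread_info.h hunks above: until now every thread carried a fixed array of FTRACE_RET_STACK_SIZE (20) return-stack entries inside thread_info, whether or not the return tracer ran, and a second INIT_THREAD_INFO variant existed just to initialize it. After this patch the storage is a pointer in task_struct (see the include/linux/sched.h hunk below), backed by FTRACE_RETFUNC_DEPTH (50) entries only while tracing is active. A minimal userspace sketch of the footprint difference, reusing the entry fields visible in the patch; illustrative only:

/* footprint.c -- per-thread cost, before vs. after (sketch) */
#include <stdio.h>

struct ftrace_ret_stack {	/* fields as used by push_return_trace() */
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
};

int main(void)
{
	/* before: 20 entries embedded in every thread_info */
	printf("embedded: %zu bytes in every thread\n",
	       20 * sizeof(struct ftrace_ret_stack));
	/* after: one pointer always, 50 entries only while tracing */
	printf("pointer:  %zu bytes always, %zu more while tracing\n",
	       sizeof(struct ftrace_ret_stack *),
	       50 * sizeof(struct ftrace_ret_stack));
	return 0;
}

On x86-64 that is 480 bytes per thread unconditionally before, versus 8 bytes plus 1200 bytes per task only while a return tracer is registered.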
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 356bb1eb6e9a..bb137f7297ed 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -350,19 +350,21 @@ static int push_return_trace(unsigned long ret, unsigned long long time,
 				unsigned long func)
 {
 	int index;
-	struct thread_info *ti = current_thread_info();
+
+	if (!current->ret_stack)
+		return -EBUSY;
 
 	/* The return trace stack is full */
-	if (ti->curr_ret_stack == FTRACE_RET_STACK_SIZE - 1) {
-		atomic_inc(&ti->trace_overrun);
+	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
+		atomic_inc(&current->trace_overrun);
 		return -EBUSY;
 	}
 
-	index = ++ti->curr_ret_stack;
+	index = ++current->curr_ret_stack;
 	barrier();
-	ti->ret_stack[index].ret = ret;
-	ti->ret_stack[index].func = func;
-	ti->ret_stack[index].calltime = time;
+	current->ret_stack[index].ret = ret;
+	current->ret_stack[index].func = func;
+	current->ret_stack[index].calltime = time;
 
 	return 0;
 }
@@ -373,13 +375,12 @@ static void pop_return_trace(unsigned long *ret, unsigned long long *time,
 {
 	int index;
 
-	struct thread_info *ti = current_thread_info();
-	index = ti->curr_ret_stack;
-	*ret = ti->ret_stack[index].ret;
-	*func = ti->ret_stack[index].func;
-	*time = ti->ret_stack[index].calltime;
-	*overrun = atomic_read(&ti->trace_overrun);
-	ti->curr_ret_stack--;
+	index = current->curr_ret_stack;
+	*ret = current->ret_stack[index].ret;
+	*func = current->ret_stack[index].func;
+	*time = current->ret_stack[index].calltime;
+	*overrun = atomic_read(&current->trace_overrun);
+	current->curr_ret_stack--;
 }
 
 /*
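
The two functions above implement the per-task LIFO at the heart of the return tracer: push_return_trace() saves the real return address (plus an entry timestamp) before the trampoline hijacks it, and pop_return_trace() drains the entry when the function returns. The new !current->ret_stack guard is what makes the hot path safe now that a task may legitimately have no stack, e.g. when the kmalloc() in ftrace_retfunc_init_task() failed at fork time. A compact userspace model of the same discipline; a sketch with borrowed names, not the kernel code:

#include <stdio.h>
#include <stdlib.h>

#define RETFUNC_DEPTH 50	/* models FTRACE_RETFUNC_DEPTH */

struct ret_entry {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
};

struct task {
	int curr;		/* like curr_ret_stack: -1 means empty */
	int overrun;		/* models the atomic_t trace_overrun */
	struct ret_entry *stack;/* NULL until tracing hands one out */
};

static int push(struct task *t, unsigned long ret, unsigned long func,
		unsigned long long time)
{
	if (!t->stack)			/* task never got a stack */
		return -1;
	if (t->curr == RETFUNC_DEPTH - 1) {
		t->overrun++;		/* too deep: count it, skip tracing */
		return -1;
	}
	t->curr++;			/* claim a slot, then fill it */
	t->stack[t->curr].ret = ret;
	t->stack[t->curr].func = func;
	t->stack[t->curr].calltime = time;
	return 0;
}

static void pop(struct task *t, struct ret_entry *e)
{
	*e = t->stack[t->curr];		/* only called after a push */
	t->curr--;
}

int main(void)
{
	struct task t = { -1, 0, calloc(RETFUNC_DEPTH, sizeof(*t.stack)) };
	struct ret_entry e;

	if (push(&t, 0x1234, 0x5678, 42) == 0) {
		pop(&t, &e);
		printf("func %#lx returns to %#lx, calltime %llu\n",
		       e.func, e.ret, e.calltime);
	}
	free(t.stack);
	return 0;
}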
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f7ba4ea5e128..2ba259b2defa 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -323,6 +323,8 @@ struct ftrace_retfunc {
 };
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
+#define FTRACE_RETFUNC_DEPTH 50
+#define FTRACE_RETSTACK_ALLOC_SIZE 32
 /* Type of a callback handler of tracing return function */
 typedef void (*trace_function_return_t)(struct ftrace_retfunc *);
 
@@ -330,6 +332,9 @@ extern int register_ftrace_return(trace_function_return_t func);
 /* The current handler in use */
 extern trace_function_return_t ftrace_function_return;
 extern void unregister_ftrace_return(void);
+
+extern void ftrace_retfunc_init_task(struct task_struct *t);
+extern void ftrace_retfunc_exit_task(struct task_struct *t);
 #endif
 
 #endif /* _LINUX_FTRACE_H */
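
The client-facing API is untouched by this patch: a tracer still registers one trace_function_return_t and receives a struct ftrace_retfunc per traced return; only the two depth/batch constants and the task init/exit helpers are new. A hedged sketch of a caller, kernel-style; the exact set of ftrace_retfunc fields is an assumption based on what pop_return_trace() fills in above:

/* Sketch of a return-tracer client; assumes the ftrace_retfunc
 * fields read in pop_return_trace(): func, ret, calltime, overrun. */
static void my_return_handler(struct ftrace_retfunc *trace)
{
	printk("func %lx returned to %lx (calltime %llu, overruns %lu)\n",
	       trace->func, trace->ret,
	       trace->calltime, trace->overrun);
}

static int my_tracer_start(void)
{
	/* After this patch, this also walks the task list and gives
	 * every existing task a FTRACE_RETFUNC_DEPTH-entry stack. */
	return register_ftrace_return(my_return_handler);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_return();
}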
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c8e0db464206..bee1e93c95ad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1352,6 +1352,17 @@ struct task_struct {
 	unsigned long default_timer_slack_ns;
 
 	struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	/* Index of current stored adress in ret_stack */
+	int curr_ret_stack;
+	/* Stack of return addresses for return function tracing */
+	struct ftrace_ret_stack	*ret_stack;
+	/*
+	 * Number of functions that haven't been traced
+	 * because of depth overrun.
+	 */
+	atomic_t trace_overrun;
+#endif
 };
 
 /*
@@ -2006,18 +2017,6 @@ static inline void setup_thread_stack(struct task_struct *p, struct task_struct
 {
 	*task_thread_info(p) = *task_thread_info(org);
 	task_thread_info(p)->task = p;
-
-#ifdef CONFIG_FUNCTION_RET_TRACER
-	/*
-	 * When fork() creates a child process, this function is called.
-	 * But the child task may not inherit the return adresses traced
-	 * by the return function tracer because it will directly execute
-	 * in userspace and will not return to kernel functions its parent
-	 * used.
-	 */
-	task_thread_info(p)->curr_ret_stack = -1;
-	atomic_set(&task_thread_info(p)->trace_overrun, 0);
-#endif
 }
 
 static inline unsigned long *end_of_stack(struct task_struct *p)
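
Two conventions worth noting in the sched.h changes: curr_ret_stack == -1 means empty (push_return_trace() increments before storing, so index 0 is the first live entry), and the setup_thread_stack() reset removed in the second hunk is no longer needed because a forked child now starts with ret_stack == NULL and only receives a fresh, empty stack from ftrace_retfunc_init_task(), so it can never replay return addresses recorded in its parent. A trivial standalone illustration of the sentinel:

#include <assert.h>

int main(void)
{
	int curr_ret_stack = -1;	/* empty: nothing pending */
	int index;

	index = ++curr_ret_stack;	/* first push lands at slot 0 */
	assert(index == 0);

	curr_ret_stack--;		/* pop drains back to the sentinel */
	assert(curr_ret_stack == -1);
	return 0;
}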
diff --git a/kernel/exit.c b/kernel/exit.c
index 35c8ec2ba03a..b9d446329da1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,6 +47,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
 #include <trace/sched.h>
+#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -1127,7 +1128,9 @@ NORET_TYPE void do_exit(long code)
 	preempt_disable();
 	/* causes final put_task_struct in finish_task_switch(). */
 	tsk->state = TASK_DEAD;
-
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	ftrace_retfunc_exit_task(tsk);
+#endif
 	schedule();
 	BUG();
 	/* Avoid "noreturn function does return". */
diff --git a/kernel/fork.c b/kernel/fork.c
index ac62f43ee430..d1eb30e69ccc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
 #include <linux/mount.h>
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
+#include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
@@ -1269,6 +1270,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	total_forks++;
 	spin_unlock(&current->sighand->siglock);
 	write_unlock_irq(&tasklist_lock);
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	ftrace_retfunc_init_task(p);
+#endif
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
 	return p;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4de56108c86f..fb17205950de 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5901,6 +5901,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
+#ifdef CONFIG_FUNCTION_RET_TRACER
+	ftrace_retfunc_init_task(idle);
+#endif
 }
 
 /*
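
The three hooks (copy_process(), do_exit(), init_idle()) pin down the stack's lifecycle: tasks created while tracing is active get a stack at birth, tasks that predate registration are covered by alloc_retstack_tasklist() below, idle threads (which never pass through copy_process()) are caught in init_idle(), and do_exit() frees whatever a task holds. A minimal userspace model of that rule, with assumed names; free(NULL) being a no-op, like kfree(NULL), is what makes the unconditional exit hook safe:

#include <stdlib.h>

#define RETFUNC_DEPTH 50

static int retfunc_active;		/* models ftrace_retfunc_active */

struct task {
	void *ret_stack;		/* NULL when not tracing */
};

/* fork()/init_idle() path: allocate only while a tracer is registered */
static void retfunc_init_task(struct task *t)
{
	t->ret_stack = retfunc_active ?
		       malloc(RETFUNC_DEPTH * 24) : NULL;	/* 24 ~ entry size */
}

/* do_exit() path: no "was tracing active?" check needed */
static void retfunc_exit_task(struct task *t)
{
	free(t->ret_stack);
	t->ret_stack = NULL;
}

int main(void)
{
	struct task t;

	retfunc_active = 1;
	retfunc_init_task(&t);		/* born while tracing: gets a stack */
	retfunc_exit_task(&t);		/* dies: stack returned */

	retfunc_active = 0;
	retfunc_init_task(&t);		/* born quiet: ret_stack == NULL */
	retfunc_exit_task(&t);		/* still safe */
	return 0;
}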
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f212da486689..90d99fb02ae4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1498,10 +1498,77 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_RET_TRACER
 
+static atomic_t ftrace_retfunc_active;
+
 /* The callback that hooks the return of a function */
 trace_function_return_t ftrace_function_return =
 			(trace_function_return_t)ftrace_stub;
 
+
+/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
+static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
+{
+	int i;
+	int ret = 0;
+	unsigned long flags;
+	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
+	struct task_struct *g, *t;
+
+	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
+		ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+		if (!ret_stack_list[i]) {
+			start = 0;
+			end = i;
+			ret = -ENOMEM;
+			goto free;
+		}
+	}
+
+	read_lock_irqsave(&tasklist_lock, flags);
+	do_each_thread(g, t) {
+		if (start == end) {
+			ret = -EAGAIN;
+			goto unlock;
+		}
+
+		if (t->ret_stack == NULL) {
+			t->ret_stack = ret_stack_list[start++];
+			t->curr_ret_stack = -1;
+			atomic_set(&t->trace_overrun, 0);
+		}
+	} while_each_thread(g, t);
+
+unlock:
+	read_unlock_irqrestore(&tasklist_lock, flags);
+free:
+	for (i = start; i < end; i++)
+		kfree(ret_stack_list[i]);
+	return ret;
+}
+
+/* Allocate a return stack for each task */
+static int start_return_tracing(void)
+{
+	struct ftrace_ret_stack **ret_stack_list;
+	int ret;
+
+	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
+				sizeof(struct ftrace_ret_stack *),
+				GFP_KERNEL);
+
+	if (!ret_stack_list)
+		return -ENOMEM;
+
+	do {
+		ret = alloc_retstack_tasklist(ret_stack_list);
+	} while (ret == -EAGAIN);
+
+	kfree(ret_stack_list);
+	return ret;
+}
+
 int register_ftrace_return(trace_function_return_t func)
 {
 	int ret = 0;
@@ -1516,7 +1583,12 @@ int register_ftrace_return(trace_function_return_t func)
 		ret = -EBUSY;
 		goto out;
 	}
-
+	atomic_inc(&ftrace_retfunc_active);
+	ret = start_return_tracing();
+	if (ret) {
+		atomic_dec(&ftrace_retfunc_active);
+		goto out;
+	}
 	ftrace_tracing_type = FTRACE_TYPE_RETURN;
 	ftrace_function_return = func;
 	ftrace_startup();
@@ -1530,6 +1602,7 @@ void unregister_ftrace_return(void)
 {
 	mutex_lock(&ftrace_sysctl_lock);
 
+	atomic_dec(&ftrace_retfunc_active);
 	ftrace_function_return = (trace_function_return_t)ftrace_stub;
 	ftrace_shutdown();
 	/* Restore normal tracing type */
@@ -1537,6 +1610,27 @@ void unregister_ftrace_return(void)
 
 	mutex_unlock(&ftrace_sysctl_lock);
 }
+
+/* Allocate a return stack for newly created task */
+void ftrace_retfunc_init_task(struct task_struct *t)
+{
+	if (atomic_read(&ftrace_retfunc_active)) {
+		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+				* sizeof(struct ftrace_ret_stack),
+				GFP_KERNEL);
+		if (!t->ret_stack)
+			return;
+		t->curr_ret_stack = -1;
+		atomic_set(&t->trace_overrun, 0);
+	} else
+		t->ret_stack = NULL;
+}
+
+void ftrace_retfunc_exit_task(struct task_struct *t)
+{
+	kfree(t->ret_stack);
+	t->ret_stack = NULL;
+}
 #endif
 
 
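
One design point in the new kernel/trace/ftrace.c code deserves a note: kmalloc(GFP_KERNEL) may sleep, so alloc_retstack_tasklist() allocates its batch of FTRACE_RETSTACK_ALLOC_SIZE stacks before taking tasklist_lock, hands them out under the read lock, and returns -EAGAIN when more tasks exist than the batch covers, letting start_return_tracing() loop with a fresh batch; the start..end window then frees whatever was left over. A runnable userspace model of that allocate-outside-the-lock, retry-on-EAGAIN pattern (the fixed task array and the modeled lock are stand-ins):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH	32		/* models FTRACE_RETSTACK_ALLOC_SIZE */
#define NTASKS	100		/* stand-in for the kernel task list */

static void *task_stack[NTASKS];

static int alloc_batch(void)
{
	void *batch[BATCH];
	int i, start = 0;

	/* "sleeping" allocations happen with no lock held */
	for (i = 0; i < BATCH; i++) {
		batch[i] = malloc(1200);
		if (!batch[i]) {
			while (i-- > 0)
				free(batch[i]);
			return -ENOMEM;
		}
	}

	/* under the (modeled) tasklist_lock: stacks go to bare tasks */
	for (i = 0; i < NTASKS; i++) {
		if (task_stack[i])
			continue;
		if (start == BATCH)
			return -EAGAIN;	/* batch exhausted, caller retries */
		task_stack[i] = batch[start++];
	}

	while (start < BATCH)
		free(batch[start++]);	/* leftovers go back */
	return 0;
}

int main(void)
{
	int ret, rounds = 0;

	do {
		ret = alloc_batch();
		rounds++;
	} while (ret == -EAGAIN);

	printf("%d tasks covered in %d rounds (ret=%d)\n",
	       NTASKS, rounds, ret);
	return 0;
}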