Diffstat (limited to 'kernel/trace/ftrace.c')
 kernel/trace/ftrace.c | 135 +++++++++++++++++++++----------------------
 1 file changed, 62 insertions(+), 73 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e6640f80454..d9062f5cc0c0 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -22,12 +22,12 @@
 #include <linux/hardirq.h>
 #include <linux/kthread.h>
 #include <linux/uaccess.h>
-#include <linux/kprobes.h>
 #include <linux/ftrace.h>
 #include <linux/sysctl.h>
 #include <linux/ctype.h>
 #include <linux/list.h>
 #include <linux/hash.h>
+#include <linux/rcupdate.h>
 
 #include <trace/events/sched.h>
 
@@ -85,22 +85,22 @@ ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
-#endif
-
+/*
+ * Traverse the ftrace_list, invoking all entries. The reason that we
+ * can use rcu_dereference_raw() is that elements removed from this list
+ * are simply leaked, so there is no need to interact with a grace-period
+ * mechanism. The rcu_dereference_raw() calls are needed to handle
+ * concurrent insertions into the ftrace_list.
+ *
+ * Silly Alpha and silly pointer-speculation compiler optimizations!
+ */
 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	struct ftrace_ops *op = ftrace_list;
-
-	/* in case someone actually ports this to alpha! */
-	read_barrier_depends();
+	struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
 
 	while (op != &ftrace_list_end) {
-		/* silly alpha */
-		read_barrier_depends();
 		op->func(ip, parent_ip);
-		op = op->next;
+		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 }
 
@@ -155,8 +155,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	 * the ops->next pointer is valid before another CPU sees
 	 * the ops pointer included into the ftrace_list.
 	 */
-	smp_wmb();
-	ftrace_list = ops;
+	rcu_assign_pointer(ftrace_list, ops);
 
 	if (ftrace_enabled) {
 		ftrace_func_t func;
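
The two hunks above replace the open-coded read_barrier_depends()/smp_wmb() pairing with the matching RCU primitives: rcu_assign_pointer() publishes the new list head, and rcu_dereference_raw() is what readers use to pick it up. Because removed ftrace_ops are leaked rather than freed, readers never race with reclamation, which is what makes the _raw variant sufficient here. The kernel-style fragment below is an illustration only, not part of the patch; the my_ops/my_list names are invented, and a real writer would be serialized by a lock (ftrace uses ftrace_lock).

#include <linux/rcupdate.h>

struct my_ops {
	struct my_ops	*next;
	void		(*func)(unsigned long ip);
};

/* Sentinel terminates the list, like ftrace_list_end above. */
static struct my_ops my_list_end = { .next = &my_list_end };
static struct my_ops *my_list = &my_list_end;

/* Writer: fill in ops->next first, then publish with release semantics. */
static void my_register(struct my_ops *ops)
{
	ops->next = my_list;
	rcu_assign_pointer(my_list, ops);
}

/* Reader: traverse through rcu_dereference_raw(); entries are never freed. */
static void my_call_all(unsigned long ip)
{
	struct my_ops *op = rcu_dereference_raw(my_list);

	while (op != &my_list_end) {
		op->func(ip);
		op = rcu_dereference_raw(op->next);
	}
}
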
@@ -898,36 +897,6 @@ static struct dyn_ftrace *ftrace_free_records;
 		}				\
 	}
 
-#ifdef CONFIG_KPROBES
-
-static int frozen_record_count;
-
-static inline void freeze_record(struct dyn_ftrace *rec)
-{
-	if (!(rec->flags & FTRACE_FL_FROZEN)) {
-		rec->flags |= FTRACE_FL_FROZEN;
-		frozen_record_count++;
-	}
-}
-
-static inline void unfreeze_record(struct dyn_ftrace *rec)
-{
-	if (rec->flags & FTRACE_FL_FROZEN) {
-		rec->flags &= ~FTRACE_FL_FROZEN;
-		frozen_record_count--;
-	}
-}
-
-static inline int record_frozen(struct dyn_ftrace *rec)
-{
-	return rec->flags & FTRACE_FL_FROZEN;
-}
-#else
-# define freeze_record(rec)		({ 0; })
-# define unfreeze_record(rec)		({ 0; })
-# define record_frozen(rec)		({ 0; })
-#endif /* CONFIG_KPROBES */
-
 static void ftrace_free_rec(struct dyn_ftrace *rec)
 {
 	rec->freelist = ftrace_free_records;
@@ -1025,6 +994,21 @@ static void ftrace_bug(int failed, unsigned long ip)
 }
 
 
+/* Return 1 if the address range is reserved for ftrace */
+int ftrace_text_reserved(void *start, void *end)
+{
+	struct dyn_ftrace *rec;
+	struct ftrace_page *pg;
+
+	do_for_each_ftrace_rec(pg, rec) {
+		if (rec->ip <= (unsigned long)end &&
+		    rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
+			return 1;
+	} while_for_each_ftrace_rec();
+	return 0;
+}
+
+
 static int
 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
 {
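
The new ftrace_text_reserved() lets other text-patching code ask whether a range of kernel text overlaps an mcount call site that ftrace may rewrite; note that end is treated as inclusive. Below is a self-contained restatement of just the overlap test, for illustration only; MCOUNT_INSN_SIZE is hard-coded and the function name is invented, neither is part of the patch. A caller such as kprobes could use a check like this to refuse probes on ftrace-owned addresses, which is presumably what lets the freeze_record() machinery be dropped elsewhere in this diff.

#include <assert.h>
#include <stdbool.h>

#define MCOUNT_INSN_SIZE 5	/* e.g. a 5-byte call instruction; value is illustrative */

/* Does [start, end] (end inclusive) overlap the instruction starting at ip? */
static bool range_overlaps_insn(unsigned long start, unsigned long end,
				unsigned long ip)
{
	return ip <= end && ip + MCOUNT_INSN_SIZE > start;
}

int main(void)
{
	assert(range_overlaps_insn(0x1000, 0x1000, 0x1000));	/* right on the site */
	assert(range_overlaps_insn(0x1003, 0x1003, 0x1000));	/* inside the 5-byte insn */
	assert(!range_overlaps_insn(0x1005, 0x1010, 0x1000));	/* just past the insn */
	return 0;
}
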
@@ -1076,14 +1060,6 @@ static void ftrace_replace_code(int enable)
 		    !(rec->flags & FTRACE_FL_CONVERTED))
 			continue;
 
-		/* ignore updates to this record's mcount site */
-		if (get_kprobe((void *)rec->ip)) {
-			freeze_record(rec);
-			continue;
-		} else {
-			unfreeze_record(rec);
-		}
-
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			rec->flags |= FTRACE_FL_FAILED;
@@ -2300,6 +2276,8 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
+
 static int __init set_graph_function(char *str)
 {
 	strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
@@ -2426,6 +2404,7 @@ static const struct file_operations ftrace_notrace_fops = {
 static DEFINE_MUTEX(graph_lock);
 
 int ftrace_graph_count;
+int ftrace_graph_filter_enabled;
 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
 
 static void *
@@ -2448,7 +2427,7 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
-	if (!ftrace_graph_count && !*pos)
+	if (!ftrace_graph_filter_enabled && !*pos)
 		return (void *)1;
 
 	return __g_next(m, pos);
@@ -2494,6 +2473,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 	mutex_lock(&graph_lock);
 	if ((file->f_mode & FMODE_WRITE) &&
 	    (file->f_flags & O_TRUNC)) {
+		ftrace_graph_filter_enabled = 0;
 		ftrace_graph_count = 0;
 		memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
 	}
@@ -2519,7 +2499,7 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
 	int search_len;
-	int found = 0;
+	int fail = 1;
 	int type, not;
 	char *search;
 	bool exists;
@@ -2530,37 +2510,51 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
 
 	/* decode regex */
 	type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
-	if (not)
-		return -EINVAL;
+	if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
+		return -EBUSY;
 
 	search_len = strlen(search);
 
 	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
-		if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
-			break;
-
 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
 			continue;
 
 		if (ftrace_match_record(rec, search, search_len, type)) {
-			/* ensure it is not already in the array */
+			/* if it is in the array */
 			exists = false;
-			for (i = 0; i < *idx; i++)
+			for (i = 0; i < *idx; i++) {
 				if (array[i] == rec->ip) {
 					exists = true;
 					break;
 				}
-			if (!exists)
-				array[(*idx)++] = rec->ip;
-			found = 1;
+			}
+
+			if (!not) {
+				fail = 0;
+				if (!exists) {
+					array[(*idx)++] = rec->ip;
+					if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
+						goto out;
+				}
+			} else {
+				if (exists) {
+					array[i] = array[--(*idx)];
+					array[*idx] = 0;
+					fail = 0;
+				}
+			}
 		}
 	} while_for_each_ftrace_rec();
-
+out:
 	mutex_unlock(&ftrace_lock);
 
-	return found ? 0 : -EINVAL;
+	if (fail)
+		return -EINVAL;
+
+	ftrace_graph_filter_enabled = 1;
+	return 0;
 }
 
 static ssize_t
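
With the rewrite above, ftrace_set_func() now honors negated patterns: a '!' entry removes matching functions from the graph filter array instead of returning -EINVAL, and removal is done by overwriting the slot with the last element so the array stays dense without any shifting. Because entries can now be removed, a zero ftrace_graph_count no longer means "no filter", which is why g_start() switches to the new ftrace_graph_filter_enabled flag (set on a successful write, cleared on O_TRUNC). The fragment below is an illustration of the remove-by-swap idiom only, with invented names; it is not part of the patch.

#include <stdio.h>

/*
 * Delete entry i from an unordered array by moving the last entry into
 * its slot.  Order is not preserved, but no element shifting is needed.
 */
static void remove_entry(unsigned long *array, int *count, int i)
{
	array[i] = array[--(*count)];	/* last entry overwrites slot i */
	array[*count] = 0;		/* clear the now-unused tail slot */
}

int main(void)
{
	unsigned long funcs[4] = { 0x100, 0x200, 0x300, 0x400 };
	int count = 4;

	remove_entry(funcs, &count, 1);		/* drop 0x200 */
	for (int i = 0; i < count; i++)
		printf("%#lx\n", funcs[i]);	/* prints 0x100, 0x400, 0x300 */
	return 0;
}
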
@@ -2570,16 +2564,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	struct trace_parser parser;
 	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	mutex_lock(&graph_lock);
 
-	if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
-		ret = -EBUSY;
-		goto out_unlock;
-	}
-
 	if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
 		ret = -ENOMEM;
 		goto out_unlock;
@@ -3364,6 +3353,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 {
 	/* Make sure we do not use the parent ret_stack */
 	t->ret_stack = NULL;
+	t->curr_ret_stack = -1;
 
 	if (ftrace_graph_active) {
 		struct ftrace_ret_stack *ret_stack;
@@ -3373,7 +3363,6 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;