Diffstat (limited to 'kernel')
 kernel/Makefile                      |  4
 kernel/extable.c                     |  5
 kernel/module.c                      |  2
 kernel/trace/ftrace.c                |  2
 kernel/trace/trace.c                 | 36
 kernel/trace/trace_functions_graph.c | 33
 6 files changed, 50 insertions(+), 32 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 703cf3b7389c..19fad003b19d 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -21,10 +21,6 @@ CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -pg
 endif
-ifdef CONFIG_FUNCTION_GRAPH_TRACER
-CFLAGS_REMOVE_extable.o = -pg # For __kernel_text_address()
-CFLAGS_REMOVE_module.o = -pg # For __module_text_address()
-endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
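
Note: the four CFLAGS_REMOVE_* lines deleted above had disabled mcount instrumentation for extable.o and module.o wholesale, so that the graph tracer would not recurse through __kernel_text_address() and __module_text_address(). The rest of this patch replaces that blanket per-file exclusion with a per-function __notrace_funcgraph annotation. A minimal sketch of the companion definition, assumed from the include/linux/ftrace.h side of this series (it is not part of this diff):

    /* Expands to the "notrace" function attribute only when the graph
     * tracer is configured, so annotated functions keep their mcount
     * call sites in all other builds. */
    #ifdef CONFIG_FUNCTION_GRAPH_TRACER
    #define __notrace_funcgraph	notrace
    #else
    #define __notrace_funcgraph
    #endif
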
diff --git a/kernel/extable.c b/kernel/extable.c
index a26cb2e17023..feb0317cf09a 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -17,6 +17,7 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/ftrace.h>
 #include <asm/uaccess.h>
 #include <asm/sections.h>
 
@@ -40,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-int core_kernel_text(unsigned long addr)
+__notrace_funcgraph int core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr <= (unsigned long)_etext)
@@ -53,7 +54,7 @@ int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
-int __kernel_text_address(unsigned long addr)
+__notrace_funcgraph int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
 		return 1;
diff --git a/kernel/module.c b/kernel/module.c
index 89bcf7c1327d..dd2a54155b54 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2704,7 +2704,7 @@ int is_module_address(unsigned long addr)
 
 
 /* Is this a valid kernel address? */
-struct module *__module_text_address(unsigned long addr)
+__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2971fe48f55e..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 			/* Make sure IRQs see the -1 first: */
 			barrier();
 			t->ret_stack = ret_stack_list[start++];
+			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
 		}
 	} while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 		if (!t->ret_stack)
 			return;
 		t->curr_ret_stack = -1;
+		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 	} else
 		t->ret_stack = NULL;
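
Note: both hunks initialize the new tracing_graph_pause counter alongside trace_overrun whenever a task's return stack is set up. The counter backs a pair of helpers used later in trace_vprintk(); a minimal sketch of their definitions, assuming the include/linux/ftrace.h side of this series:

    /* Per-task pause count: while it is nonzero, the graph tracer
     * records nothing for this task. Atomic, since it is raised by the
     * traced task itself and inspected from the tracer callbacks. */
    static inline void pause_graph_tracing(void)
    {
    	atomic_inc(&current->tracing_graph_pause);
    }
    
    static inline void unpause_graph_tracing(void)
    {
    	atomic_dec(&current->tracing_graph_pause);
    }
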
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7a93c663e52a..8ebe0070c47a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -44,13 +44,14 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
-/* We need to change this state when a selftest is running.
+/*
+ * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
  * entries inserted during the selftest although some concurrent
  * insertions into the ring-buffer such as ftrace_printk could occurred
  * at the same time, giving false positive or negative results.
  */
-static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
+static bool __read_mostly tracing_selftest_running;
 
 /* For tracers that don't implement custom flags */
 static struct tracer_opt dummy_tracer_opt[] = {
@@ -574,6 +575,8 @@ int register_tracer(struct tracer *type)
 	unlock_kernel();
 	mutex_lock(&trace_types_lock);
 
+	tracing_selftest_running = true;
+
 	for (t = trace_types; t; t = t->next) {
 		if (strcmp(type->name, t->name) == 0) {
 			/* already found */
@@ -598,7 +601,6 @@ int register_tracer(struct tracer *type)
 		struct trace_array *tr = &global_trace;
 		int i;
 
-		atomic_set(&tracing_selftest_running, 1);
 		/*
 		 * Run a selftest on this tracer.
 		 * Here we reset the trace buffer, and set the current
@@ -613,7 +615,6 @@ int register_tracer(struct tracer *type)
 		/* the test is responsible for initializing and enabling */
 		pr_info("Testing tracer %s: ", type->name);
 		ret = type->selftest(type, tr);
-		atomic_set(&tracing_selftest_running, 0);
 		/* the test is responsible for resetting too */
 		current_trace = saved_tracer;
 		if (ret) {
@@ -635,6 +636,7 @@ int register_tracer(struct tracer *type)
 		max_tracer_type_len = len;
 
  out:
+	tracing_selftest_running = false;
 	mutex_unlock(&trace_types_lock);
 	lock_kernel();
 
@@ -3588,24 +3590,17 @@ static __init int tracer_init_debugfs(void)
 
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-	/*
-	 * Raw Spinlock because a normal spinlock would be traced here
-	 * and append an irrelevant couple spin_lock_irqsave/
-	 * spin_unlock_irqrestore traced by ftrace around this
-	 * TRACE_PRINTK trace.
-	 */
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(trace_buf_lock);
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ring_buffer_event *event;
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	struct print_entry *entry;
-	unsigned long flags, irq_flags;
 	int cpu, len = 0, size, pc;
+	struct print_entry *entry;
+	unsigned long irq_flags;
 
-	if (tracing_disabled || atomic_read(&tracing_selftest_running))
+	if (tracing_disabled || tracing_selftest_running)
 		return 0;
@@ -3616,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
-	local_irq_save(flags);
-	__raw_spin_lock(&trace_buf_lock);
+	pause_graph_tracing();
+	spin_lock_irqsave(&trace_buf_lock, irq_flags);
 	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	len = min(len, TRACE_BUF_SIZE-1);
@@ -3628,7 +3623,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	if (!event)
 		goto out_unlock;
 	entry = ring_buffer_event_data(event);
-	tracing_generic_entry_update(&entry->ent, flags, pc);
+	tracing_generic_entry_update(&entry->ent, irq_flags, pc);
 	entry->ent.type = TRACE_PRINT;
 	entry->ip = ip;
 	entry->depth = depth;
@@ -3638,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-	__raw_spin_unlock(&trace_buf_lock);
-	local_irq_restore(flags);
-
+	spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+	unpause_graph_tracing();
  out:
 	preempt_enable_notrace();
 
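
Note: with graph tracing paused around the critical section, an ordinary spinlock no longer leaks into the trace, which is why the hand-rolled raw spinlock plus local_irq_save()/local_irq_restore() pair can be dropped in favor of spin_lock_irqsave(). On the consumer side, the graph tracer's entry hook skips recording while the pause count is raised; schematically (a hypothetical sketch, the exact call site is not part of this diff):

    /* Inside the graph tracer's entry callback: bail out while paused,
     * so the spin_lock_irqsave()/vsnprintf() internals of trace_vprintk()
     * never appear as spurious nodes in the call graph. */
    if (unlikely(atomic_read(&current->tracing_graph_pause)))
    	return 0;	/* do not record this function entry */
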
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 32b7fb9a19df..af60eef4cbcc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -570,11 +570,36 @@ print_graph_function(struct trace_iterator *iter)
 	}
 }
 
+static void print_graph_headers(struct seq_file *s)
+{
+	/* 1st line */
+	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+		seq_printf(s, "CPU ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+		seq_printf(s, "TASK/PID ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD)
+		seq_printf(s, "OVERHEAD/");
+	seq_printf(s, "DURATION FUNCTION CALLS\n");
+
+	/* 2nd line */
+	seq_printf(s, "# ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU)
+		seq_printf(s, "| ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC)
+		seq_printf(s, "| | ");
+	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
+		seq_printf(s, "| ");
+		seq_printf(s, "| | | | |\n");
+	} else
+		seq_printf(s, " | | | | |\n");
+}
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.init		= graph_trace_init,
 	.reset		= graph_trace_reset,
 	.print_line	= print_graph_function,
+	.print_header	= print_graph_headers,
 	.flags		= &tracer_flags,
 };
 
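
Note: wiring up .print_header lets the graph tracer emit column headings that match whichever TRACE_GRAPH_PRINT_* options are enabled, instead of the generic function-tracer header. A minimal sketch of how the core consumes the hook, with names assumed from kernel/trace/trace.c of this era (the call site is not part of this diff):

    /* In s_show(), when emitting the start of the trace file: a tracer
     * that provides .print_header overrides the default column layout. */
    if (iter->trace && iter->trace->print_header)
    	iter->trace->print_header(m);
    else
    	print_func_help_header(m);	/* default two-line header */
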