Diffstat (limited to 'kernel/trace/trace_sysprof.c')
-rw-r--r--   kernel/trace/trace_sysprof.c | 67
1 files changed, 63 insertions, 4 deletions
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 6c139bc1be7e..ba55b871b3d9 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -5,19 +5,76 @@
  * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
  *
  */
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/debugfs.h>
 #include <linux/kallsyms.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
 #include <linux/uaccess.h>
-#include <linux/marker.h>
 #include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/fs.h>
 
 #include "trace.h"
 
 static struct trace_array *ctx_trace;
 static int __read_mostly tracer_enabled;
 
+static const unsigned long sample_period = 1000000;
+
+/*
+ * Per CPU hrtimers that do the profiling:
+ */
+static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer);
+
+static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
+{
+	/* trace here */
+	panic_timeout++;
+
+	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));
+
+	return HRTIMER_RESTART;
+}
+
+static void start_stack_timer(int cpu)
+{
+	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	hrtimer->function = stack_trace_timer_fn;
+	hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+
+	hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
+}
+
+static void start_stack_timers(void)
+{
+	cpumask_t saved_mask = current->cpus_allowed;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+		start_stack_timer(cpu);
+		printk("started timer on cpu%d\n", cpu);
+	}
+	set_cpus_allowed_ptr(current, &saved_mask);
+}
+
+static void stop_stack_timer(int cpu)
+{
+	struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu);
+
+	hrtimer_cancel(hrtimer);
+	printk("cancelled timer on cpu%d\n", cpu);
+}
+
+static void stop_stack_timers(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		stop_stack_timer(cpu);
+}
+
 static notrace void stack_reset(struct trace_array *tr)
 {
 	int cpu;
@@ -31,11 +88,13 @@ static notrace void stack_reset(struct trace_array *tr)
 static notrace void start_stack_trace(struct trace_array *tr)
 {
 	stack_reset(tr);
+	start_stack_timers();
 	tracer_enabled = 1;
 }
 
 static notrace void stop_stack_trace(struct trace_array *tr)
 {
+	stop_stack_timers();
 	tracer_enabled = 0;
 }
 
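
The pattern the patch introduces is a self-re-arming CLOCK_MONOTONIC timer per CPU: stack_trace_timer_fn() runs every sample_period (1,000,000 ns), does its placeholder sampling work (panic_timeout++ under a "trace here" comment), then pushes the expiry forward with hrtimer_forward_now() and returns HRTIMER_RESTART. start_stack_timers() walks the online CPUs and temporarily pins the current task to each one with set_cpus_allowed_ptr() so that hrtimer_start() arms that CPU's timer. The sketch below is only a userspace analogue of the periodic, self-re-arming callback half of that pattern, using a POSIX CLOCK_MONOTONIC timer with the same 1 ms period; it is an illustration, not kernel code and not part of this patch, and the counter stands in for the "trace here" placeholder.

/*
 * Userspace sketch of the sampling pattern added above: a CLOCK_MONOTONIC
 * timer fires every sample_period nanoseconds and re-arms itself, the way
 * the per-CPU hrtimer does via hrtimer_forward_now()/HRTIMER_RESTART.
 * Illustration only; on older glibc, link with -lrt for timer_create().
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Same 1,000,000 ns period the patch hard-codes as sample_period. */
static const long sample_period_ns = 1000000;
static volatile sig_atomic_t samples;

/* Stands in for stack_trace_timer_fn(): the patch only bumps a counter. */
static void sample_tick(int sig)
{
	(void)sig;
	samples++;
}

int main(void)
{
	struct sigaction sa;
	struct sigevent sev;
	struct itimerspec its;
	timer_t timer;

	sa.sa_handler = sample_tick;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGALRM, &sa, NULL);

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGALRM;
	sev.sigev_value.sival_ptr = NULL;

	/* A non-zero it_interval makes the timer periodic (auto re-arm). */
	its.it_value.tv_sec = 0;
	its.it_value.tv_nsec = sample_period_ns;
	its.it_interval.tv_sec = 0;
	its.it_interval.tv_nsec = sample_period_ns;

	timer_create(CLOCK_MONOTONIC, &sev, &timer);
	timer_settime(timer, 0, &its, NULL);

	/* Each tick interrupts pause(); stop after ~1 second of samples. */
	while (samples < 1000)
		pause();
	printf("collected %d samples\n", (int)samples);

	timer_delete(timer);
	return 0;
}

What the sketch cannot show is the per-CPU part: in the patch that is handled by DEFINE_PER_CPU plus pinning the starting task to each online CPU before arming its timer, and undone by stop_stack_timers() calling hrtimer_cancel() on every CPU's timer.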