Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile                  |   1
-rw-r--r--   kernel/fork.c                    |   4
-rw-r--r--   kernel/ptrace.c                  |  10
-rw-r--r--   kernel/sched.c                   |  43
-rw-r--r--   kernel/trace/Makefile            |   7
-rw-r--r--   kernel/trace/trace.h             |   2
-rw-r--r--   kernel/trace/trace_hw_branches.c | 199
-rw-r--r--   kernel/trace/trace_selftest.c    |  58
8 files changed, 214 insertions, 110 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 42423665660a..a35eee3436de 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
+obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 085f73ebcea6..711468f3db2a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1088,8 +1088,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(current->ptrace))
-		ptrace_fork(p, clone_flags);
+
+	p->bts = NULL;
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0692ab5a0d67..e950805f8630 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -25,16 +25,6 @@
 
 
 /*
- * Initialize a new task whose father had been ptraced.
- *
- * Called from copy_process().
- */
-void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
-{
-	arch_ptrace_fork(child, clone_flags);
-}
-
-/*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
  *
diff --git a/kernel/sched.c b/kernel/sched.c
index 14a19b17674e..6530a27052f3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2011,6 +2011,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw = p->nvcsw;
+	nivcsw = p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
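[Note, not part of the patch: the new wait_task_context_switch() helper busy-waits until the target task is either not running on a CPU or one of its context-switch counters has advanced by more than one, which guarantees at least one complete switch, since the counters are bumped before the switch itself. Below is a minimal illustrative sketch of a caller, under the assumption that some per-task state is referenced from the context-switch path; the function name example_release_switch_state() is hypothetical and does not appear in the patch.]

/*
 * Illustrative sketch only (not from the patch): wait until @child has
 * demonstrably been through a context switch before tearing down state
 * that the switch path may still reference.
 */
static void example_release_switch_state(struct task_struct *child)
{
	if (child != current)
		wait_task_context_switch(child);

	/* safe to free or clear per-task switch-time state here */
}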
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 7c34cbfff96e..06b85850fab4 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -15,12 +15,17 @@ ifdef CONFIG_TRACING_BRANCHES
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
 endif
 
+#
+# Make the trace clocks available generally: it's infrastructure
+# relied on by ptrace for example:
+#
+obj-y += trace_clock.o
+
 obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
 obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
 obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o
 
 obj-$(CONFIG_TRACING) += trace.o
-obj-$(CONFIG_TRACING) += trace_clock.o
 obj-$(CONFIG_TRACING) += trace_output.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index ba25793ffe67..6e735d4771f8 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -538,6 +538,8 @@ extern int trace_selftest_startup_sysprof(struct tracer *trace,
 					       struct trace_array *tr);
 extern int trace_selftest_startup_branch(struct tracer *trace,
 					 struct trace_array *tr);
+extern int trace_selftest_startup_hw_branches(struct tracer *trace,
+					       struct trace_array *tr);
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 extern void *head_page(struct trace_array_cpu *data);
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 8683d50a753a..ca7d7c4d0c2a 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,10 +1,9 @@
 /*
- * h/w branch tracer for x86 based on bts
+ * h/w branch tracer for x86 based on BTS
  *
  * Copyright (C) 2008-2009 Intel Corporation.
  * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  */
-#include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
@@ -15,110 +14,119 @@
 
 #include <asm/ds.h>
 
-#include "trace.h"
 #include "trace_output.h"
+#include "trace.h"
 
 
-#define SIZEOF_BTS (1 << 13)
+#define BTS_BUFFER_SIZE (1 << 13)
 
-/*
- * The tracer lock protects the below per-cpu tracer array.
- * It needs to be held to:
- * - start tracing on all cpus
- * - stop tracing on all cpus
- * - start tracing on a single hotplug cpu
- * - stop tracing on a single hotplug cpu
- * - read the trace from all cpus
- * - read the trace from a single cpu
- */
-static DEFINE_SPINLOCK(bts_tracer_lock);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
 
 #define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
 
-static int __read_mostly trace_hw_branches_enabled;
+static int trace_hw_branches_enabled __read_mostly;
+static int trace_hw_branches_suspended __read_mostly;
 static struct trace_array *hw_branch_trace __read_mostly;
 
 
-/*
- * Start tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_start_cpu(void *arg)
+static void bts_trace_init_cpu(int cpu)
 {
-	if (this_tracer)
-		ds_release_bts(this_tracer);
-
-	this_tracer =
-		ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS,
-			       /* ovfl = */ NULL, /* th = */ (size_t)-1,
-			       BTS_KERNEL);
-	if (IS_ERR(this_tracer)) {
-		this_tracer = NULL;
-		return;
-	}
+	per_cpu(tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
+				   NULL, (size_t)-1, BTS_KERNEL);
+
+	if (IS_ERR(per_cpu(tracer, cpu)))
+		per_cpu(tracer, cpu) = NULL;
 }
 
-static void bts_trace_start(struct trace_array *tr)
+static int bts_trace_init(struct trace_array *tr)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
+
+	hw_branch_trace = tr;
+	trace_hw_branches_enabled = 0;
 
-	on_each_cpu(bts_trace_start_cpu, NULL, 1);
-	trace_hw_branches_enabled = 1;
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		bts_trace_init_cpu(cpu);
 
-	spin_unlock(&bts_tracer_lock);
+		if (likely(per_cpu(tracer, cpu)))
+			trace_hw_branches_enabled = 1;
+	}
+	trace_hw_branches_suspended = 0;
+	put_online_cpus();
+
+	/* If we could not enable tracing on a single cpu, we fail. */
+	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
 }
 
-/*
- * Stop tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_stop_cpu(void *arg)
+static void bts_trace_reset(struct trace_array *tr)
 {
-	if (this_tracer) {
-		ds_release_bts(this_tracer);
-		this_tracer = NULL;
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (likely(per_cpu(tracer, cpu))) {
+			ds_release_bts(per_cpu(tracer, cpu));
+			per_cpu(tracer, cpu) = NULL;
+		}
 	}
+	trace_hw_branches_enabled = 0;
+	trace_hw_branches_suspended = 0;
+	put_online_cpus();
 }
 
-static void bts_trace_stop(struct trace_array *tr)
+static void bts_trace_start(struct trace_array *tr)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
-	trace_hw_branches_enabled = 0;
-	on_each_cpu(bts_trace_stop_cpu, NULL, 1);
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_resume_bts(per_cpu(tracer, cpu));
+	trace_hw_branches_suspended = 0;
+	put_online_cpus();
+}
 
-	spin_unlock(&bts_tracer_lock);
+static void bts_trace_stop(struct trace_array *tr)
+{
+	int cpu;
+
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_suspend_bts(per_cpu(tracer, cpu));
+	trace_hw_branches_suspended = 1;
+	put_online_cpus();
 }
 
 static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
-	spin_lock(&bts_tracer_lock);
-
-	if (!trace_hw_branches_enabled)
-		goto out;
+	int cpu = (long)hcpu;
 
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
-		smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
+		/* The notification is sent with interrupts enabled. */
+		if (trace_hw_branches_enabled) {
+			bts_trace_init_cpu(cpu);
+
+			if (trace_hw_branches_suspended &&
+			    likely(per_cpu(tracer, cpu)))
+				ds_suspend_bts(per_cpu(tracer, cpu));
+		}
 		break;
+
 	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
-		break;
+		/* The notification is sent with interrupts enabled. */
+		if (likely(per_cpu(tracer, cpu))) {
+			ds_release_bts(per_cpu(tracer, cpu));
+			per_cpu(tracer, cpu) = NULL;
+		}
 	}
 
-out:
-	spin_unlock(&bts_tracer_lock);
 	return NOTIFY_DONE;
 }
 
@@ -126,20 +134,6 @@ static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
 	.notifier_call = bts_hotcpu_handler
 };
 
-static int bts_trace_init(struct trace_array *tr)
-{
-	hw_branch_trace = tr;
-
-	bts_trace_start(tr);
-
-	return 0;
-}
-
-static void bts_trace_reset(struct trace_array *tr)
-{
-	bts_trace_stop(tr);
-}
-
 static void bts_trace_print_header(struct seq_file *m)
 {
 	seq_puts(m, "# CPU# TO <- FROM\n");
@@ -147,10 +141,10 @@ static void bts_trace_print_header(struct seq_file *m)
 
 static enum print_line_t bts_trace_print_line(struct trace_iterator *iter)
 {
+	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 	struct trace_entry *entry = iter->ent;
 	struct trace_seq *seq = &iter->seq;
 	struct hw_branch_entry *it;
-	unsigned long symflags = TRACE_ITER_SYM_OFFSET;
 
 	trace_assign_type(it, entry);
 
@@ -226,11 +220,11 @@ static void trace_bts_at(const struct bts_trace *trace, void *at)
 /*
  * Collect the trace on the current cpu and write it into the ftrace buffer.
  *
- * pre: bts_tracer_lock must be locked
+ * pre: tracing must be suspended on the current cpu
  */
 static void trace_bts_cpu(void *arg)
 {
-	struct trace_array *tr = (struct trace_array *) arg;
+	struct trace_array *tr = (struct trace_array *)arg;
 	const struct bts_trace *trace;
 	unsigned char *at;
 
@@ -243,10 +237,9 @@ static void trace_bts_cpu(void *arg)
 	if (unlikely(!this_tracer))
 		return;
 
-	ds_suspend_bts(this_tracer);
 	trace = ds_read_bts(this_tracer);
 	if (!trace)
-		goto out;
+		return;
 
 	for (at = trace->ds.top; (void *)at < trace->ds.end;
 	     at += trace->ds.size)
@@ -255,18 +248,27 @@ static void trace_bts_cpu(void *arg)
 	for (at = trace->ds.begin; (void *)at < trace->ds.top;
 	     at += trace->ds.size)
 		trace_bts_at(trace, at);
-
-out:
-	ds_resume_bts(this_tracer);
 }
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_suspend_bts(per_cpu(tracer, cpu));
+	/*
+	 * We need to collect the trace on the respective cpu since ftrace
+	 * implicitly adds the record for the current cpu.
+	 * Once that is more flexible, we could collect the data from any cpu.
+	 */
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
-	spin_unlock(&bts_tracer_lock);
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_resume_bts(per_cpu(tracer, cpu));
+	put_online_cpus();
 }
 
 static void trace_bts_close(struct trace_iterator *iter)
@@ -276,11 +278,11 @@ static void trace_bts_close(struct trace_iterator *iter)
 
 void trace_hw_branch_oops(void)
 {
-	spin_lock(&bts_tracer_lock);
-
-	trace_bts_cpu(hw_branch_trace);
-
-	spin_unlock(&bts_tracer_lock);
+	if (this_tracer) {
+		ds_suspend_bts_noirq(this_tracer);
+		trace_bts_cpu(hw_branch_trace);
+		ds_resume_bts_noirq(this_tracer);
+	}
 }
 
 struct tracer bts_tracer __read_mostly =
@@ -293,7 +295,10 @@ struct tracer bts_tracer __read_mostly =
 	.start		= bts_trace_start,
 	.stop		= bts_trace_stop,
 	.open		= trace_bts_prepare,
-	.close		= trace_bts_close
+	.close		= trace_bts_close,
+#ifdef CONFIG_FTRACE_SELFTEST
+	.selftest	= trace_selftest_startup_hw_branches,
+#endif /* CONFIG_FTRACE_SELFTEST */
 };
 
 __init static int init_bts_trace(void)
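[Note, not part of the patch: the rewrite above drops the bts_tracer_lock spinlock and the on_each_cpu()/smp_call_function_single() IPI helpers in favour of the CPU-hotplug read lock (get_online_cpus()/put_online_cpus()) plus direct accesses to each cpu's per-cpu tracer. A compact illustrative sketch of that pattern follows, reusing the file's per-cpu 'tracer' variable; the helper name example_for_each_online_tracer() is hypothetical and does not appear in the patch.]

/*
 * Illustrative sketch only (not from the patch): apply @op to every
 * online cpu's tracer while holding the hotplug read lock, so cpus
 * cannot come or go while we walk them.
 */
static void example_for_each_online_tracer(void (*op)(struct bts_tracer *))
{
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		if (per_cpu(tracer, cpu))
			op(per_cpu(tracer, cpu));
	put_online_cpus();
}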
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 08f4eb2763d1..00dd6485bdd7 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -16,6 +16,7 @@ static inline int trace_valid_entry(struct trace_entry *entry)
 	case TRACE_BRANCH:
 	case TRACE_GRAPH_ENT:
 	case TRACE_GRAPH_RET:
+	case TRACE_HW_BRANCHES:
 		return 1;
 	}
 	return 0;
@@ -188,6 +189,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
 #else
 # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
 #endif /* CONFIG_DYNAMIC_FTRACE */
+
 /*
  * Simple verification test of ftrace function tracer.
  * Enable ftrace, sleep 1/10 second, and then read the trace
@@ -749,3 +751,59 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
 	return ret;
 }
 #endif /* CONFIG_BRANCH_TRACER */
+
+#ifdef CONFIG_HW_BRANCH_TRACER
+int
+trace_selftest_startup_hw_branches(struct tracer *trace,
+				   struct trace_array *tr)
+{
+	struct trace_iterator *iter;
+	struct tracer tracer;
+	unsigned long count;
+	int ret;
+
+	if (!trace->open) {
+		printk(KERN_CONT "missing open function...");
+		return -1;
+	}
+
+	ret = tracer_init(trace, tr);
+	if (ret) {
+		warn_failed_init_tracer(trace, ret);
+		return ret;
+	}
+
+	/*
+	 * The hw-branch tracer needs to collect the trace from the various
+	 * cpu trace buffers - before tracing is stopped.
+	 */
+	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+	if (!iter)
+		return -ENOMEM;
+
+	memcpy(&tracer, trace, sizeof(tracer));
+
+	iter->trace = &tracer;
+	iter->tr = tr;
+	iter->pos = -1;
+	mutex_init(&iter->mutex);
+
+	trace->open(iter);
+
+	mutex_destroy(&iter->mutex);
+	kfree(iter);
+
+	tracing_stop();
+
+	ret = trace_test_buffer(tr, &count);
+	trace->reset(tr);
+	tracing_start();
+
+	if (!ret && !count) {
+		printk(KERN_CONT "no entries found..");
+		ret = -1;
+	}
+
+	return ret;
+}
+#endif /* CONFIG_HW_BRANCH_TRACER */