diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-02-25 02:40:09 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-25 03:16:01 -0500 |
commit | 2d542cf34264ac92e9e7ac55c0b096b066d569d2 (patch) | |
tree | e7864da3a119ba2fd1800616f0041610d09058cb /kernel/trace | |
parent | 499aa86dcbc3c4daf7d2c59c5c30e1a78220fbc1 (diff) |
tracing/hw-branch-tracing: convert bts-tracer mutex to a spinlock
Impact: fix CPU hotplug lockup
bts_hotcpu_handler() is called with irqs disabled, so using mutex_lock()
is a no-no.
All the BTS codepaths here are atomic (they do not schedule), so using
a spinlock is the right solution.
Cc: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r-- | kernel/trace/trace_hw_branches.c | 57 |
1 file changed, 28 insertions, 29 deletions
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index 3335e807144b..7bfdf4c2347f 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -3,17 +3,15 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2008-2009 Intel Corporation. | 4 | * Copyright (C) 2008-2009 Intel Corporation. |
5 | * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009 | 5 | * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009 |
6 | * | ||
7 | */ | 6 | */ |
8 | 7 | #include <linux/spinlock.h> | |
9 | #include <linux/module.h> | 8 | #include <linux/kallsyms.h> |
10 | #include <linux/fs.h> | ||
11 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
12 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
13 | #include <linux/kallsyms.h> | 11 | #include <linux/module.h> |
14 | #include <linux/mutex.h> | ||
15 | #include <linux/cpu.h> | 12 | #include <linux/cpu.h> |
16 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | #include <linux/fs.h> | ||
17 | 15 | ||
18 | #include <asm/ds.h> | 16 | #include <asm/ds.h> |
19 | 17 | ||
@@ -23,16 +21,17 @@ | |||
23 | 21 | ||
24 | #define SIZEOF_BTS (1 << 13) | 22 | #define SIZEOF_BTS (1 << 13) |
25 | 23 | ||
26 | /* The tracer mutex protects the below per-cpu tracer array. | 24 | /* |
27 | It needs to be held to: | 25 | * The tracer lock protects the below per-cpu tracer array. |
28 | - start tracing on all cpus | 26 | * It needs to be held to: |
29 | - stop tracing on all cpus | 27 | * - start tracing on all cpus |
30 | - start tracing on a single hotplug cpu | 28 | * - stop tracing on all cpus |
31 | - stop tracing on a single hotplug cpu | 29 | * - start tracing on a single hotplug cpu |
32 | - read the trace from all cpus | 30 | * - stop tracing on a single hotplug cpu |
33 | - read the trace from a single cpu | 31 | * - read the trace from all cpus |
34 | */ | 32 | * - read the trace from a single cpu |
35 | static DEFINE_MUTEX(bts_tracer_mutex); | 33 | */ |
34 | static DEFINE_SPINLOCK(bts_tracer_lock); | ||
36 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | 35 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); |
37 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | 36 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); |
38 | 37 | ||
@@ -47,7 +46,7 @@ static struct trace_array *hw_branch_trace __read_mostly; | |||
47 | * Start tracing on the current cpu. | 46 | * Start tracing on the current cpu. |
48 | * The argument is ignored. | 47 | * The argument is ignored. |
49 | * | 48 | * |
50 | * pre: bts_tracer_mutex must be locked. | 49 | * pre: bts_tracer_lock must be locked. |
51 | */ | 50 | */ |
52 | static void bts_trace_start_cpu(void *arg) | 51 | static void bts_trace_start_cpu(void *arg) |
53 | { | 52 | { |
@@ -66,19 +65,19 @@ static void bts_trace_start_cpu(void *arg) | |||
66 | 65 | ||
67 | static void bts_trace_start(struct trace_array *tr) | 66 | static void bts_trace_start(struct trace_array *tr) |
68 | { | 67 | { |
69 | mutex_lock(&bts_tracer_mutex); | 68 | spin_lock(&bts_tracer_lock); |
70 | 69 | ||
71 | on_each_cpu(bts_trace_start_cpu, NULL, 1); | 70 | on_each_cpu(bts_trace_start_cpu, NULL, 1); |
72 | trace_hw_branches_enabled = 1; | 71 | trace_hw_branches_enabled = 1; |
73 | 72 | ||
74 | mutex_unlock(&bts_tracer_mutex); | 73 | spin_unlock(&bts_tracer_lock); |
75 | } | 74 | } |
76 | 75 | ||
77 | /* | 76 | /* |
78 | * Stop tracing on the current cpu. | 77 | * Stop tracing on the current cpu. |
79 | * The argument is ignored. | 78 | * The argument is ignored. |
80 | * | 79 | * |
81 | * pre: bts_tracer_mutex must be locked. | 80 | * pre: bts_tracer_lock must be locked. |
82 | */ | 81 | */ |
83 | static void bts_trace_stop_cpu(void *arg) | 82 | static void bts_trace_stop_cpu(void *arg) |
84 | { | 83 | { |
@@ -90,12 +89,12 @@ static void bts_trace_stop_cpu(void *arg) | |||
90 | 89 | ||
91 | static void bts_trace_stop(struct trace_array *tr) | 90 | static void bts_trace_stop(struct trace_array *tr) |
92 | { | 91 | { |
93 | mutex_lock(&bts_tracer_mutex); | 92 | spin_lock(&bts_tracer_lock); |
94 | 93 | ||
95 | trace_hw_branches_enabled = 0; | 94 | trace_hw_branches_enabled = 0; |
96 | on_each_cpu(bts_trace_stop_cpu, NULL, 1); | 95 | on_each_cpu(bts_trace_stop_cpu, NULL, 1); |
97 | 96 | ||
98 | mutex_unlock(&bts_tracer_mutex); | 97 | spin_unlock(&bts_tracer_lock); |
99 | } | 98 | } |
100 | 99 | ||
101 | static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | 100 | static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, |
@@ -103,7 +102,7 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | |||
103 | { | 102 | { |
104 | unsigned int cpu = (unsigned long)hcpu; | 103 | unsigned int cpu = (unsigned long)hcpu; |
105 | 104 | ||
106 | mutex_lock(&bts_tracer_mutex); | 105 | spin_lock(&bts_tracer_lock); |
107 | 106 | ||
108 | if (!trace_hw_branches_enabled) | 107 | if (!trace_hw_branches_enabled) |
109 | goto out; | 108 | goto out; |
@@ -119,7 +118,7 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | |||
119 | } | 118 | } |
120 | 119 | ||
121 | out: | 120 | out: |
122 | mutex_unlock(&bts_tracer_mutex); | 121 | spin_unlock(&bts_tracer_lock); |
123 | return NOTIFY_DONE; | 122 | return NOTIFY_DONE; |
124 | } | 123 | } |
125 | 124 | ||
@@ -225,7 +224,7 @@ static void trace_bts_at(const struct bts_trace *trace, void *at) | |||
225 | /* | 224 | /* |
226 | * Collect the trace on the current cpu and write it into the ftrace buffer. | 225 | * Collect the trace on the current cpu and write it into the ftrace buffer. |
227 | * | 226 | * |
228 | * pre: bts_tracer_mutex must be locked | 227 | * pre: bts_tracer_lock must be locked |
229 | */ | 228 | */ |
230 | static void trace_bts_cpu(void *arg) | 229 | static void trace_bts_cpu(void *arg) |
231 | { | 230 | { |
@@ -261,11 +260,11 @@ out: | |||
261 | 260 | ||
262 | static void trace_bts_prepare(struct trace_iterator *iter) | 261 | static void trace_bts_prepare(struct trace_iterator *iter) |
263 | { | 262 | { |
264 | mutex_lock(&bts_tracer_mutex); | 263 | spin_lock(&bts_tracer_lock); |
265 | 264 | ||
266 | on_each_cpu(trace_bts_cpu, iter->tr, 1); | 265 | on_each_cpu(trace_bts_cpu, iter->tr, 1); |
267 | 266 | ||
268 | mutex_unlock(&bts_tracer_mutex); | 267 | spin_unlock(&bts_tracer_lock); |
269 | } | 268 | } |
270 | 269 | ||
271 | static void trace_bts_close(struct trace_iterator *iter) | 270 | static void trace_bts_close(struct trace_iterator *iter) |
@@ -275,11 +274,11 @@ static void trace_bts_close(struct trace_iterator *iter) | |||
275 | 274 | ||
276 | void trace_hw_branch_oops(void) | 275 | void trace_hw_branch_oops(void) |
277 | { | 276 | { |
278 | mutex_lock(&bts_tracer_mutex); | 277 | spin_lock(&bts_tracer_lock); |
279 | 278 | ||
280 | trace_bts_cpu(hw_branch_trace); | 279 | trace_bts_cpu(hw_branch_trace); |
281 | 280 | ||
282 | mutex_unlock(&bts_tracer_mutex); | 281 | spin_unlock(&bts_tracer_lock); |
283 | } | 282 | } |
284 | 283 | ||
285 | struct tracer bts_tracer __read_mostly = | 284 | struct tracer bts_tracer __read_mostly = |