author     Markus Metzger <markus.t.metzger@intel.com>   2009-04-03 10:43:40 -0400
committer  Ingo Molnar <mingo@elte.hu>                   2009-04-07 07:36:20 -0400
commit     de79f54f5347ad7ec6ff55ccbb6d4ab2a21f6a93
tree       dfd3f000600b942a545cbc8acd2f2e67f4518015 /kernel/trace/trace_hw_branches.c
parent     35bb7600c17762bb129588c1877d2717fe325289
x86, bts, hw-branch-tracer: add _noirq variants to the debug store interface
The hw-branch-tracer uses debug store functions from an on_each_cpu()
context, which is simply wrong since the functions may sleep.
Add _noirq variants for most functions; these variants may be called with
interrupts disabled.
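For illustration, a minimal sketch of the intended calling convention, taken
from the tracer's oops path in this patch: the _noirq variants bracket the
collection and are safe to call from a context that cannot sleep.

	/* Sketch: collect the per-cpu trace while interrupts may be disabled. */
	void trace_hw_branch_oops(void)
	{
		if (this_tracer) {
			ds_suspend_bts_noirq(this_tracer);	/* irq-safe, non-sleeping */
			trace_bts_cpu(hw_branch_trace);
			ds_resume_bts_noirq(this_tracer);
		}
	}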
Separate per-cpu and per-task tracing and allow per-cpu tracing to be
controlled from any cpu.
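For example (a sketch based on this patch's bts_trace_init_cpu()), per-cpu
tracing is now requested for an explicit cpu via ds_request_bts_cpu(), so it
can be set up from any cpu instead of only from the target cpu itself:

	static void bts_trace_init_cpu(int cpu)
	{
		/* Request kernel-mode branch tracing for 'cpu'; callable from any cpu. */
		per_cpu(tracer, cpu) =
			ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
					   NULL, (size_t)-1, BTS_KERNEL);

		if (IS_ERR(per_cpu(tracer, cpu)))
			per_cpu(tracer, cpu) = NULL;	/* tracing stays off on this cpu */
	}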
Make the hw-branch-tracer use the new debug store interface, synchronize
with cpu hotplug events using get/put_online_cpus(), and remove the
now-unnecessary spinlock.
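A sketch of the resulting pattern, as used in the tracer's stop path: the
cpu-hotplug lock replaces bts_tracer_lock, and the per-cpu tracers are walked
directly instead of via on_each_cpu().

	static void bts_trace_stop(struct trace_array *tr)
	{
		int cpu;

		get_online_cpus();	/* keep cpus from coming or going while we walk them */
		for_each_online_cpu(cpu)
			if (likely(per_cpu(tracer, cpu)))
				ds_suspend_bts(per_cpu(tracer, cpu));
		trace_hw_branches_suspended = 1;
		put_online_cpus();
	}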
Make the ptrace bts and the ds selftest code use the new interface.
Defer the ds selftest.
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Cc: roland@redhat.com
Cc: eranian@googlemail.com
Cc: oleg@redhat.com
Cc: juan.villacis@intel.com
Cc: ak@linux.jf.intel.com
LKML-Reference: <20090403144555.658136000@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_hw_branches.c')
-rw-r--r--  kernel/trace/trace_hw_branches.c | 193
1 file changed, 72 insertions(+), 121 deletions(-)
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index 8b2109a6c61c..50565d8cd2ed 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -4,7 +4,6 @@
  * Copyright (C) 2008-2009 Intel Corporation.
  * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
  */
-#include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
 #include <linux/ftrace.h>
@@ -21,168 +20,113 @@
 
 #define BTS_BUFFER_SIZE (1 << 13)
 
-/*
- * The tracer lock protects the below per-cpu tracer array.
- * It needs to be held to:
- * - start tracing on all cpus
- * - stop tracing on all cpus
- * - start tracing on a single hotplug cpu
- * - stop tracing on a single hotplug cpu
- * - read the trace from all cpus
- * - read the trace from a single cpu
- */
-static DEFINE_SPINLOCK(bts_tracer_lock);
 static DEFINE_PER_CPU(struct bts_tracer *, tracer);
 static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
 
 #define this_tracer per_cpu(tracer, smp_processor_id())
-#define this_buffer per_cpu(buffer, smp_processor_id())
 
 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
 static struct trace_array *hw_branch_trace __read_mostly;
 
 
-/*
- * Initialize the tracer for the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_init_cpu(void *arg)
+static void bts_trace_init_cpu(int cpu)
 {
-	if (this_tracer)
-		ds_release_bts(this_tracer);
+	per_cpu(tracer, cpu) =
+		ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
+				   NULL, (size_t)-1, BTS_KERNEL);
 
-	this_tracer = ds_request_bts(NULL, this_buffer, BTS_BUFFER_SIZE,
-				     NULL, (size_t)-1, BTS_KERNEL);
-	if (IS_ERR(this_tracer)) {
-		this_tracer = NULL;
-		return;
-	}
+	if (IS_ERR(per_cpu(tracer, cpu)))
+		per_cpu(tracer, cpu) = NULL;
 }
 
 static int bts_trace_init(struct trace_array *tr)
 {
-	int cpu, avail;
-
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
 	hw_branch_trace = tr;
+	trace_hw_branches_enabled = 0;
 
-	on_each_cpu(bts_trace_init_cpu, NULL, 1);
-
-	/* Check on how many cpus we could enable tracing */
-	avail = 0;
-	for_each_online_cpu(cpu)
-		if (per_cpu(tracer, cpu))
-			avail++;
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		bts_trace_init_cpu(cpu);
 
-	trace_hw_branches_enabled = (avail ? 1 : 0);
+		if (likely(per_cpu(tracer, cpu)))
+			trace_hw_branches_enabled = 1;
+	}
 	trace_hw_branches_suspended = 0;
-
-	spin_unlock(&bts_tracer_lock);
-
+	put_online_cpus();
 
 	/* If we could not enable tracing on a single cpu, we fail. */
-	return avail ? 0 : -EOPNOTSUPP;
-}
-
-/*
- * Release the tracer for the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_release_cpu(void *arg)
-{
-	if (this_tracer) {
-		ds_release_bts(this_tracer);
-		this_tracer = NULL;
-	}
+	return trace_hw_branches_enabled ? 0 : -EOPNOTSUPP;
 }
 
 static void bts_trace_reset(struct trace_array *tr)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
-	on_each_cpu(bts_trace_release_cpu, NULL, 1);
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		if (likely(per_cpu(tracer, cpu))) {
+			ds_release_bts(per_cpu(tracer, cpu));
+			per_cpu(tracer, cpu) = NULL;
+		}
+	}
 	trace_hw_branches_enabled = 0;
 	trace_hw_branches_suspended = 0;
-
-	spin_unlock(&bts_tracer_lock);
-}
-
-/*
- * Resume tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_resume_cpu(void *arg)
-{
-	if (this_tracer)
-		ds_resume_bts(this_tracer);
+	put_online_cpus();
 }
 
 static void bts_trace_start(struct trace_array *tr)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
-	on_each_cpu(bts_trace_resume_cpu, NULL, 1);
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_resume_bts(per_cpu(tracer, cpu));
 	trace_hw_branches_suspended = 0;
-
-	spin_unlock(&bts_tracer_lock);
-}
-
-/*
- * Suspend tracing on the current cpu.
- * The argument is ignored.
- *
- * pre: bts_tracer_lock must be locked.
- */
-static void bts_trace_suspend_cpu(void *arg)
-{
-	if (this_tracer)
-		ds_suspend_bts(this_tracer);
+	put_online_cpus();
 }
 
 static void bts_trace_stop(struct trace_array *tr)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
-	on_each_cpu(bts_trace_suspend_cpu, NULL, 1);
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_suspend_bts(per_cpu(tracer, cpu));
 	trace_hw_branches_suspended = 1;
-
-	spin_unlock(&bts_tracer_lock);
+	put_online_cpus();
 }
 
 static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 				     unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
-	spin_lock(&bts_tracer_lock);
-
-	if (!trace_hw_branches_enabled)
-		goto out;
+	int cpu = (long)hcpu;
 
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
-		smp_call_function_single(cpu, bts_trace_init_cpu, NULL, 1);
-
-		if (trace_hw_branches_suspended)
-			smp_call_function_single(cpu, bts_trace_suspend_cpu,
						 NULL, 1);
+		/* The notification is sent with interrupts enabled. */
+		if (trace_hw_branches_enabled) {
+			bts_trace_init_cpu(cpu);
+
+			if (trace_hw_branches_suspended &&
+			    likely(per_cpu(tracer, cpu)))
+				ds_suspend_bts(per_cpu(tracer, cpu));
+		}
 		break;
+
 	case CPU_DOWN_PREPARE:
-		smp_call_function_single(cpu, bts_trace_release_cpu, NULL, 1);
-		break;
+		/* The notification is sent with interrupts enabled. */
+		if (likely(per_cpu(tracer, cpu))) {
+			ds_release_bts(per_cpu(tracer, cpu));
+			per_cpu(tracer, cpu) = NULL;
+		}
 	}
 
-out:
-	spin_unlock(&bts_tracer_lock);
 	return NOTIFY_DONE;
 }
 
@@ -274,7 +218,7 @@ static void trace_bts_at(const struct bts_trace *trace, void *at)
 /*
  * Collect the trace on the current cpu and write it into the ftrace buffer.
  *
- * pre: bts_tracer_lock must be locked
+ * pre: tracing must be suspended on the current cpu
  */
 static void trace_bts_cpu(void *arg)
 {
@@ -291,10 +235,9 @@ static void trace_bts_cpu(void *arg)
 	if (unlikely(!this_tracer))
 		return;
 
-	ds_suspend_bts(this_tracer);
 	trace = ds_read_bts(this_tracer);
 	if (!trace)
-		goto out;
+		return;
 
 	for (at = trace->ds.top; (void *)at < trace->ds.end;
 	     at += trace->ds.size)
@@ -303,18 +246,27 @@ static void trace_bts_cpu(void *arg)
 	for (at = trace->ds.begin; (void *)at < trace->ds.top;
 	     at += trace->ds.size)
 		trace_bts_at(trace, at);
-
-out:
-	ds_resume_bts(this_tracer);
 }
 
 static void trace_bts_prepare(struct trace_iterator *iter)
 {
-	spin_lock(&bts_tracer_lock);
+	int cpu;
 
+	get_online_cpus();
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_suspend_bts(per_cpu(tracer, cpu));
+	/*
+	 * We need to collect the trace on the respective cpu since ftrace
+	 * implicitly adds the record for the current cpu.
+	 * Once that is more flexible, we could collect the data from any cpu.
+	 */
 	on_each_cpu(trace_bts_cpu, iter->tr, 1);
 
-	spin_unlock(&bts_tracer_lock);
+	for_each_online_cpu(cpu)
+		if (likely(per_cpu(tracer, cpu)))
+			ds_resume_bts(per_cpu(tracer, cpu));
+	put_online_cpus();
 }
 
 static void trace_bts_close(struct trace_iterator *iter)
@@ -324,12 +276,11 @@ static void trace_bts_close(struct trace_iterator *iter)
 
 void trace_hw_branch_oops(void)
 {
-	spin_lock(&bts_tracer_lock);
-
-	if (trace_hw_branches_enabled)
+	if (this_tracer) {
+		ds_suspend_bts_noirq(this_tracer);
 		trace_bts_cpu(hw_branch_trace);
-
-	spin_unlock(&bts_tracer_lock);
+		ds_resume_bts_noirq(this_tracer);
+	}
 }
 
 struct tracer bts_tracer __read_mostly =