about summary refs log tree commit diff stats
path: root/kernel/trace/trace_hw_branches.c
diff options
context:
space:
mode:
authorMarkus Metzger <markus.t.metzger@intel.com>2009-01-19 04:26:53 -0500
committerIngo Molnar <mingo@elte.hu>2009-01-20 07:03:38 -0500
commit5c5317de147e9b38ea9c4cbdc2d15bed7648d036 (patch)
treed3a4ddbf0c3a19fde807e70d55b23f3204b2219f /kernel/trace/trace_hw_branches.c
parentb43f70933e7753a284733d5ae355f6778bd118ce (diff)
x86, ftrace, hw-branch-tracer: support hotplug cpus
Support hotplug cpus. Reported-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Markus Metzger <markus.t.metzger@intel.com> Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/trace_hw_branches.c')
-rw-r--r--kernel/trace/trace_hw_branches.c123
1 file changed, 107 insertions, 16 deletions
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c
index df21c1e72b95..398195397c75 100644
--- a/kernel/trace/trace_hw_branches.c
+++ b/kernel/trace/trace_hw_branches.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * h/w branch tracer for x86 based on bts 2 * h/w branch tracer for x86 based on bts
3 * 3 *
4 * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> 4 * Copyright (C) 2008-2009 Intel Corporation.
5 * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009
5 * 6 *
6 */ 7 */
7 8
@@ -10,6 +11,9 @@
10#include <linux/debugfs.h> 11#include <linux/debugfs.h>
11#include <linux/ftrace.h> 12#include <linux/ftrace.h>
12#include <linux/kallsyms.h> 13#include <linux/kallsyms.h>
14#include <linux/mutex.h>
15#include <linux/cpu.h>
16#include <linux/smp.h>
13 17
14#include <asm/ds.h> 18#include <asm/ds.h>
15 19
@@ -19,13 +23,31 @@
19 23
20#define SIZEOF_BTS (1 << 13) 24#define SIZEOF_BTS (1 << 13)
21 25
26/* The tracer mutex protects the below per-cpu tracer array.
27 It needs to be held to:
28 - start tracing on all cpus
29 - stop tracing on all cpus
30 - start tracing on a single hotplug cpu
31 - stop tracing on a single hotplug cpu
32 - read the trace from all cpus
33 - read the trace from a single cpu
34*/
35static DEFINE_MUTEX(bts_tracer_mutex);
22static DEFINE_PER_CPU(struct bts_tracer *, tracer); 36static DEFINE_PER_CPU(struct bts_tracer *, tracer);
23static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); 37static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer);
24 38
25#define this_tracer per_cpu(tracer, smp_processor_id()) 39#define this_tracer per_cpu(tracer, smp_processor_id())
26#define this_buffer per_cpu(buffer, smp_processor_id()) 40#define this_buffer per_cpu(buffer, smp_processor_id())
27 41
42static int __read_mostly trace_hw_branches_enabled;
28 43
44
45/*
46 * Start tracing on the current cpu.
47 * The argument is ignored.
48 *
49 * pre: bts_tracer_mutex must be locked.
50 */
29static void bts_trace_start_cpu(void *arg) 51static void bts_trace_start_cpu(void *arg)
30{ 52{
31 if (this_tracer) 53 if (this_tracer)
@@ -43,14 +65,20 @@ static void bts_trace_start_cpu(void *arg)
43 65
44static void bts_trace_start(struct trace_array *tr) 66static void bts_trace_start(struct trace_array *tr)
45{ 67{
46 int cpu; 68 mutex_lock(&bts_tracer_mutex);
47 69
48 tracing_reset_online_cpus(tr); 70 on_each_cpu(bts_trace_start_cpu, NULL, 1);
71 trace_hw_branches_enabled = 1;
49 72
50 for_each_cpu(cpu, cpu_possible_mask) 73 mutex_unlock(&bts_tracer_mutex);
51 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
52} 74}
53 75
76/*
77 * Stop tracing on the current cpu.
78 * The argument is ignored.
79 *
80 * pre: bts_tracer_mutex must be locked.
81 */
54static void bts_trace_stop_cpu(void *arg) 82static void bts_trace_stop_cpu(void *arg)
55{ 83{
56 if (this_tracer) { 84 if (this_tracer) {
@@ -61,20 +89,58 @@ static void bts_trace_stop_cpu(void *arg)
61 89
62static void bts_trace_stop(struct trace_array *tr) 90static void bts_trace_stop(struct trace_array *tr)
63{ 91{
64 int cpu; 92 mutex_lock(&bts_tracer_mutex);
93
94 trace_hw_branches_enabled = 0;
95 on_each_cpu(bts_trace_stop_cpu, NULL, 1);
65 96
66 for_each_cpu(cpu, cpu_possible_mask) 97 mutex_unlock(&bts_tracer_mutex);
98}
99
100static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
101 unsigned long action, void *hcpu)
102{
103 unsigned int cpu = (unsigned long)hcpu;
104
105 mutex_lock(&bts_tracer_mutex);
106
107 if (!trace_hw_branches_enabled)
108 goto out;
109
110 switch (action) {
111 case CPU_ONLINE:
112 case CPU_DOWN_FAILED:
113 smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
114 break;
115 case CPU_DOWN_PREPARE:
67 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); 116 smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
117 break;
118 }
119
120 out:
121 mutex_unlock(&bts_tracer_mutex);
122 return NOTIFY_DONE;
68} 123}
69 124
125static struct notifier_block bts_hotcpu_notifier __cpuinitdata = {
126 .notifier_call = bts_hotcpu_handler
127};
128
70static int bts_trace_init(struct trace_array *tr) 129static int bts_trace_init(struct trace_array *tr)
71{ 130{
131 register_hotcpu_notifier(&bts_hotcpu_notifier);
72 tracing_reset_online_cpus(tr); 132 tracing_reset_online_cpus(tr);
73 bts_trace_start(tr); 133 bts_trace_start(tr);
74 134
75 return 0; 135 return 0;
76} 136}
77 137
138static void bts_trace_reset(struct trace_array *tr)
139{
140 bts_trace_stop(tr);
141 unregister_hotcpu_notifier(&bts_hotcpu_notifier);
142}
143
78static void bts_trace_print_header(struct seq_file *m) 144static void bts_trace_print_header(struct seq_file *m)
79{ 145{
80 seq_puts(m, 146 seq_puts(m,
@@ -108,18 +174,34 @@ void trace_hw_branch(struct trace_array *tr, u64 from, u64 to)
108{ 174{
109 struct ring_buffer_event *event; 175 struct ring_buffer_event *event;
110 struct hw_branch_entry *entry; 176 struct hw_branch_entry *entry;
111 unsigned long irq; 177 unsigned long irq1, irq2;
178 int cpu;
112 179
113 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); 180 if (unlikely(!tr))
114 if (!event) 181 return;
182
183 if (unlikely(!trace_hw_branches_enabled))
115 return; 184 return;
185
186 local_irq_save(irq1);
187 cpu = raw_smp_processor_id();
188 if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
189 goto out;
190
191 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq2);
192 if (!event)
193 goto out;
116 entry = ring_buffer_event_data(event); 194 entry = ring_buffer_event_data(event);
117 tracing_generic_entry_update(&entry->ent, 0, from); 195 tracing_generic_entry_update(&entry->ent, 0, from);
118 entry->ent.type = TRACE_HW_BRANCHES; 196 entry->ent.type = TRACE_HW_BRANCHES;
119 entry->ent.cpu = smp_processor_id(); 197 entry->ent.cpu = cpu;
120 entry->from = from; 198 entry->from = from;
121 entry->to = to; 199 entry->to = to;
122 ring_buffer_unlock_commit(tr->buffer, event, irq); 200 ring_buffer_unlock_commit(tr->buffer, event, irq2);
201
202 out:
203 atomic_dec(&tr->data[cpu]->disabled);
204 local_irq_restore(irq1);
123} 205}
124 206
125static void trace_bts_at(struct trace_array *tr, 207static void trace_bts_at(struct trace_array *tr,
@@ -143,6 +225,11 @@ static void trace_bts_at(struct trace_array *tr,
143 } 225 }
144} 226}
145 227
228/*
229 * Collect the trace on the current cpu and write it into the ftrace buffer.
230 *
231 * pre: bts_tracer_mutex must be locked
232 */
146static void trace_bts_cpu(void *arg) 233static void trace_bts_cpu(void *arg)
147{ 234{
148 struct trace_array *tr = (struct trace_array *) arg; 235 struct trace_array *tr = (struct trace_array *) arg;
@@ -152,6 +239,9 @@ static void trace_bts_cpu(void *arg)
152 if (!this_tracer) 239 if (!this_tracer)
153 return; 240 return;
154 241
242 if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled)))
243 return;
244
155 ds_suspend_bts(this_tracer); 245 ds_suspend_bts(this_tracer);
156 trace = ds_read_bts(this_tracer); 246 trace = ds_read_bts(this_tracer);
157 if (!trace) 247 if (!trace)
@@ -171,17 +261,18 @@ out:
171 261
172static void trace_bts_prepare(struct trace_iterator *iter) 262static void trace_bts_prepare(struct trace_iterator *iter)
173{ 263{
174 int cpu; 264 mutex_lock(&bts_tracer_mutex);
265
266 on_each_cpu(trace_bts_cpu, iter->tr, 1);
175 267
176 for_each_cpu(cpu, cpu_possible_mask) 268 mutex_unlock(&bts_tracer_mutex);
177 smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
178} 269}
179 270
180struct tracer bts_tracer __read_mostly = 271struct tracer bts_tracer __read_mostly =
181{ 272{
182 .name = "hw-branch-tracer", 273 .name = "hw-branch-tracer",
183 .init = bts_trace_init, 274 .init = bts_trace_init,
184 .reset = bts_trace_stop, 275 .reset = bts_trace_reset,
185 .print_header = bts_trace_print_header, 276 .print_header = bts_trace_print_header,
186 .print_line = bts_trace_print_line, 277 .print_line = bts_trace_print_line,
187 .start = bts_trace_start, 278 .start = bts_trace_start,