aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/trace_sysprof.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/trace_sysprof.c')
-rw-r--r-- kernel/trace/trace_sysprof.c | 45
1 files changed, 8 insertions, 37 deletions
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index 9587d3bcba55..eaca5ad803ff 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -196,27 +196,19 @@ static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer)
196 return HRTIMER_RESTART; 196 return HRTIMER_RESTART;
197} 197}
198 198
199static void start_stack_timer(int cpu) 199static void start_stack_timer(void *unused)
200{ 200{
201 struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); 201 struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer);
202 202
203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 203 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
204 hrtimer->function = stack_trace_timer_fn; 204 hrtimer->function = stack_trace_timer_fn;
205 hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
206 205
207 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); 206 hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
208} 207}
209 208
210static void start_stack_timers(void) 209static void start_stack_timers(void)
211{ 210{
212 cpumask_t saved_mask = current->cpus_allowed; 211 on_each_cpu(start_stack_timer, NULL, 1);
213 int cpu;
214
215 for_each_online_cpu(cpu) {
216 set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
217 start_stack_timer(cpu);
218 }
219 set_cpus_allowed_ptr(current, &saved_mask);
220} 212}
221 213
222static void stop_stack_timer(int cpu) 214static void stop_stack_timer(int cpu)
@@ -234,20 +226,10 @@ static void stop_stack_timers(void)
234 stop_stack_timer(cpu); 226 stop_stack_timer(cpu);
235} 227}
236 228
237static void stack_reset(struct trace_array *tr)
238{
239 int cpu;
240
241 tr->time_start = ftrace_now(tr->cpu);
242
243 for_each_online_cpu(cpu)
244 tracing_reset(tr, cpu);
245}
246
247static void start_stack_trace(struct trace_array *tr) 229static void start_stack_trace(struct trace_array *tr)
248{ 230{
249 mutex_lock(&sample_timer_lock); 231 mutex_lock(&sample_timer_lock);
250 stack_reset(tr); 232 tracing_reset_online_cpus(tr);
251 start_stack_timers(); 233 start_stack_timers();
252 tracer_enabled = 1; 234 tracer_enabled = 1;
253 mutex_unlock(&sample_timer_lock); 235 mutex_unlock(&sample_timer_lock);
@@ -261,27 +243,17 @@ static void stop_stack_trace(struct trace_array *tr)
261 mutex_unlock(&sample_timer_lock); 243 mutex_unlock(&sample_timer_lock);
262} 244}
263 245
264static void stack_trace_init(struct trace_array *tr) 246static int stack_trace_init(struct trace_array *tr)
265{ 247{
266 sysprof_trace = tr; 248 sysprof_trace = tr;
267 249
268 if (tr->ctrl) 250 start_stack_trace(tr);
269 start_stack_trace(tr); 251 return 0;
270} 252}
271 253
272static void stack_trace_reset(struct trace_array *tr) 254static void stack_trace_reset(struct trace_array *tr)
273{ 255{
274 if (tr->ctrl) 256 stop_stack_trace(tr);
275 stop_stack_trace(tr);
276}
277
278static void stack_trace_ctrl_update(struct trace_array *tr)
279{
280 /* When starting a new trace, reset the buffers */
281 if (tr->ctrl)
282 start_stack_trace(tr);
283 else
284 stop_stack_trace(tr);
285} 257}
286 258
287static struct tracer stack_trace __read_mostly = 259static struct tracer stack_trace __read_mostly =
@@ -289,7 +261,6 @@ static struct tracer stack_trace __read_mostly =
289 .name = "sysprof", 261 .name = "sysprof",
290 .init = stack_trace_init, 262 .init = stack_trace_init,
291 .reset = stack_trace_reset, 263 .reset = stack_trace_reset,
292 .ctrl_update = stack_trace_ctrl_update,
293#ifdef CONFIG_FTRACE_SELFTEST 264#ifdef CONFIG_FTRACE_SELFTEST
294 .selftest = trace_selftest_startup_sysprof, 265 .selftest = trace_selftest_startup_sysprof,
295#endif 266#endif