author		Steven Rostedt <srostedt@redhat.com>	2012-08-06 16:24:11 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2013-03-15 00:34:43 -0400
commit		a7603ff4b5f7e26e67af82a4c3d05eeeb8d7b160 (patch)
tree		2d348aeb190cf6c7ba43f97419b291251d6e04c5 /kernel/trace/trace_sched_wakeup.c
parent		ccb469a198cffac94a7eea0b69f715f06e2ddf15 (diff)
tracing: Replace the static global per_cpu arrays with allocated per_cpu
The global and max-tr currently use static per_cpu arrays for the CPU data
descriptors. But in order to support newly allocated trace_arrays, those
descriptors need to be dynamically allocated per_cpu data as well. Instead of
using the static arrays, switch the global and max-tr to use allocated data.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
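
The shape of the change is roughly the following sketch (simplified, assumed names; the real definitions live in kernel/trace/trace.h): the fixed NR_CPUS-sized array of CPU data pointers in struct trace_array is replaced by per-CPU memory obtained from alloc_percpu(), so each newly created trace_array instance can own its own descriptors:

    #include <linux/atomic.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>

    /* Sketch only: simplified stand-ins for the types in kernel/trace/trace.h. */
    struct trace_array_cpu {
    	atomic_t		disabled;
    	/* ... the rest of the per-CPU tracer state ... */
    };

    struct trace_array {
    	/*
    	 * Before: one static slot per possible CPU:
    	 *	struct trace_array_cpu	*data[NR_CPUS];
    	 * After: per-instance data allocated with alloc_percpu():
    	 */
    	struct trace_array_cpu __percpu	*data;
    };

    /* Hypothetical helper showing the allocation side of the change. */
    static int trace_array_alloc_data(struct trace_array *tr)
    {
    	tr->data = alloc_percpu(struct trace_array_cpu);
    	return tr->data ? 0 : -ENOMEM;
    }

A matching free_percpu(tr->data) releases the memory when an instance is torn down.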
Diffstat (limited to 'kernel/trace/trace_sched_wakeup.c')
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5255a8477247..f9ceb75a95b7 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -89,7 +89,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
 	if (cpu != wakeup_current_cpu)
 		goto out_enable;
 
-	*data = tr->data[cpu];
+	*data = per_cpu_ptr(tr->data, cpu);
 	disabled = atomic_inc_return(&(*data)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
@@ -353,7 +353,7 @@ probe_wakeup_sched_switch(void *ignore,
 
 	/* disable local data, not wakeup_cpu data */
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 	if (likely(disabled != 1))
 		goto out;
 
@@ -365,7 +365,7 @@ probe_wakeup_sched_switch(void *ignore,
 		goto out_unlock;
 
 	/* The task we are waiting for is waking up */
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
 
 	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
 	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
@@ -387,7 +387,7 @@ out_unlock:
 	arch_spin_unlock(&wakeup_lock);
 	local_irq_restore(flags);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 
 static void __wakeup_reset(struct trace_array *tr)
@@ -435,7 +435,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 		return;
 
 	pc = preempt_count();
-	disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
+	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 	if (unlikely(disabled != 1))
 		goto out;
 
@@ -458,7 +458,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 
 	local_save_flags(flags);
 
-	data = wakeup_trace->data[wakeup_cpu];
+	data = per_cpu_ptr(wakeup_trace->data, wakeup_cpu);
 	data->preempt_timestamp = ftrace_now(cpu);
 	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
 
@@ -472,7 +472,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
 out_locked:
 	arch_spin_unlock(&wakeup_lock);
 out:
-	atomic_dec(&wakeup_trace->data[cpu]->disabled);
+	atomic_dec(&per_cpu_ptr(wakeup_trace->data, cpu)->disabled);
 }
 
 static void start_wakeup_tracer(struct trace_array *tr)
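
Every hunk above is the same mechanical substitution: direct array indexing via tr->data[cpu] becomes per_cpu_ptr(tr->data, cpu), the accessor for dynamically allocated per-CPU memory. A self-contained sketch of the resulting access pattern (the demo_* names are hypothetical, not the tracer's actual code):

    #include <linux/atomic.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    struct demo_cpu_data {
    	atomic_t	disabled;
    };

    static struct demo_cpu_data __percpu *demo_data;	/* from alloc_percpu() */

    static void demo_probe(void)
    {
    	struct demo_cpu_data *data;
    	long disabled;
    	int cpu;

    	cpu = raw_smp_processor_id();
    	/* per_cpu_ptr() replaces the old demo_data[cpu] array indexing. */
    	data = per_cpu_ptr(demo_data, cpu);

    	/* Recursion/nesting guard, as in the tracer: only proceed when
    	 * this CPU's counter goes 0 -> 1. */
    	disabled = atomic_inc_return(&data->disabled);
    	if (disabled != 1)
    		goto out;

    	/* ... per-CPU tracing work goes here ... */
    out:
    	atomic_dec(&data->disabled);
    }

Because per_cpu_ptr() takes the base pointer returned by alloc_percpu(), the call sites convert one for one, and the locking and recursion discipline around the disabled counter stays exactly as it was.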