| author | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-07 02:19:51 -0400 |
| commit | a1922ed661ab2c1637d0b10cde933bd9cd33d965 | |
| tree | 0f1777542b385ebefd30b3586d830fd8ed6fda5b | |
| parent | 75e33751ca8bbb72dd6f1a74d2810ddc8cbe4bdf | |
| parent | d28daf923ac5e4a0d7cecebae56f3e339189366b | |
Merge branch 'tracing/core' into tracing/hw-breakpoints
Conflicts:
	arch/Kconfig
	kernel/trace/trace.h

Merge reason: resolve the conflicts, plus adapt to the new ring-buffer APIs.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
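
The adaptation the merge reason refers to is visible in both probe hunks below: event reservation, the filter-discard check, and the commit now take the trace array's underlying `struct ring_buffer` instead of the `struct trace_array` itself. The following sketch condenses the post-merge pattern into a single hypothetical helper; `record_power_entry()` is illustrative only, while every other identifier is taken from the file:

```c
/*
 * Hypothetical helper condensing the post-merge event-recording
 * pattern from the hunks below; not part of trace_power.c itself.
 */
static void record_power_entry(struct trace_array *tr,
			       struct power_trace *it)
{
	struct ftrace_event_call *call = &event_power;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer; /* fetched once up front */
	struct trace_power *entry;

	/* reservation now takes the ring buffer, not the trace array */
	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
					  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->state_data = *it;

	/* the discard check and the commit likewise take the buffer */
	if (!filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, 0);
}
```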
Diffstat (limited to 'kernel/trace/trace_power.c')

 kernel/trace/trace_power.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index 8a30d9874cd4..fe1a00f1445a 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -38,6 +38,7 @@ static void probe_power_end(struct power_trace *it)
 {
 	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
 	struct trace_array *tr = power_trace;
@@ -45,18 +46,20 @@ static void probe_power_end(struct power_trace *it)
 	if (!trace_power_enabled)
 		return;
 
+	buffer = tr->buffer;
+
 	preempt_disable();
 	it->end = ktime_get();
 	data = tr->data[smp_processor_id()];
 
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -66,6 +69,7 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 {
 	struct ftrace_event_call *call = &event_power;
 	struct ring_buffer_event *event;
+	struct ring_buffer *buffer;
 	struct trace_power *entry;
 	struct trace_array_cpu *data;
 	struct trace_array *tr = power_trace;
@@ -73,6 +77,8 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 	if (!trace_power_enabled)
 		return;
 
+	buffer = tr->buffer;
+
 	memset(it, 0, sizeof(struct power_trace));
 	it->state = level;
 	it->type = type;
@@ -81,14 +87,14 @@ static void probe_power_mark(struct power_trace *it, unsigned int type,
 	it->end = it->stamp;
 	data = tr->data[smp_processor_id()];
 
-	event = trace_buffer_lock_reserve(tr, TRACE_POWER,
+	event = trace_buffer_lock_reserve(buffer, TRACE_POWER,
 					  sizeof(*entry), 0, 0);
 	if (!event)
 		goto out;
 	entry = ring_buffer_event_data(event);
 	entry->state_data = *it;
-	if (!filter_check_discard(call, entry, tr->buffer, event))
-		trace_buffer_unlock_commit(tr, event, 0, 0);
+	if (!filter_check_discard(call, entry, buffer, event))
+		trace_buffer_unlock_commit(buffer, event, 0, 0);
 out:
 	preempt_enable();
 }
@@ -144,14 +150,12 @@ static void power_trace_reset(struct trace_array *tr)
 
 static int power_trace_init(struct trace_array *tr)
 {
-	int cpu;
 	power_trace = tr;
 
 	trace_power_enabled = 1;
 	tracing_power_register();
 
-	for_each_cpu(cpu, cpu_possible_mask)
-		tracing_reset(tr, cpu);
+	tracing_reset_online_cpus(tr);
 	return 0;
 }
 
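
The final hunk replaces the tracer's open-coded per-CPU reset loop with the core `tracing_reset_online_cpus()` helper. Below is a sketch of the behavior that helper is assumed to centralize; the real kernel/trace/trace.c implementation may do more (e.g. buffer synchronization). Note the loop it replaces walked `cpu_possible_mask`, whereas the helper's name suggests it covers online CPUs only:

```c
/*
 * Assumed shape of the core helper the tracer now delegates to;
 * this is a sketch, not the actual kernel/trace/trace.c code.
 */
void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}
```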
