diff options
author | Ingo Molnar <mingo@elte.hu> | 2009-09-21 05:31:35 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-21 06:54:59 -0400 |
commit | dfc65094d0313cc48969fa60bcf33d693aeb05a7 (patch) | |
tree | 6fe01475e45895107866227c18df362fe36b2303 /kernel/perf_counter.c | |
parent | 65abc8653c282ded3dbdb9ec1227784140ba28cd (diff) |
perf_counter: Rename 'event' to event_id/hw_event
In preparation to the renames, to avoid a namespace clash.
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 26 |
1 file changed, 13 insertions, 13 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 13ad73aed4ca..62de0db8092b 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -3044,22 +3044,22 @@ perf_counter_read_event(struct perf_counter *counter, | |||
3044 | struct task_struct *task) | 3044 | struct task_struct *task) |
3045 | { | 3045 | { |
3046 | struct perf_output_handle handle; | 3046 | struct perf_output_handle handle; |
3047 | struct perf_read_event event = { | 3047 | struct perf_read_event read_event = { |
3048 | .header = { | 3048 | .header = { |
3049 | .type = PERF_EVENT_READ, | 3049 | .type = PERF_EVENT_READ, |
3050 | .misc = 0, | 3050 | .misc = 0, |
3051 | .size = sizeof(event) + perf_counter_read_size(counter), | 3051 | .size = sizeof(read_event) + perf_counter_read_size(counter), |
3052 | }, | 3052 | }, |
3053 | .pid = perf_counter_pid(counter, task), | 3053 | .pid = perf_counter_pid(counter, task), |
3054 | .tid = perf_counter_tid(counter, task), | 3054 | .tid = perf_counter_tid(counter, task), |
3055 | }; | 3055 | }; |
3056 | int ret; | 3056 | int ret; |
3057 | 3057 | ||
3058 | ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); | 3058 | ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0); |
3059 | if (ret) | 3059 | if (ret) |
3060 | return; | 3060 | return; |
3061 | 3061 | ||
3062 | perf_output_put(&handle, event); | 3062 | perf_output_put(&handle, read_event); |
3063 | perf_output_read(&handle, counter); | 3063 | perf_output_read(&handle, counter); |
3064 | 3064 | ||
3065 | perf_output_end(&handle); | 3065 | perf_output_end(&handle); |
@@ -3698,14 +3698,14 @@ static int perf_swcounter_is_counting(struct perf_counter *counter) | |||
3698 | 3698 | ||
3699 | static int perf_swcounter_match(struct perf_counter *counter, | 3699 | static int perf_swcounter_match(struct perf_counter *counter, |
3700 | enum perf_type_id type, | 3700 | enum perf_type_id type, |
3701 | u32 event, struct pt_regs *regs) | 3701 | u32 event_id, struct pt_regs *regs) |
3702 | { | 3702 | { |
3703 | if (!perf_swcounter_is_counting(counter)) | 3703 | if (!perf_swcounter_is_counting(counter)) |
3704 | return 0; | 3704 | return 0; |
3705 | 3705 | ||
3706 | if (counter->attr.type != type) | 3706 | if (counter->attr.type != type) |
3707 | return 0; | 3707 | return 0; |
3708 | if (counter->attr.config != event) | 3708 | if (counter->attr.config != event_id) |
3709 | return 0; | 3709 | return 0; |
3710 | 3710 | ||
3711 | if (regs) { | 3711 | if (regs) { |
@@ -3721,7 +3721,7 @@ static int perf_swcounter_match(struct perf_counter *counter, | |||
3721 | 3721 | ||
3722 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | 3722 | static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, |
3723 | enum perf_type_id type, | 3723 | enum perf_type_id type, |
3724 | u32 event, u64 nr, int nmi, | 3724 | u32 event_id, u64 nr, int nmi, |
3725 | struct perf_sample_data *data, | 3725 | struct perf_sample_data *data, |
3726 | struct pt_regs *regs) | 3726 | struct pt_regs *regs) |
3727 | { | 3727 | { |
@@ -3732,7 +3732,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, | |||
3732 | 3732 | ||
3733 | rcu_read_lock(); | 3733 | rcu_read_lock(); |
3734 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { | 3734 | list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { |
3735 | if (perf_swcounter_match(counter, type, event, regs)) | 3735 | if (perf_swcounter_match(counter, type, event_id, regs)) |
3736 | perf_swcounter_add(counter, nr, nmi, data, regs); | 3736 | perf_swcounter_add(counter, nr, nmi, data, regs); |
3737 | } | 3737 | } |
3738 | rcu_read_unlock(); | 3738 | rcu_read_unlock(); |
@@ -4036,17 +4036,17 @@ atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX]; | |||
4036 | 4036 | ||
4037 | static void sw_perf_counter_destroy(struct perf_counter *counter) | 4037 | static void sw_perf_counter_destroy(struct perf_counter *counter) |
4038 | { | 4038 | { |
4039 | u64 event = counter->attr.config; | 4039 | u64 event_id = counter->attr.config; |
4040 | 4040 | ||
4041 | WARN_ON(counter->parent); | 4041 | WARN_ON(counter->parent); |
4042 | 4042 | ||
4043 | atomic_dec(&perf_swcounter_enabled[event]); | 4043 | atomic_dec(&perf_swcounter_enabled[event_id]); |
4044 | } | 4044 | } |
4045 | 4045 | ||
4046 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | 4046 | static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) |
4047 | { | 4047 | { |
4048 | const struct pmu *pmu = NULL; | 4048 | const struct pmu *pmu = NULL; |
4049 | u64 event = counter->attr.config; | 4049 | u64 event_id = counter->attr.config; |
4050 | 4050 | ||
4051 | /* | 4051 | /* |
4052 | * Software counters (currently) can't in general distinguish | 4052 | * Software counters (currently) can't in general distinguish |
@@ -4055,7 +4055,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | |||
4055 | * to be kernel events, and page faults are never hypervisor | 4055 | * to be kernel events, and page faults are never hypervisor |
4056 | * events. | 4056 | * events. |
4057 | */ | 4057 | */ |
4058 | switch (event) { | 4058 | switch (event_id) { |
4059 | case PERF_COUNT_SW_CPU_CLOCK: | 4059 | case PERF_COUNT_SW_CPU_CLOCK: |
4060 | pmu = &perf_ops_cpu_clock; | 4060 | pmu = &perf_ops_cpu_clock; |
4061 | 4061 | ||
@@ -4077,7 +4077,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter) | |||
4077 | case PERF_COUNT_SW_CONTEXT_SWITCHES: | 4077 | case PERF_COUNT_SW_CONTEXT_SWITCHES: |
4078 | case PERF_COUNT_SW_CPU_MIGRATIONS: | 4078 | case PERF_COUNT_SW_CPU_MIGRATIONS: |
4079 | if (!counter->parent) { | 4079 | if (!counter->parent) { |
4080 | atomic_inc(&perf_swcounter_enabled[event]); | 4080 | atomic_inc(&perf_swcounter_enabled[event_id]); |
4081 | counter->destroy = sw_perf_counter_destroy; | 4081 | counter->destroy = sw_perf_counter_destroy; |
4082 | } | 4082 | } |
4083 | pmu = &perf_ops_generic; | 4083 | pmu = &perf_ops_generic; |