author	Ingo Molnar <mingo@elte.hu>	2009-09-21 05:31:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-21 06:54:59 -0400
commit	dfc65094d0313cc48969fa60bcf33d693aeb05a7 (patch)
tree	6fe01475e45895107866227c18df362fe36b2303
parent	65abc8653c282ded3dbdb9ec1227784140ba28cd (diff)
perf_counter: Rename 'event' to event_id/hw_event
In preparation for the renames, to avoid a namespace clash.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  48
-rw-r--r--  kernel/perf_counter.c               26
2 files changed, 37 insertions, 37 deletions
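The clash being avoided: a follow-up series renames the counter objects themselves, after which 'event' becomes the natural name for the object, so locals that use 'event' for a plain numeric id would shadow it. A minimal, hypothetical C sketch of that situation (the type and names below are illustrative only, not taken from this patch or the kernel tree):

	/* Hypothetical sketch, not part of this patch: once "event" names the
	 * counter object, reusing "event" for a numeric id shadows it. */
	struct event {				/* stand-in for the object type */
		unsigned long config;
	};

	static unsigned long event_config(struct event *event)
	{
		unsigned long event_id = event->config;	/* renamed local: no shadowing */

		return event_id;
	}

With the scalar locals renamed to hw_event/event_id up front, the later object-level rename can take over the bare 'event' identifier without ambiguity.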
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a6c8b27553cd..b1f115696c84 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -124,9 +124,9 @@ static const u64 p6_perfmon_event_map[] =
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
 };
 
-static u64 p6_pmu_event_map(int event)
+static u64 p6_pmu_event_map(int hw_event)
 {
-	return p6_perfmon_event_map[event];
+	return p6_perfmon_event_map[hw_event];
 }
 
 /*
@@ -137,7 +137,7 @@ static u64 p6_pmu_event_map(int event)
  */
 #define P6_NOP_COUNTER			0x0000002EULL
 
-static u64 p6_pmu_raw_event(u64 event)
+static u64 p6_pmu_raw_event(u64 hw_event)
 {
 #define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -152,7 +152,7 @@ static u64 p6_pmu_raw_event(u64 event)
 	 P6_EVNTSEL_INV_MASK | \
 	 P6_EVNTSEL_COUNTER_MASK)
 
-	return event & P6_EVNTSEL_MASK;
+	return hw_event & P6_EVNTSEL_MASK;
 }
 
 
@@ -170,16 +170,16 @@ static const u64 intel_perfmon_event_map[] =
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };
 
-static u64 intel_pmu_event_map(int event)
+static u64 intel_pmu_event_map(int hw_event)
 {
-	return intel_perfmon_event_map[event];
+	return intel_perfmon_event_map[hw_event];
 }
 
 /*
- * Generalized hw caching related event table, filled
+ * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
- * 'not supported', -1 means 'event makes no sense on
- * this CPU', any other value means the raw event
+ * 'not supported', -1 means 'hw_event makes no sense on
+ * this CPU', any other value means the raw hw_event
  * ID.
  */
 
@@ -463,7 +463,7 @@ static const u64 atom_hw_cache_event_ids
  },
 };
 
-static u64 intel_pmu_raw_event(u64 event)
+static u64 intel_pmu_raw_event(u64 hw_event)
 {
 #define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
@@ -478,7 +478,7 @@ static u64 intel_pmu_raw_event(u64 event)
 	 CORE_EVNTSEL_INV_MASK | \
 	 CORE_EVNTSEL_COUNTER_MASK)
 
-	return event & CORE_EVNTSEL_MASK;
+	return hw_event & CORE_EVNTSEL_MASK;
 }
 
 static const u64 amd_hw_cache_event_ids
@@ -585,12 +585,12 @@ static const u64 amd_perfmon_event_map[] =
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };
 
-static u64 amd_pmu_event_map(int event)
+static u64 amd_pmu_event_map(int hw_event)
 {
-	return amd_perfmon_event_map[event];
+	return amd_perfmon_event_map[hw_event];
 }
 
-static u64 amd_pmu_raw_event(u64 event)
+static u64 amd_pmu_raw_event(u64 hw_event)
 {
 #define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
 #define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
@@ -605,7 +605,7 @@ static u64 amd_pmu_raw_event(u64 event)
 	 K7_EVNTSEL_INV_MASK | \
 	 K7_EVNTSEL_COUNTER_MASK)
 
-	return event & K7_EVNTSEL_MASK;
+	return hw_event & K7_EVNTSEL_MASK;
 }
 
 /*
@@ -956,7 +956,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	}
 
 	/*
-	 * Raw event type provide the config in the event structure
+	 * Raw hw_event type provide the config in the hw_event structure
 	 */
 	if (attr->type == PERF_TYPE_RAW) {
 		hwc->config |= x86_pmu.raw_event(attr->config);
@@ -1245,7 +1245,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 		ret = 1;
 	}
 	/*
-	 * Quirk: certain CPUs dont like it if just 1 event is left:
+	 * Quirk: certain CPUs dont like it if just 1 hw_event is left:
 	 */
 	if (unlikely(left < 2))
 		left = 2;
@@ -1337,11 +1337,11 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 static int
 fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
-	unsigned int event;
+	unsigned int hw_event;
 
-	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+	hw_event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-	if (unlikely((event ==
+	if (unlikely((hw_event ==
 	    x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
 	    (hwc->sample_period == 1)))
 		return X86_PMC_IDX_FIXED_BTS;
@@ -1349,11 +1349,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
 		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+	if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
 	return -1;
@@ -1970,7 +1970,7 @@ static int intel_pmu_init(void)
 
 	/*
 	 * Check whether the Architectural PerfMon supports
-	 * Branch Misses Retired Event or not.
+	 * Branch Misses Retired hw_event or not.
 	 */
 	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 13ad73aed4ca..62de0db8092b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3044,22 +3044,22 @@ perf_counter_read_event(struct perf_counter *counter,
 			      struct task_struct *task)
 {
 	struct perf_output_handle handle;
-	struct perf_read_event event = {
+	struct perf_read_event read_event = {
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) + perf_counter_read_size(counter),
+			.size = sizeof(read_event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
 	};
 	int ret;
 
-	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
+	ret = perf_output_begin(&handle, counter, read_event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_put(&handle, event);
+	perf_output_put(&handle, read_event);
 	perf_output_read(&handle, counter);
 
 	perf_output_end(&handle);
@@ -3698,14 +3698,14 @@ static int perf_swcounter_is_counting(struct perf_counter *counter)
 
 static int perf_swcounter_match(struct perf_counter *counter,
 				enum perf_type_id type,
-				u32 event, struct pt_regs *regs)
+				u32 event_id, struct pt_regs *regs)
 {
 	if (!perf_swcounter_is_counting(counter))
 		return 0;
 
 	if (counter->attr.type != type)
 		return 0;
-	if (counter->attr.config != event)
+	if (counter->attr.config != event_id)
 		return 0;
 
 	if (regs) {
@@ -3721,7 +3721,7 @@ static int perf_swcounter_match(struct perf_counter *counter,
 
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
-				     u32 event, u64 nr, int nmi,
+				     u32 event_id, u64 nr, int nmi,
 				     struct perf_sample_data *data,
 				     struct pt_regs *regs)
 {
@@ -3732,7 +3732,7 @@ static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_swcounter_match(counter, type, event, regs))
+		if (perf_swcounter_match(counter, type, event_id, regs))
 			perf_swcounter_add(counter, nr, nmi, data, regs);
 	}
 	rcu_read_unlock();
@@ -4036,17 +4036,17 @@ atomic_t perf_swcounter_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_counter_destroy(struct perf_counter *counter)
 {
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	WARN_ON(counter->parent);
 
-	atomic_dec(&perf_swcounter_enabled[event]);
+	atomic_dec(&perf_swcounter_enabled[event_id]);
 }
 
 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 {
 	const struct pmu *pmu = NULL;
-	u64 event = counter->attr.config;
+	u64 event_id = counter->attr.config;
 
 	/*
 	 * Software counters (currently) can't in general distinguish
@@ -4055,7 +4055,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	 * to be kernel events, and page faults are never hypervisor
 	 * events.
 	 */
-	switch (event) {
+	switch (event_id) {
 	case PERF_COUNT_SW_CPU_CLOCK:
 		pmu = &perf_ops_cpu_clock;
 
@@ -4077,7 +4077,7 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
 		if (!counter->parent) {
-			atomic_inc(&perf_swcounter_enabled[event]);
+			atomic_inc(&perf_swcounter_enabled[event_id]);
 			counter->destroy = sw_perf_counter_destroy;
 		}
 		pmu = &perf_ops_generic;