about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-01-22 09:25:59 -0500
committerIngo Molnar <mingo@elte.hu>2010-01-29 03:01:37 -0500
commitc91e0f5da81c6f3a611a1bd6d0cca6717c90fdab (patch)
tree96b681b98a33850ad436d9729d789f17f2cfd7a2 /arch/x86
parent81269a085669b5130058a0275aa7ba9f94abd1fa (diff)
perf_event: x86: Clean up some of the u64/long bitmask casting
We need this to be u64 for direct assignment, but the bitmask functions all work on unsigned long, leading to cast heaven, solve this by using a union. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Stephane Eranian <eranian@google.com> LKML-Reference: <20100122155535.595961269@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kernel/cpu/perf_event.c47
1 file changed, 23 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 7e181a5097ea..921bbf732e77 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -69,10 +69,11 @@ struct debug_store {
69 u64 pebs_event_reset[MAX_PEBS_EVENTS]; 69 u64 pebs_event_reset[MAX_PEBS_EVENTS];
70}; 70};
71 71
72#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
73
74struct event_constraint { 72struct event_constraint {
75 u64 idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)]; 73 union {
74 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
75 u64 idxmsk64[1];
76 };
76 int code; 77 int code;
77 int cmask; 78 int cmask;
78}; 79};
@@ -90,13 +91,14 @@ struct cpu_hw_events {
90 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ 91 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
91}; 92};
92 93
93#define EVENT_CONSTRAINT(c, n, m) { \ 94#define EVENT_CONSTRAINT(c, n, m) { \
94 .code = (c), \ 95 { .idxmsk64[0] = (n) }, \
95 .cmask = (m), \ 96 .code = (c), \
96 .idxmsk[0] = (n) } 97 .cmask = (m), \
98}
97 99
98#define EVENT_CONSTRAINT_END \ 100#define EVENT_CONSTRAINT_END \
99 { .code = 0, .cmask = 0, .idxmsk[0] = 0 } 101 EVENT_CONSTRAINT(0, 0, 0)
100 102
101#define for_each_event_constraint(e, c) \ 103#define for_each_event_constraint(e, c) \
102 for ((e) = (c); (e)->cmask; (e)++) 104 for ((e) = (c); (e)->cmask; (e)++)
@@ -126,8 +128,11 @@ struct x86_pmu {
126 u64 intel_ctrl; 128 u64 intel_ctrl;
127 void (*enable_bts)(u64 config); 129 void (*enable_bts)(u64 config);
128 void (*disable_bts)(void); 130 void (*disable_bts)(void);
129 void (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk); 131 void (*get_event_constraints)(struct cpu_hw_events *cpuc,
130 void (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event); 132 struct perf_event *event,
133 unsigned long *idxmsk);
134 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
135 struct perf_event *event);
131 const struct event_constraint *event_constraints; 136 const struct event_constraint *event_constraints;
132}; 137};
133 138
@@ -2144,14 +2149,11 @@ perf_event_nmi_handler(struct notifier_block *self,
2144 return NOTIFY_STOP; 2149 return NOTIFY_STOP;
2145} 2150}
2146 2151
2147static struct event_constraint bts_constraint = { 2152static struct event_constraint bts_constraint =
2148 .code = 0, 2153 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2149 .cmask = 0,
2150 .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
2151};
2152 2154
2153static int intel_special_constraints(struct perf_event *event, 2155static int intel_special_constraints(struct perf_event *event,
2154 u64 *idxmsk) 2156 unsigned long *idxmsk)
2155{ 2157{
2156 unsigned int hw_event; 2158 unsigned int hw_event;
2157 2159
@@ -2171,14 +2173,14 @@ static int intel_special_constraints(struct perf_event *event,
2171 2173
2172static void intel_get_event_constraints(struct cpu_hw_events *cpuc, 2174static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2173 struct perf_event *event, 2175 struct perf_event *event,
2174 u64 *idxmsk) 2176 unsigned long *idxmsk)
2175{ 2177{
2176 const struct event_constraint *c; 2178 const struct event_constraint *c;
2177 2179
2178 /* 2180 /*
2179 * cleanup bitmask 2181 * cleanup bitmask
2180 */ 2182 */
2181 bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX); 2183 bitmap_zero(idxmsk, X86_PMC_IDX_MAX);
2182 2184
2183 if (intel_special_constraints(event, idxmsk)) 2185 if (intel_special_constraints(event, idxmsk))
2184 return; 2186 return;
@@ -2186,10 +2188,7 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2186 if (x86_pmu.event_constraints) { 2188 if (x86_pmu.event_constraints) {
2187 for_each_event_constraint(c, x86_pmu.event_constraints) { 2189 for_each_event_constraint(c, x86_pmu.event_constraints) {
2188 if ((event->hw.config & c->cmask) == c->code) { 2190 if ((event->hw.config & c->cmask) == c->code) {
2189 2191 bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX);
2190 bitmap_copy((unsigned long *)idxmsk,
2191 (unsigned long *)c->idxmsk,
2192 X86_PMC_IDX_MAX);
2193 return; 2192 return;
2194 } 2193 }
2195 } 2194 }
@@ -2200,10 +2199,10 @@ static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2200 2199
2201static void amd_get_event_constraints(struct cpu_hw_events *cpuc, 2200static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2202 struct perf_event *event, 2201 struct perf_event *event,
2203 u64 *idxmsk) 2202 unsigned long *idxmsk)
2204{ 2203{
2205 /* no constraints, means supports all generic counters */ 2204 /* no constraints, means supports all generic counters */
2206 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events); 2205 bitmap_fill(idxmsk, x86_pmu.num_events);
2207} 2206}
2208 2207
2209static int x86_event_sched_in(struct perf_event *event, 2208static int x86_event_sched_in(struct perf_event *event,