author     Ingo Molnar <mingo@elte.hu>   2008-12-11 06:46:46 -0500
committer  Ingo Molnar <mingo@elte.hu>   2008-12-11 09:45:51 -0500
commit     621a01eac89b5e2f81a4cf576568b31f40a02724 (patch)
tree       79a7f2a8d49017077f40cfac4e78ee255cf2f43e /arch/x86/kernel/cpu/perf_counter.c
parent     ccff286d85098ba5438e22aa2ea807fc1e18cf2f (diff)
perf counters: hw driver API
Impact: restructure code, introduce hw_ops driver abstraction
Introduce this abstraction to handle counter details:
 struct hw_perf_counter_ops {
	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
	void (*hw_perf_counter_read)	(struct perf_counter *counter);
 };
This will be useful to support asymmetric hw details, and it will also
be useful to implement "software counters". (Counters that count kernel-managed
sw events such as pagefaults, context switches, wall-clock time
or task-local time.)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
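
To make the abstraction concrete: a backend fills in one of these ops structures and hands it back from its init path, and the generic code then drives the counter only through those hooks. The sketch below shows what a kernel-managed "software counter" backend could look like under this scheme. It is illustrative only and not part of this patch; the sw_pagefault_* names are invented, and only struct hw_perf_counter_ops and its three hooks come from the change itself.

 #include <linux/perf_counter.h>	/* struct perf_counter, struct hw_perf_counter_ops */

 /*
  * Illustrative sketch only (not from this patch): a kernel-managed
  * "software counter" backend wired up through the same ops table.
  * The sw_pagefault_* functions are hypothetical.
  */
 static void sw_pagefault_counter_enable(struct perf_counter *counter)
 {
	/* start tracking the task's page-fault count */
 }

 static void sw_pagefault_counter_disable(struct perf_counter *counter)
 {
	/* stop tracking and fold the final delta into the counter */
 }

 static void sw_pagefault_counter_read(struct perf_counter *counter)
 {
	/* update the counter value from the accumulated fault count */
 }

 static struct hw_perf_counter_ops sw_pagefault_counter_ops = {
	.hw_perf_counter_enable		= sw_pagefault_counter_enable,
	.hw_perf_counter_disable	= sw_pagefault_counter_disable,
	.hw_perf_counter_read		= sw_pagefault_counter_read,
 };
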
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c   37
1 file changed, 27 insertions, 10 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 54b4ad0cce68..718b635dece6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -56,7 +56,7 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
-int hw_perf_counter_init(struct perf_counter *counter)
+static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_hw_event *hw_event = &counter->hw_event;
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -135,7 +135,7 @@ u64 hw_perf_disable_all(void)
 EXPORT_SYMBOL_GPL(hw_perf_disable_all);
 
 static inline void
-__hw_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
+__x86_perf_counter_disable(struct hw_perf_counter *hwc, unsigned int idx)
 {
 	wrmsr(hwc->config_base + idx, hwc->config, 0);
 }
@@ -149,13 +149,13 @@ static void __hw_perf_counter_set_period(struct hw_perf_counter *hwc, int idx)
 	wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
 }
 
-static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
+static void __x86_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
 {
 	wrmsr(hwc->config_base + idx,
 	      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE, 0);
 }
 
-void hw_perf_counter_enable(struct perf_counter *counter)
+static void x86_perf_counter_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -170,12 +170,12 @@ void hw_perf_counter_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__hw_perf_counter_disable(hwc, idx);
+	__x86_perf_counter_disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
 
 	__hw_perf_counter_set_period(hwc, idx);
-	__hw_perf_counter_enable(hwc, idx);
+	__x86_perf_counter_enable(hwc, idx);
 }
 
 #ifdef CONFIG_X86_64
@@ -282,20 +282,20 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-void hw_perf_counter_disable(struct perf_counter *counter)
+static void x86_perf_counter_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__hw_perf_counter_disable(hwc, idx);
+	__x86_perf_counter_disable(hwc, idx);
 
 	clear_bit(idx, cpuc->used);
 	cpuc->counters[idx] = NULL;
 	__hw_perf_save_counter(counter, hwc, idx);
 }
 
-void hw_perf_counter_read(struct perf_counter *counter)
+static void x86_perf_counter_read(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned long addr = hwc->counter_base + hwc->idx;
@@ -342,7 +342,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(hwc, idx);
 
 	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
-		__hw_perf_counter_enable(hwc, idx);
+		__x86_perf_counter_enable(hwc, idx);
 }
 
 static void
@@ -572,3 +572,20 @@ void __init init_hw_perf_counters(void)
 
 	perf_counters_initialized = true;
 }
+
+static struct hw_perf_counter_ops x86_perf_counter_ops = {
+	.hw_perf_counter_enable		= x86_perf_counter_enable,
+	.hw_perf_counter_disable	= x86_perf_counter_disable,
+	.hw_perf_counter_read		= x86_perf_counter_read,
+};
+
+struct hw_perf_counter_ops *hw_perf_counter_init(struct perf_counter *counter)
+{
+	int err;
+
+	err = __hw_perf_counter_init(counter);
+	if (err)
+		return NULL;
+
+	return &x86_perf_counter_ops;
+}
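
With the hook table in place, generic code no longer calls the x86 functions directly: it asks hw_perf_counter_init() for an ops pointer when the counter is created and drives the counter through that pointer afterwards. A minimal caller-side sketch follows; the hw_ops field name and the error handling are assumed for illustration, and only hw_perf_counter_init() and the three hooks come from this patch.

 /* Caller-side sketch, illustrative only. */
 struct hw_perf_counter_ops *ops;

 ops = hw_perf_counter_init(counter);	/* arch code selects the backend */
 if (!ops)
	return -EINVAL;			/* hardware setup failed */
 counter->hw_ops = ops;			/* field name assumed for this sketch */

 /* later, when scheduling the counter in, sampling it, and scheduling it out: */
 counter->hw_ops->hw_perf_counter_enable(counter);
 counter->hw_ops->hw_perf_counter_read(counter);
 counter->hw_ops->hw_perf_counter_disable(counter);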