author    Ingo Molnar <mingo@elte.hu>  2008-12-17 03:09:13 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-12-23 06:45:10 -0500
commit    eb2b861810d4ff72454c83996b891df4e0aaff9a (patch)
tree      9b99f1ec88cd3a248425a6370c700aca2db9b759 /arch
parent    5c167b8585c8d91206b395d57011ead7711e322f (diff)
x86, perfcounters: prepare for fixed-mode PMCs
Impact: refactor the x86 code for fixed-mode PMCs

Extend the data structures and rename the existing facilities
to allow for a 'generic' versus 'fixed' counter distinction.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
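For orientation: on Intel CPUs the 'generic' counters are fully programmable via per-counter event-select MSRs, while 'fixed' counters each count one hardwired event and share a single control MSR. A minimal sketch of that register layout, using MSR numbers from the Intel SDM (the fixed-counter MSRs below are assumptions of this sketch, not part of the patch):

/*
 * Sketch only: generic PMC i is programmed through EVENTSEL i and
 * read through PERFCTR i; fixed-function counters live in separate
 * MSRs behind one shared control register.
 */
#define MSR_ARCH_PERFMON_PERFCTR0	0x0c1	/* generic counter 0 */
#define MSR_ARCH_PERFMON_EVENTSEL0	0x186	/* event select for counter 0 */
#define MSR_CORE_PERF_FIXED_CTR0	0x309	/* fixed ctr 0: instructions retired */
#define MSR_CORE_PERF_FIXED_CTR_CTRL	0x38d	/* shared fixed-counter control */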
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/perf_counter.h  11
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c   53
2 files changed, 37 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index 9dadce1124ee..dd5a4a559e2d 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -1,6 +1,13 @@
 #ifndef _ASM_X86_PERF_COUNTER_H
 #define _ASM_X86_PERF_COUNTER_H
 
+/*
+ * Performance counter hw details:
+ */
+
+#define X86_PMC_MAX_GENERIC		8
+#define X86_PMC_MAX_FIXED		3
+
 #define MSR_ARCH_PERFMON_PERFCTR0	0xc1
 #define MSR_ARCH_PERFMON_PERFCTR1	0xc2
 
@@ -20,6 +27,10 @@
 
 #define ARCH_PERFMON_BRANCH_MISSES_RETIRED	6
 
+/*
+ * Intel "Architectural Performance Monitoring" CPUID
+ * detection/enumeration details:
+ */
 union cpuid10_eax {
 	struct {
 		unsigned int version_id:8;
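The cpuid10_eax union above decodes EAX of CPUID leaf 10 (0xA). A sketch of the enumeration step, assuming the full EAX bit layout from the Intel SDM (only version_id is visible in the hunk, and the helper name is hypothetical):

/* assumed full layout of CPUID.0AH:EAX: */
union cpuid10_eax_sketch {
	struct {
		unsigned int version_id:8;	/* perfmon version */
		unsigned int num_counters:8;	/* generic counters per core */
		unsigned int bit_width:8;	/* width of each counter */
		unsigned int mask_length:8;	/* length of the EBX event mask */
	} split;
	unsigned int full;
};

static void sketch_enumerate_pmcs(void)
{
	union cpuid10_eax_sketch eax;
	unsigned int ebx, ecx, edx;

	cpuid(10, &eax.full, &ebx, &ecx, &edx);
	/* eax.split.num_counters generic PMCs, eax.split.bit_width bits each */
}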
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index a4a3a09a654b..fc3af8688232 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -27,13 +27,12 @@ static bool perf_counters_initialized __read_mostly;
 static int nr_hw_counters __read_mostly;
 static u32 perf_counter_mask __read_mostly;
 
-/* No support for fixed function counters yet */
-
-#define MAX_HW_COUNTERS		8
-
 struct cpu_hw_counters {
-	struct perf_counter	*counters[MAX_HW_COUNTERS];
-	unsigned long		used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
+	struct perf_counter	*generic[X86_PMC_MAX_GENERIC];
+	unsigned long		used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)];
+
+	struct perf_counter	*fixed[X86_PMC_MAX_FIXED];
+	unsigned long		used_fixed[BITS_TO_LONGS(X86_PMC_MAX_FIXED)];
 };
 
 /*
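The generic/used pair above (mirrored by fixed/used_fixed) amounts to a per-CPU bitmap allocator over counter slots. A sketch of claiming a free generic slot (helper name hypothetical; the real scheduling logic lives in pmc_generic_enable(), outside this hunk):

static int alloc_generic_slot(struct cpu_hw_counters *cpuc)
{
	int idx = find_first_zero_bit(cpuc->used, X86_PMC_MAX_GENERIC);

	if (idx >= X86_PMC_MAX_GENERIC)
		return -EAGAIN;			/* all generic PMCs busy */

	set_bit(idx, cpuc->used);		/* claim the slot */
	return idx;
}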
@@ -185,7 +184,7 @@ void hw_perf_restore(u64 ctrl)
 EXPORT_SYMBOL_GPL(hw_perf_restore);
 
 static inline void
-__x86_perf_counter_disable(struct perf_counter *counter,
+__pmc_generic_disable(struct perf_counter *counter,
 			   struct hw_perf_counter *hwc, unsigned int idx)
 {
 	int err;
@@ -193,7 +192,7 @@ __x86_perf_counter_disable(struct perf_counter *counter,
 	err = wrmsr_safe(hwc->config_base + idx, hwc->config, 0);
 }
 
-static DEFINE_PER_CPU(u64, prev_left[MAX_HW_COUNTERS]);
+static DEFINE_PER_CPU(u64, prev_left[X86_PMC_MAX_GENERIC]);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
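x86 PMCs count upward and interrupt on overflow, so "setting the next IRQ period" means programming the counter with the negated remaining period. A sketch of the idea (the body of __hw_perf_counter_set_period() is not in this hunk; the counter_base field is assumed by analogy with the config_base used above, and only the low 32 bits are written here):

static void sketch_set_period(struct hw_perf_counter *hwc, int idx, s64 left)
{
	/* remember what was armed, for later readout: */
	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/* the counter overflows after 'left' increments: */
	wrmsr(hwc->counter_base + idx, (u32)(u64)-left, 0);
}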
@@ -231,7 +230,7 @@ __hw_perf_counter_set_period(struct perf_counter *counter,
 }
 
 static void
-__x86_perf_counter_enable(struct perf_counter *counter,
+__pmc_generic_enable(struct perf_counter *counter,
 			  struct hw_perf_counter *hwc, int idx)
 {
 	wrmsr(hwc->config_base + idx,
@@ -241,7 +240,7 @@ __x86_perf_counter_enable(struct perf_counter *counter,
 /*
  * Find a PMC slot for the freshly enabled / scheduled in counter:
  */
-static void x86_perf_counter_enable(struct perf_counter *counter)
+static void pmc_generic_enable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
@@ -256,12 +255,12 @@ static void x86_perf_counter_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__x86_perf_counter_disable(counter, hwc, idx);
+	__pmc_generic_disable(counter, hwc, idx);
 
-	cpuc->counters[idx] = counter;
+	cpuc->generic[idx] = counter;
 
 	__hw_perf_counter_set_period(counter, hwc, idx);
-	__x86_perf_counter_enable(counter, hwc, idx);
+	__pmc_generic_enable(counter, hwc, idx);
 }
 
 void perf_counter_print_debug(void)
@@ -301,16 +300,16 @@ void perf_counter_print_debug(void)
 	local_irq_enable();
 }
 
-static void x86_perf_counter_disable(struct perf_counter *counter)
+static void pmc_generic_disable(struct perf_counter *counter)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
 	struct hw_perf_counter *hwc = &counter->hw;
 	unsigned int idx = hwc->idx;
 
-	__x86_perf_counter_disable(counter, hwc, idx);
+	__pmc_generic_disable(counter, hwc, idx);
 
 	clear_bit(idx, cpuc->used);
-	cpuc->counters[idx] = NULL;
+	cpuc->generic[idx] = NULL;
 
 	/*
 	 * Drain the remaining delta count out of a counter
@@ -349,7 +348,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
 	__hw_perf_counter_set_period(counter, hwc, idx);
 
 	if (pmc_ctrl & ARCH_PERFMON_EVENTSEL0_ENABLE)
-		__x86_perf_counter_enable(counter, hwc, idx);
+		__pmc_generic_enable(counter, hwc, idx);
 }
 
 static void
@@ -392,7 +391,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 again:
 	ack = status;
 	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
-		struct perf_counter *counter = cpuc->counters[bit];
+		struct perf_counter *counter = cpuc->generic[bit];
 
 		clear_bit(bit, (unsigned long *) &status);
 		if (!counter)
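The again:/ack loop above follows Intel's global-status protocol: read which counters overflowed, service each set bit, write the serviced mask back to the ack register, then re-read in case new overflows arrived meanwhile. A sketch with the SDM's status/ack MSR numbers (these two defines are assumptions of the sketch, not part of the patch):

#define MSR_CORE_PERF_GLOBAL_STATUS	0x38e	/* which PMCs overflowed */
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL	0x390	/* write-1-to-clear ack */

static u64 sketch_fetch_overflow_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
	return status;	/* caller acks via MSR_CORE_PERF_GLOBAL_OVF_CTRL */
}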
@@ -412,7 +411,7 @@ again:
 	}
 	/*
 	 * From NMI context we cannot call into the scheduler to
-	 * do a task wakeup - but we mark these counters as
+	 * do a task wakeup - but we mark these generic as
 	 * wakeup_pending and initate a wakeup callback:
 	 */
 	if (nmi) {
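The comment above records an NMI constraint: wake_up() can take scheduler locks, which may already be held by the code the NMI interrupted. A conceptual sketch of the resulting split (wakeup_pending and waitq are assumed from the perf_counter struct of this era; the real branch body is outside this hunk):

	if (nmi) {
		/* NMI path: only set a flag; perf_counter_notify()
		 * below does the real wakeup from a safe context. */
		counter->wakeup_pending = 1;
	} else {
		wake_up(&counter->waitq);	/* safe outside NMI */
	}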
@@ -462,7 +461,7 @@ void perf_counter_notify(struct pt_regs *regs)
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	for_each_bit(bit, cpuc->used, nr_hw_counters) {
-		struct perf_counter *counter = cpuc->counters[bit];
+		struct perf_counter *counter = cpuc->generic[bit];
 
 		if (!counter)
 			continue;
@@ -539,10 +538,10 @@ void __init init_hw_perf_counters(void)
 	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
 	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
 	nr_hw_counters = eax.split.num_counters;
-	if (nr_hw_counters > MAX_HW_COUNTERS) {
-		nr_hw_counters = MAX_HW_COUNTERS;
+	if (nr_hw_counters > X86_PMC_MAX_GENERIC) {
+		nr_hw_counters = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
-			nr_hw_counters, MAX_HW_COUNTERS);
+			nr_hw_counters, X86_PMC_MAX_GENERIC);
 	}
 	perf_counter_mask = (1 << nr_hw_counters) - 1;
 	perf_max_counters = nr_hw_counters;
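A worked example of the mask computation above: a CPU reporting 4 generic counters yields

	nr_hw_counters    = 4;			/* from CPUID */
	perf_counter_mask = (1 << 4) - 1;	/* == 0x0f, one bit per PMC */

and the clipping beforehand guarantees the shift never exceeds the X86_PMC_MAX_GENERIC-sized per-CPU arrays.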
@@ -556,15 +555,15 @@ void __init init_hw_perf_counters(void)
 	register_die_notifier(&perf_counter_nmi_notifier);
 }
 
-static void x86_perf_counter_read(struct perf_counter *counter)
+static void pmc_generic_read(struct perf_counter *counter)
 {
 	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
 }
 
 static const struct hw_perf_counter_ops x86_perf_counter_ops = {
-	.hw_perf_counter_enable		= x86_perf_counter_enable,
-	.hw_perf_counter_disable	= x86_perf_counter_disable,
-	.hw_perf_counter_read		= x86_perf_counter_read,
+	.hw_perf_counter_enable		= pmc_generic_enable,
+	.hw_perf_counter_disable	= pmc_generic_disable,
+	.hw_perf_counter_read		= pmc_generic_read,
 };
 
 const struct hw_perf_counter_ops *
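The diff view cuts off at the opening of the next function's signature. For orientation, a sketch of how core perf code would drive a counter through the ops table shown above (the driver function here is hypothetical):

static void sketch_drive_counter(struct perf_counter *counter,
				 const struct hw_perf_counter_ops *ops)
{
	ops->hw_perf_counter_enable(counter);	/* -> pmc_generic_enable() */
	/* ... counter runs ... */
	ops->hw_perf_counter_read(counter);	/* -> pmc_generic_read() */
	ops->hw_perf_counter_disable(counter);	/* -> pmc_generic_disable() */
}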