author		Ingo Molnar <mingo@elte.hu>	2008-12-17 04:51:15 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-23 06:45:11 -0500
commit		703e937c83bbad79075a7846e062e447c2fee6a4 (patch)
tree		7c3e23179f14c32368213024be20dcf27bb6e7a8 /arch
parent		eb2b861810d4ff72454c83996b891df4e0aaff9a (diff)
perfcounters: add fixed-mode PMC enumeration
Enumerate fixed-mode PMCs based on CPUID, and feed that into the
perfcounter code.

Does not use fixed-mode PMCs yet.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
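[For context, a minimal userspace sketch (not part of the patch) of the
CPUID-based enumeration this commit performs in init_hw_perf_counters():
CPUID leaf 10 (0xa) reports the fixed-function PMC count in the low bits
of EDX, the 4-bit field the cpuid10_edx union below decodes. Assumes a
GCC/Clang x86 toolchain providing <cpuid.h>:

	#include <stdio.h>
	#include <cpuid.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* __get_cpuid() fails if the CPU's maximum leaf is below 10 */
		if (!__get_cpuid(10, &eax, &ebx, &ecx, &edx)) {
			fprintf(stderr, "CPUID leaf 10 not supported\n");
			return 1;
		}
		/* EDX bits 3:0: number of fixed-function counters */
		printf("fixed-function counters: %u\n", edx & 0xf);
		return 0;
	}
]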
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/perf_counter.h	23
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	23
2 files changed, 40 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index dd5a4a559e2d..945a315e6d62 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -41,6 +41,29 @@ union cpuid10_eax {
 	unsigned int full;
 };
 
+union cpuid10_edx {
+	struct {
+		unsigned int num_counters_fixed:4;
+		unsigned int reserved:28;
+	} split;
+	unsigned int full;
+};
+
+
+/*
+ * Fixed-purpose performance counters:
+ */
+
+/* Instr_Retired.Any: */
+#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
+
+/* CPU_CLK_Unhalted.Core: */
+#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
+
+/* CPU_CLK_Unhalted.Ref: */
+#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
+
+
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(int nmi);
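[As a usage note, again a sketch rather than anything in the patch: the
three MSR addresses defined above can be read from userspace through the
msr driver's /dev/cpu/*/msr interface, where the file offset selects the
MSR. Assumes root privileges and the msr module loaded; the counters read
as 0 until something enables them:

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>

	#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309	/* Instr_Retired.Any */

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		/* pread() at offset <msr index> returns the 8-byte MSR value */
		if (pread(fd, &val, sizeof(val),
			  MSR_ARCH_PERFMON_FIXED_CTR0) != sizeof(val)) {
			close(fd);
			return 1;
		}
		printf("FIXED_CTR0: %llu\n", (unsigned long long)val);
		close(fd);
		return 0;
	}
]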
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index fc3af8688232..2fca50c45979 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -27,6 +27,8 @@ static bool perf_counters_initialized __read_mostly;
 static int nr_hw_counters __read_mostly;
 static u32 perf_counter_mask __read_mostly;
 
+static int nr_hw_counters_fixed __read_mostly;
+
 struct cpu_hw_counters {
 	struct perf_counter *generic[X86_PMC_MAX_GENERIC];
 	unsigned long used[BITS_TO_LONGS(X86_PMC_MAX_GENERIC)];
@@ -519,8 +521,9 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 void __init init_hw_perf_counters(void)
 {
 	union cpuid10_eax eax;
-	unsigned int unused;
 	unsigned int ebx;
+	unsigned int unused;
+	union cpuid10_edx edx;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return;
@@ -529,14 +532,14 @@ void __init init_hw_perf_counters(void)
 	 * Check whether the Architectural PerfMon supports
 	 * Branch Misses Retired Event or not.
 	 */
-	cpuid(10, &(eax.full), &ebx, &unused, &unused);
+	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
 	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
 		return;
 
 	printk(KERN_INFO "Intel Performance Monitoring support detected.\n");
 
 	printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
-	printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
+	printk(KERN_INFO "... num counters: %d\n", eax.split.num_counters);
 	nr_hw_counters = eax.split.num_counters;
 	if (nr_hw_counters > X86_PMC_MAX_GENERIC) {
 		nr_hw_counters = X86_PMC_MAX_GENERIC;
@@ -546,8 +549,16 @@ void __init init_hw_perf_counters(void)
 	perf_counter_mask = (1 << nr_hw_counters) - 1;
 	perf_max_counters = nr_hw_counters;
 
-	printk(KERN_INFO "... bit_width: %d\n", eax.split.bit_width);
-	printk(KERN_INFO "... mask_length: %d\n", eax.split.mask_length);
+	printk(KERN_INFO "... bit width: %d\n", eax.split.bit_width);
+	printk(KERN_INFO "... mask length: %d\n", eax.split.mask_length);
+
+	nr_hw_counters_fixed = edx.split.num_counters_fixed;
+	if (nr_hw_counters_fixed > X86_PMC_MAX_FIXED) {
+		nr_hw_counters_fixed = X86_PMC_MAX_FIXED;
+		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
+			nr_hw_counters_fixed, X86_PMC_MAX_FIXED);
+	}
+	printk(KERN_INFO "... fixed counters: %d\n", nr_hw_counters_fixed);
 
 	perf_counters_initialized = true;
 
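[Putting the two hunks together, a hedged userspace approximation of the
new enumeration-and-clipping path. X86_PMC_MAX_FIXED is assumed to be 3
here to match the three fixed-counter MSRs defined above; the real
constant lives elsewhere in the tree:

	#include <stdio.h>
	#include <cpuid.h>

	#define X86_PMC_MAX_FIXED	3	/* assumed value, see lead-in */

	union cpuid10_edx {			/* copied from the header hunk */
		struct {
			unsigned int num_counters_fixed:4;
			unsigned int reserved:28;
		} split;
		unsigned int full;
	};

	int main(void)
	{
		unsigned int eax, ebx, ecx;
		union cpuid10_edx edx;
		int nr_fixed;

		if (!__get_cpuid(10, &eax, &ebx, &ecx, &edx.full))
			return 1;

		nr_fixed = edx.split.num_counters_fixed;
		/* same clipping policy as init_hw_perf_counters(), minus WARN() */
		if (nr_fixed > X86_PMC_MAX_FIXED) {
			fprintf(stderr, "fixed %d > max(%d), clipping!\n",
				nr_fixed, X86_PMC_MAX_FIXED);
			nr_fixed = X86_PMC_MAX_FIXED;
		}
		printf("... fixed counters: %d\n", nr_fixed);
		return 0;
	}
]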