author		Linus Torvalds <torvalds@linux-foundation.org>	2009-07-22 14:41:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-22 14:41:56 -0400
commit		3c3301083e3bea4d14c597106c7b20b4b85fc03d (patch)
tree		6eabdd073bdc27eb3f0dd999ea946955afca18bf /arch/x86/kernel/cpu
parent		612e900c286a9535cc17da5171b0d8dcf8f3a12f (diff)
parent		0fdc7e67dd312986e30b861adff48732bd33eb3f (diff)
Merge branch 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf
* 'perf-counters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-perf: (31 commits)
  perf_counter tools: Give perf top inherit option
  perf_counter tools: Fix vmlinux symbol generation breakage
  perf_counter: Detect debugfs location
  perf_counter: Add tracepoint support to perf list, perf stat
  perf symbol: C++ demangling
  perf: avoid structure size confusion by using a fixed size
  perf_counter: Fix throttle/unthrottle event logging
  perf_counter: Improve perf stat and perf record option parsing
  perf_counter: PERF_SAMPLE_ID and inherited counters
  perf_counter: Plug more stack leaks
  perf: Fix stack data leak
  perf_counter: Remove unused variables
  perf_counter: Make call graph option consistent
  perf_counter: Add perf record option to log addresses
  perf_counter: Log vfork as a fork event
  perf_counter: Synthesize VDSO mmap event
  perf_counter: Make sure we dont leak kernel memory to userspace
  perf_counter tools: Fix index boundary check
  perf_counter: Fix the tracepoint channel to perfcounters
  perf_counter, x86: Extend perf_counter Pentium M support
  ...
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	253
1 file changed, 233 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7b8991..a7aa8f900954 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports