author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-07-22 12:05:48 -0400
committer  Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-07-22 12:05:48 -0400
commit     1d2f37945d1b3a14086c5ea802486778b635cf97
tree       b40a1a596a29acc1511f661c27f284dd06b0bc9d /arch
parent     1483b19f8f5e8ad0c8816de368b099322dad4db5
parent     f1c6a58121f9846ac665b0fbd3cbab90ce8bcbac
Merge commit 'tip/perfcounters/core' into perf-counters-for-linus
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c   255
1 file changed, 235 insertions, 20 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7b8991..7e346d4bc0fb 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
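
Illustration, not part of the patch: a minimal user-space sketch of what the P6_EVNTSEL_MASK filtering in p6_pmu_raw_event above does to a raw config value. The mask values are copied from the hunk; the sample raw value is made up, and the note about the USR/OS/INT bits reflects how the core __hw_perf_counter_init path fills them in.

#include <stdint.h>
#include <stdio.h>

/* Same mask bits as in the patch: event, unit mask, edge, invert, counter mask. */
#define P6_EVNTSEL_EVENT_MASK	0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK	0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK	0x00040000ULL
#define P6_EVNTSEL_INV_MASK	0x00800000ULL
#define P6_EVNTSEL_COUNTER_MASK	0xFF000000ULL
#define P6_EVNTSEL_MASK						\
	(P6_EVNTSEL_EVENT_MASK | P6_EVNTSEL_UNIT_MASK |		\
	 P6_EVNTSEL_EDGE_MASK | P6_EVNTSEL_INV_MASK |		\
	 P6_EVNTSEL_COUNTER_MASK)

int main(void)
{
	/* Hypothetical raw config: event 0x2e with unit mask 0x4f, plus the
	 * USR (16), OS (17) and INT (20) bits set by the caller. */
	uint64_t raw = 0x2eULL | (0x4fULL << 8) |
		       (1ULL << 16) | (1ULL << 17) | (1ULL << 20);
	uint64_t filtered = raw & P6_EVNTSEL_MASK;

	/* Bits 16, 17 and 20 are not in the mask, so they are stripped here;
	 * the kernel sets them itself from the counter attributes. */
	printf("raw=%#llx filtered=%#llx\n",
	       (unsigned long long)raw, (unsigned long long)filtered);
	return 0;
}
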
@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +840,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
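
Illustration, not part of the patch: the overflow test in p6_pmu_handle_irq treats a counter as overflowed once its top implemented bit clears, because a sampling counter is armed with a negative start value. A standalone sketch of that arithmetic, assuming the value returned by x86_perf_counter_update is the raw counter value truncated to counter_bits:

#include <stdint.h>
#include <stdio.h>

/* Effective counter width for a P6-style PMU (see the p6_pmu definition). */
#define COUNTER_BITS 32

/* Mirrors the handler's test: while the top bit is still set the counter
 * is still "negative", i.e. it has not yet wrapped through zero. */
static int overflowed(uint64_t val)
{
	return !(val & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t mask = (1ULL << COUNTER_BITS) - 1;
	uint64_t period = 100000;		/* hypothetical sample period */
	uint64_t val = (uint64_t)(-(int64_t)period) & mask;

	printf("armed:   val=%#llx overflowed=%d\n",
	       (unsigned long long)val, overflowed(val));

	val = (val + period) & mask;		/* "period" events later */
	printf("wrapped: val=%#llx overflowed=%d\n",
	       (unsigned long long)val, overflowed(val));
	return 0;
}
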
@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
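
Illustration, not part of the patch: why counter_bits and counter_mask in the p6_pmu definition above are 32-bit even though the counter MSRs implement 40 bits. Bits 32-39 only mirror bit 31, so masking with (1ULL << 32) - 1 loses no information; the raw value below is a made-up example of such a sign-extended read.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t counter_mask = (1ULL << 32) - 1;	/* as in p6_pmu */

	/* Hypothetical 40-bit raw read with bit 31 set: the hardware reports
	 * bits 32-39 as copies of bit 31, so they carry no extra data. */
	uint64_t raw40 = 0xff80001234ULL;
	uint64_t effective = raw40 & counter_mask;

	printf("raw=%#llx effective=%#llx\n",
	       (unsigned long long)raw40, (unsigned long long)effective);
	return 0;
}
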
@@ -1392,6 +1568,39 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	int high, low;
+
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1609,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports