 arch/x86/kernel/cpu/perf_counter.c | 227
 tools/perf/perf.h                  |   8
 2 files changed, 220 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 36c3dc7b8991..1910f39ff19a 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -66,6 +66,44 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0000,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0000,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
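For context on the raw-event filter above: the combined P6_EVNTSEL_MASK works out to 0xFF84FFFF, so a raw config handed in from user space keeps only the event-select, unit-mask, edge, invert and counter-mask fields of the P6 EVNTSEL register, and every other bit is dropped. The following is a standalone user-space sketch of that masking (illustration only, not part of the patch; the macro names simply mirror the #defines above):

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit fields as the P6_EVNTSEL_* masks in the patch. */
    #define EVNTSEL_EVENT_MASK	0x000000FFULL
    #define EVNTSEL_UNIT_MASK	0x0000FF00ULL
    #define EVNTSEL_EDGE_MASK	0x00040000ULL
    #define EVNTSEL_INV_MASK	0x00800000ULL
    #define EVNTSEL_COUNTER_MASK	0xFF000000ULL

    #define EVNTSEL_MASK (EVNTSEL_EVENT_MASK | EVNTSEL_UNIT_MASK | \
                          EVNTSEL_EDGE_MASK | EVNTSEL_INV_MASK |   \
                          EVNTSEL_COUNTER_MASK)

    int main(void)
    {
            /* A raw config with stray bits set outside the allowed fields. */
            uint64_t raw = 0x00000001004300c0ULL;

            /* Only the architected EVNTSEL fields survive, as in p6_pmu_raw_event(). */
            printf("mask   = 0x%08llx\n", (unsigned long long)EVNTSEL_MASK);
            printf("result = 0x%08llx\n", (unsigned long long)(raw & EVNTSEL_MASK));
            return 0;
    }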
@@ -726,6 +764,23 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +822,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -819,16 +891,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +905,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val = ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	if (!cpuc->enabled)
+		val = 0;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1023,17 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	if (cpuc->enabled)
+		x86_pmu_enable_counter(hwc, idx);
+	else
+		x86_pmu_disable_counter(hwc, idx);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -1176,6 +1267,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
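The overflow test in p6_pmu_handle_irq() relies on how sampling periods are armed on x86: the counter is preloaded with the negative of the period, so the top implemented bit (bit 31 here, matching the .counter_bits = 32 field added further down) stays set until the counter counts up through zero. Once the bit reads back as clear, the handler treats the counter as overflowed. A tiny user-space model of that convention (illustration only, not kernel code; COUNTER_BITS and the helper names are made up for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    #define COUNTER_BITS	32
    #define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

    /* Arm a software model of the counter with -period, as the kernel does. */
    static uint64_t arm_counter(uint64_t period)
    {
            return (uint64_t)(-(int64_t)period) & COUNTER_MASK;
    }

    /* Same test as the IRQ handler: top implemented bit still set => no overflow yet. */
    static int overflowed(uint64_t val)
    {
            return !(val & (1ULL << (COUNTER_BITS - 1)));
    }

    int main(void)
    {
            uint64_t val = arm_counter(100000);

            printf("armed:      0x%08llx overflowed=%d\n",
                   (unsigned long long)val, overflowed(val));

            /* Simulate 100000 events: the counter wraps through zero. */
            val = (val + 100000) & COUNTER_MASK;
            printf("after wrap: 0x%08llx overflowed=%d\n",
                   (unsigned long long)val, overflowed(val));
            return 0;
    }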
@@ -1185,14 +1319,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1382,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1353,6 +1485,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented.  However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31.  As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1392,6 +1550,41 @@ static struct x86_pmu amd_pmu = {
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	int high, low;
+
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* for Pentium M, we need to check if PMU exist */
+		rdmsr(MSR_IA32_MISC_ENABLE, low, high);
+		if (low & MSR_IA32_MISC_ENABLE_EMON)
+			break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	if (!cpu_has_apic) {
+		pr_info("no Local APIC, try rebooting with lapic");
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1593,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 27887c916439..53bb9550def9 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -1,7 +1,13 @@
 #ifndef _PERF_PERF_H
 #define _PERF_PERF_H
 
-#if defined(__x86_64__) || defined(__i386__)
+#if defined(__i386__)
+#include "../../arch/x86/include/asm/unistd.h"
+#define rmb()		asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define cpu_relax()	asm volatile("rep; nop" ::: "memory");
+#endif
+
+#if defined(__x86_64__)
 #include "../../arch/x86/include/asm/unistd.h"
 #define rmb()		asm volatile("lfence" ::: "memory")
 #define cpu_relax()	asm volatile("rep; nop" ::: "memory");
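The perf.h split is needed because lfence is an SSE2 instruction, and the 32-bit P6 parts this patch enables (Pentium Pro through Pentium III) do not implement it; a locked read-modify-write on the stack is the traditional substitute, since any locked operation acts as a full memory barrier on x86. A minimal standalone check of the two forms (hypothetical, just to show both variants compile and run for their respective targets; build the 32-bit case with gcc -m32):

    #include <stdio.h>

    #if defined(__i386__)
    /* Pre-SSE2 safe: a locked RMW on the stack orders memory accesses. */
    #define rmb()	__asm__ __volatile__("lock; addl $0,0(%%esp)" ::: "memory")
    #elif defined(__x86_64__)
    /* All 64-bit x86 CPUs have SSE2, so lfence is always available. */
    #define rmb()	__asm__ __volatile__("lfence" ::: "memory")
    #else
    /* Fallback for other architectures: compiler barrier only. */
    #define rmb()	__asm__ __volatile__("" ::: "memory")
    #endif

    int main(void)
    {
            rmb();
            printf("barrier executed\n");
            return 0;
    }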