author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:22:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-11 16:22:43 -0400
commit     4f0ac854167846bd55cd81dbc9a36e03708aa01c (patch)
tree       0eb34d18a667f8e68ad9255f791560b028fed2a6 /arch/x86
parent     b9356c53ba2f593081e5aa45eb67adcce243d1c0 (diff)
parent     6b58e7f146f8d79c08f62087f928e1f01747de71 (diff)
Merge branch 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (60 commits)
perf tools: Avoid unnecessary work in directory lookups
perf stat: Clean up statistics calculations a bit more
perf stat: More advanced variance computation
perf stat: Use stddev_mean in stead of stddev
perf stat: Remove the limit on repeat
perf stat: Change noise calculation to use stddev
x86, perf_counter, bts: Do not allow kernel BTS tracing for now
x86, perf_counter, bts: Correct pointer-to-u64 casts
x86, perf_counter, bts: Fail if BTS is not available
perf_counter: Fix output-sharing error path
perf trace: Fix read_string()
perf trace: Print out in nanoseconds
perf tools: Seek to the end of the header area
perf trace: Fix parsing of perf.data
perf trace: Sample timestamps as well
perf_counter: Introduce new (non-)paranoia level to allow raw tracepoint access
perf trace: Sample the CPU too
perf tools: Work around strict aliasing related warnings
perf tools: Clean up warnings list in the Makefile
perf tools: Complete support for dynamic strings
...
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/perf_counter.h |  10
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  | 329
2 files changed, 333 insertions, 6 deletions
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
index fa64e401589d..e7b7c938ae27 100644
--- a/arch/x86/include/asm/perf_counter.h
+++ b/arch/x86/include/asm/perf_counter.h
@@ -84,6 +84,16 @@ union cpuid10_edx {
 #define MSR_ARCH_PERFMON_FIXED_CTR2             0x30b
 #define X86_PMC_IDX_FIXED_BUS_CYCLES            (X86_PMC_IDX_FIXED + 2)

+/*
+ * We model BTS tracing as another fixed-mode PMC.
+ *
+ * We choose a value in the middle of the fixed counter range, since lower
+ * values are used by actual fixed counters and higher values are used
+ * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
+ */
+#define X86_PMC_IDX_FIXED_BTS                   (X86_PMC_IDX_FIXED + 16)
+
+
 #ifdef CONFIG_PERF_COUNTERS
 extern void init_hw_perf_counters(void);
 extern void perf_counters_lapic_init(void);
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 900332b800f8..f9cd0849bd42 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2009 Jaswinder Singh Rajput
  * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
  * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
  *
  * For licencing details see kernel-base/COPYING
  */
@@ -20,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/cpu.h>

 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -27,12 +29,52 @@

 static u64 perf_counter_mask __read_mostly;

+/* The maximal number of PEBS counters: */
+#define MAX_PEBS_COUNTERS       4
+
+/* The size of a BTS record in bytes: */
+#define BTS_RECORD_SIZE         24
+
+/* The size of a per-cpu BTS buffer in bytes: */
+#define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 1024)
+
+/* The BTS overflow threshold in bytes from the end of the buffer: */
+#define BTS_OVFL_TH             (BTS_RECORD_SIZE * 64)
+
+
+/*
+ * Bits in the debugctlmsr controlling branch tracing.
+ */
+#define X86_DEBUGCTL_TR                 (1 << 6)
+#define X86_DEBUGCTL_BTS                (1 << 7)
+#define X86_DEBUGCTL_BTINT              (1 << 8)
+#define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
+#define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+        u64     bts_buffer_base;
+        u64     bts_index;
+        u64     bts_absolute_maximum;
+        u64     bts_interrupt_threshold;
+        u64     pebs_buffer_base;
+        u64     pebs_index;
+        u64     pebs_absolute_maximum;
+        u64     pebs_interrupt_threshold;
+        u64     pebs_counter_reset[MAX_PEBS_COUNTERS];
+};
+
 struct cpu_hw_counters {
         struct perf_counter     *counters[X86_PMC_IDX_MAX];
         unsigned long           used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
         unsigned long           interrupts;
         int                     enabled;
+        struct debug_store      *ds;
 };

 /*
@@ -58,6 +100,8 @@ struct x86_pmu {
         int             apic;
         u64             max_period;
         u64             intel_ctrl;
+        void            (*enable_bts)(u64 config);
+        void            (*disable_bts)(void);
 };

 static struct x86_pmu x86_pmu __read_mostly;
@@ -577,6 +621,9 @@ x86_perf_counter_update(struct perf_counter *counter,
         u64 prev_raw_count, new_raw_count;
         s64 delta;

+        if (idx == X86_PMC_IDX_FIXED_BTS)
+                return 0;
+
         /*
          * Careful: an NMI might modify the previous counter value.
          *
@@ -666,10 +713,110 @@ static void release_pmc_hardware(void)
 #endif
 }

+static inline bool bts_available(void)
+{
+        return x86_pmu.enable_bts != NULL;
+}
+
+static inline void init_debug_store_on_cpu(int cpu)
+{
+        struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+
+        if (!ds)
+                return;
+
+        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
+                     (u32)((u64)(unsigned long)ds),
+                     (u32)((u64)(unsigned long)ds >> 32));
+}
+
+static inline void fini_debug_store_on_cpu(int cpu)
+{
+        if (!per_cpu(cpu_hw_counters, cpu).ds)
+                return;
+
+        wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
+}
+
+static void release_bts_hardware(void)
+{
+        int cpu;
+
+        if (!bts_available())
+                return;
+
+        get_online_cpus();
+
+        for_each_online_cpu(cpu)
+                fini_debug_store_on_cpu(cpu);
+
+        for_each_possible_cpu(cpu) {
+                struct debug_store *ds = per_cpu(cpu_hw_counters, cpu).ds;
+
+                if (!ds)
+                        continue;
+
+                per_cpu(cpu_hw_counters, cpu).ds = NULL;
+
+                kfree((void *)(unsigned long)ds->bts_buffer_base);
+                kfree(ds);
+        }
+
+        put_online_cpus();
+}
+
+static int reserve_bts_hardware(void)
+{
+        int cpu, err = 0;
+
+        if (!bts_available())
+                return 0;
+
+        get_online_cpus();
+
+        for_each_possible_cpu(cpu) {
+                struct debug_store *ds;
+                void *buffer;
+
+                err = -ENOMEM;
+                buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+                if (unlikely(!buffer))
+                        break;
+
+                ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+                if (unlikely(!ds)) {
+                        kfree(buffer);
+                        break;
+                }
+
+                ds->bts_buffer_base = (u64)(unsigned long)buffer;
+                ds->bts_index = ds->bts_buffer_base;
+                ds->bts_absolute_maximum =
+                        ds->bts_buffer_base + BTS_BUFFER_SIZE;
+                ds->bts_interrupt_threshold =
+                        ds->bts_absolute_maximum - BTS_OVFL_TH;
+
+                per_cpu(cpu_hw_counters, cpu).ds = ds;
+                err = 0;
+        }
+
+        if (err)
+                release_bts_hardware();
+        else {
+                for_each_online_cpu(cpu)
+                        init_debug_store_on_cpu(cpu);
+        }
+
+        put_online_cpus();
+
+        return err;
+}
+
 static void hw_perf_counter_destroy(struct perf_counter *counter)
 {
         if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                 release_pmc_hardware();
+                release_bts_hardware();
                 mutex_unlock(&pmc_reserve_mutex);
         }
 }
@@ -712,6 +859,42 @@ set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
         return 0;
 }

+static void intel_pmu_enable_bts(u64 config)
+{
+        unsigned long debugctlmsr;
+
+        debugctlmsr = get_debugctlmsr();
+
+        debugctlmsr |= X86_DEBUGCTL_TR;
+        debugctlmsr |= X86_DEBUGCTL_BTS;
+        debugctlmsr |= X86_DEBUGCTL_BTINT;
+
+        if (!(config & ARCH_PERFMON_EVENTSEL_OS))
+                debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
+
+        if (!(config & ARCH_PERFMON_EVENTSEL_USR))
+                debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
+
+        update_debugctlmsr(debugctlmsr);
+}
+
+static void intel_pmu_disable_bts(void)
+{
+        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+        unsigned long debugctlmsr;
+
+        if (!cpuc->ds)
+                return;
+
+        debugctlmsr = get_debugctlmsr();
+
+        debugctlmsr &=
+                ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
+                  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
+
+        update_debugctlmsr(debugctlmsr);
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -728,9 +911,13 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
         err = 0;
         if (!atomic_inc_not_zero(&active_counters)) {
                 mutex_lock(&pmc_reserve_mutex);
-                if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
-                        err = -EBUSY;
-                else
+                if (atomic_read(&active_counters) == 0) {
+                        if (!reserve_pmc_hardware())
+                                err = -EBUSY;
+                        else
+                                err = reserve_bts_hardware();
+                }
+                if (!err)
                         atomic_inc(&active_counters);
                 mutex_unlock(&pmc_reserve_mutex);
         }
@@ -793,6 +980,20 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
         if (config == -1LL)
                 return -EINVAL;

+        /*
+         * Branch tracing:
+         */
+        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
+            (hwc->sample_period == 1)) {
+                /* BTS is not supported by this architecture. */
+                if (!bts_available())
+                        return -EOPNOTSUPP;
+
+                /* BTS is currently only allowed for user-mode. */
+                if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+                        return -EOPNOTSUPP;
+        }
+
 hwc->config |= config;

         return 0;
@@ -817,7 +1018,18 @@ static void p6_pmu_disable_all(void)

 static void intel_pmu_disable_all(void)
 {
+        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+        if (!cpuc->enabled)
+                return;
+
+        cpuc->enabled = 0;
+        barrier();
+
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+
+        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+                intel_pmu_disable_bts();
 }

 static void amd_pmu_disable_all(void)
@@ -875,7 +1087,25 @@ static void p6_pmu_enable_all(void)

 static void intel_pmu_enable_all(void)
 {
+        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+        if (cpuc->enabled)
+                return;
+
+        cpuc->enabled = 1;
+        barrier();
+
         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
+        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+                struct perf_counter *counter =
+                        cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+
+                if (WARN_ON_ONCE(!counter))
+                        return;
+
+                intel_pmu_enable_bts(counter->hw.config);
+        }
 }

 static void amd_pmu_enable_all(void)
@@ -962,6 +1192,11 @@ p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 static inline void
 intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
+        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+                intel_pmu_disable_bts();
+                return;
+        }
+
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                 intel_pmu_disable_fixed(hwc, idx);
                 return;
@@ -990,6 +1225,9 @@ x86_perf_counter_set_period(struct perf_counter *counter,
         s64 period = hwc->sample_period;
         int err, ret = 0;

+        if (idx == X86_PMC_IDX_FIXED_BTS)
+                return 0;
+
         /*
          * If we are way outside a reasoable range then just skip forward:
          */
@@ -1072,6 +1310,14 @@ static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)

 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
+        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+                if (!__get_cpu_var(cpu_hw_counters).enabled)
+                        return;
+
+                intel_pmu_enable_bts(hwc->config);
+                return;
+        }
+
         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                 intel_pmu_enable_fixed(hwc, idx);
                 return;
@@ -1093,11 +1339,16 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 {
         unsigned int event;

+        event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+
+        if (unlikely((event ==
+                      x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
+                     (hwc->sample_period == 1)))
+                return X86_PMC_IDX_FIXED_BTS;
+
         if (!x86_pmu.num_counters_fixed)
                 return -1;

-        event = hwc->config & ARCH_PERFMON_EVENT_MASK;
-
         if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
                 return X86_PMC_IDX_FIXED_INSTRUCTIONS;
         if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
@@ -1118,7 +1369,15 @@ static int x86_pmu_enable(struct perf_counter *counter)
         int idx;

         idx = fixed_mode_idx(counter, hwc);
-        if (idx >= 0) {
+        if (idx == X86_PMC_IDX_FIXED_BTS) {
+                /* BTS is already occupied. */
+                if (test_and_set_bit(idx, cpuc->used_mask))
+                        return -EAGAIN;
+
+                hwc->config_base = 0;
+                hwc->counter_base = 0;
+                hwc->idx = idx;
+        } else if (idx >= 0) {
                 /*
                  * Try to get the fixed counter, if that is already taken
                  * then try to get a generic counter:
@@ -1229,6 +1488,44 @@ void perf_counter_print_debug(void)
         local_irq_restore(flags);
 }

+static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc,
+                                       struct perf_sample_data *data)
+{
+        struct debug_store *ds = cpuc->ds;
+        struct bts_record {
+                u64 from;
+                u64 to;
+                u64 flags;
+        };
+        struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS];
+        unsigned long orig_ip = data->regs->ip;
+        struct bts_record *at, *top;
+
+        if (!counter)
+                return;
+
+        if (!ds)
+                return;
+
+        at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
+        top = (struct bts_record *)(unsigned long)ds->bts_index;
+
+        ds->bts_index = ds->bts_buffer_base;
+
+        for (; at < top; at++) {
+                data->regs->ip  = at->from;
+                data->addr      = at->to;
+
+                perf_counter_output(counter, 1, data);
+        }
+
+        data->regs->ip  = orig_ip;
+        data->addr      = 0;
+
+        /* There's new data available. */
+        counter->pending_kill = POLL_IN;
+}
+
 static void x86_pmu_disable(struct perf_counter *counter)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
@@ -1253,6 +1550,15 @@ static void x86_pmu_disable(struct perf_counter *counter)
          * that we are disabling:
          */
         x86_perf_counter_update(counter, hwc, idx);
+
+        /* Drain the remaining BTS records. */
+        if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+                struct perf_sample_data data;
+                struct pt_regs regs;
+
+                data.regs = &regs;
+                intel_pmu_drain_bts_buffer(cpuc, &data);
+        }
         cpuc->counters[idx] = NULL;
         clear_bit(idx, cpuc->used_mask);

@@ -1280,6 +1586,7 @@ static int intel_pmu_save_and_restart(struct perf_counter *counter)

 static void intel_pmu_reset(void)
 {
+        struct debug_store *ds = __get_cpu_var(cpu_hw_counters).ds;
         unsigned long flags;
         int idx;

@@ -1297,6 +1604,8 @@ static void intel_pmu_reset(void)
         for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
         }
+        if (ds)
+                ds->bts_index = ds->bts_buffer_base;

         local_irq_restore(flags);
 }
@@ -1362,6 +1671,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
         cpuc = &__get_cpu_var(cpu_hw_counters);

         perf_disable();
+        intel_pmu_drain_bts_buffer(cpuc, &data);
         status = intel_pmu_get_status();
         if (!status) {
                 perf_enable();
@@ -1571,6 +1881,8 @@ static struct x86_pmu intel_pmu = {
          * the generic counter period:
          */
         .max_period             = (1ULL << 31) - 1,
+        .enable_bts             = intel_pmu_enable_bts,
+        .disable_bts            = intel_pmu_disable_bts,
 };

 static struct x86_pmu amd_pmu = {
@@ -1962,3 +2274,8 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)

         return entry;
 }
+
+void hw_perf_counter_setup_online(int cpu)
+{
+        init_debug_store_on_cpu(cpu);
+}
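
The BTS path above is selected purely by the counter configuration: __hw_perf_counter_init() accepts a PERF_COUNT_HW_BRANCH_INSTRUCTIONS counter with sample_period == 1 only when BTS is available and kernel-mode tracing is excluded, and fixed_mode_idx() then routes it to X86_PMC_IDX_FIXED_BTS. Below is a minimal user-space sketch of opening such a counter, assuming the 2009-era perf_counters ABI (struct perf_counter_attr, __NR_perf_counter_open and <linux/perf_counter.h>, later renamed to perf_events); the helper name and error handling are illustrative and not part of this patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>    /* struct perf_counter_attr, PERF_* enums */

/*
 * Illustrative helper (not from the patch): open a branch-tracing counter
 * for the given task. sample_period == 1 on branch instructions is what
 * fixed_mode_idx() maps to X86_PMC_IDX_FIXED_BTS; exclude_kernel must be
 * set because the patch rejects ARCH_PERFMON_EVENTSEL_OS for BTS.
 */
static int open_bts_counter(pid_t pid)
{
        struct perf_counter_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size           = sizeof(attr);
        attr.type           = PERF_TYPE_HARDWARE;
        attr.config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period  = 1;                /* every branch: the BTS fast path */
        attr.sample_type    = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
        attr.exclude_kernel = 1;                /* kernel BTS tracing is refused */

        /* (attr, pid, cpu, group_fd, flags) */
        return syscall(__NR_perf_counter_open, &attr, pid, -1, -1, 0);
}

int main(void)
{
        int fd = open_bts_counter(0);           /* 0 = calling task */

        if (fd < 0) {
                /* e.g. EOPNOTSUPP when bts_available() is false */
                perror("perf_counter_open");
                return 1;
        }

        /*
         * Each drained BTS record is emitted as a sample whose IP is the
         * branch source (at->from) and whose addr is the target (at->to);
         * a real consumer would mmap() fd and read those records here.
         */
        close(fd);
        return 0;
}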