author    | Ingo Molnar <mingo@kernel.org> | 2013-04-24 03:26:30 -0400
committer | Ingo Molnar <mingo@kernel.org> | 2013-04-26 03:31:41 -0400
commit    | 5ac2b5c2721501a8f5c5e1cd4116cbc31ace6886 (patch)
tree      | 7edd3d6f36b723c717b7cda85e92f4d4111133a8 /arch
parent    | 94f4db3590893c600506105b88dab581c7f6f5c8 (diff)
perf/x86/intel/P4: Robustify P4 PMU types
While extending the integer type extension checks in the sparse
static code checker, Linus found various fragile patterns of mixed
signed/unsigned, 64-bit/32-bit integer use in perf_event_p4.c.

The relevant hardware register ABI is 64 bits wide on 32-bit
kernels as well, so clean it all up a bit, remove unnecessary
casts, and make sure we use 64-bit unsigned integers in these
places.
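
For illustration, here is a minimal standalone sketch of the bug
class (hypothetical reduced names, not code from the patch):
complementing a 32-bit unsigned mask happens in 32 bits, so ANDing
the result against a 64-bit config word silently clears the entire
upper half.

  #include <stdio.h>
  #include <stdint.h>

  #define CCCR_OVF_32 0x80000000U    /* 32-bit literal, as before the patch */
  #define CCCR_OVF_64 0x80000000ULL  /* 64-bit literal, as after the patch  */

  int main(void)
  {
          uint64_t config = ~0ULL;   /* pretend every config bit is set */

          /* ~CCCR_OVF_32 == 0x7fffffff, zero-extended to 64 bits:
           * the AND wipes bits 32-63 along with bit 31. */
          printf("%016llx\n", (unsigned long long)(config & ~CCCR_OVF_32));

          /* ~CCCR_OVF_64 == 0xffffffff7fffffff: only bit 31 is cleared. */
          printf("%016llx\n", (unsigned long long)(config & ~CCCR_OVF_64));

          return 0;
  }

This prints 000000007fffffff and then ffffffff7fffffff; the first
is the fragile pattern that p4_pmu_disable_event() below would hit
with 32-bit P4_CCCR_* literals.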
[ Unfortunately this patch was not tested on real P4 hardware, as
  those are pretty rare already. If this patch causes any problems
  on P4 hardware then please holler ... ]
Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Cc: David Miller <davem@davemloft.net>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20130424072630.GB1780@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/include/asm/perf_event_p4.h | 62
-rw-r--r-- | arch/x86/kernel/cpu/perf_event_p4.c  |  9
2 files changed, 35 insertions(+), 36 deletions(-)
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 4f7e67e2345e..85e13ccf15c4 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -24,45 +24,45 @@
 #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
 #define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
 
-#define P4_ESCR_EVENT_MASK 0x7e000000U
+#define P4_ESCR_EVENT_MASK 0x7e000000ULL
 #define P4_ESCR_EVENT_SHIFT 25
-#define P4_ESCR_EVENTMASK_MASK 0x01fffe00U
+#define P4_ESCR_EVENTMASK_MASK 0x01fffe00ULL
 #define P4_ESCR_EVENTMASK_SHIFT 9
-#define P4_ESCR_TAG_MASK 0x000001e0U
+#define P4_ESCR_TAG_MASK 0x000001e0ULL
 #define P4_ESCR_TAG_SHIFT 5
-#define P4_ESCR_TAG_ENABLE 0x00000010U
-#define P4_ESCR_T0_OS 0x00000008U
-#define P4_ESCR_T0_USR 0x00000004U
-#define P4_ESCR_T1_OS 0x00000002U
-#define P4_ESCR_T1_USR 0x00000001U
+#define P4_ESCR_TAG_ENABLE 0x00000010ULL
+#define P4_ESCR_T0_OS 0x00000008ULL
+#define P4_ESCR_T0_USR 0x00000004ULL
+#define P4_ESCR_T1_OS 0x00000002ULL
+#define P4_ESCR_T1_USR 0x00000001ULL
 
 #define P4_ESCR_EVENT(v) ((v) << P4_ESCR_EVENT_SHIFT)
 #define P4_ESCR_EMASK(v) ((v) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_TAG(v) ((v) << P4_ESCR_TAG_SHIFT)
 
-#define P4_CCCR_OVF 0x80000000U
-#define P4_CCCR_CASCADE 0x40000000U
-#define P4_CCCR_OVF_PMI_T0 0x04000000U
-#define P4_CCCR_OVF_PMI_T1 0x08000000U
-#define P4_CCCR_FORCE_OVF 0x02000000U
-#define P4_CCCR_EDGE 0x01000000U
-#define P4_CCCR_THRESHOLD_MASK 0x00f00000U
+#define P4_CCCR_OVF 0x80000000ULL
+#define P4_CCCR_CASCADE 0x40000000ULL
+#define P4_CCCR_OVF_PMI_T0 0x04000000ULL
+#define P4_CCCR_OVF_PMI_T1 0x08000000ULL
+#define P4_CCCR_FORCE_OVF 0x02000000ULL
+#define P4_CCCR_EDGE 0x01000000ULL
+#define P4_CCCR_THRESHOLD_MASK 0x00f00000ULL
 #define P4_CCCR_THRESHOLD_SHIFT 20
-#define P4_CCCR_COMPLEMENT 0x00080000U
-#define P4_CCCR_COMPARE 0x00040000U
-#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000U
+#define P4_CCCR_COMPLEMENT 0x00080000ULL
+#define P4_CCCR_COMPARE 0x00040000ULL
+#define P4_CCCR_ESCR_SELECT_MASK 0x0000e000ULL
 #define P4_CCCR_ESCR_SELECT_SHIFT 13
-#define P4_CCCR_ENABLE 0x00001000U
-#define P4_CCCR_THREAD_SINGLE 0x00010000U
-#define P4_CCCR_THREAD_BOTH 0x00020000U
-#define P4_CCCR_THREAD_ANY 0x00030000U
-#define P4_CCCR_RESERVED 0x00000fffU
+#define P4_CCCR_ENABLE 0x00001000ULL
+#define P4_CCCR_THREAD_SINGLE 0x00010000ULL
+#define P4_CCCR_THREAD_BOTH 0x00020000ULL
+#define P4_CCCR_THREAD_ANY 0x00030000ULL
+#define P4_CCCR_RESERVED 0x00000fffULL
 
 #define P4_CCCR_THRESHOLD(v) ((v) << P4_CCCR_THRESHOLD_SHIFT)
 #define P4_CCCR_ESEL(v) ((v) << P4_CCCR_ESCR_SELECT_SHIFT)
 
 #define P4_GEN_ESCR_EMASK(class, name, bit) \
-	class##__##name = ((1 << bit) << P4_ESCR_EVENTMASK_SHIFT)
+	class##__##name = ((1ULL << bit) << P4_ESCR_EVENTMASK_SHIFT)
 #define P4_ESCR_EMASK_BIT(class, name) class##__##name
 
 /*
@@ -107,7 +107,7 @@
  * P4_PEBS_CONFIG_MASK and related bits on
  * modification.)
  */
-#define P4_CONFIG_ALIASABLE (1 << 9)
+#define P4_CONFIG_ALIASABLE (1ULL << 9)
 
 /*
  * The bits we allow to pass for RAW events
@@ -784,17 +784,17 @@ enum P4_ESCR_EMASKS {
  * Note we have UOP and PEBS bits reserved for now
  * just in case if we will need them once
  */
-#define P4_PEBS_CONFIG_ENABLE (1 << 7)
-#define P4_PEBS_CONFIG_UOP_TAG (1 << 8)
-#define P4_PEBS_CONFIG_METRIC_MASK 0x3f
-#define P4_PEBS_CONFIG_MASK 0xff
+#define P4_PEBS_CONFIG_ENABLE (1ULL << 7)
+#define P4_PEBS_CONFIG_UOP_TAG (1ULL << 8)
+#define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
+#define P4_PEBS_CONFIG_MASK 0xFFLL
 
 /*
  * mem: Only counters MSR_IQ_COUNTER4 (16) and
  * MSR_IQ_COUNTER5 (17) are allowed for PEBS sampling
  */
-#define P4_PEBS_ENABLE 0x02000000U
-#define P4_PEBS_ENABLE_UOP_TAG 0x01000000U
+#define P4_PEBS_ENABLE 0x02000000ULL
+#define P4_PEBS_ENABLE_UOP_TAG 0x01000000ULL
 
 #define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
 #define p4_config_unpack_pebs(v) (((u64)(v)) & P4_PEBS_CONFIG_MASK)
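
As an aside, the two unpack helpers at the end of the hunk above
are easy to check in isolation; a minimal compilable sketch (the
packed config value is made up for illustration):

  #include <stdio.h>
  #include <stdint.h>

  typedef uint64_t u64;

  #define P4_PEBS_CONFIG_METRIC_MASK 0x3FLL
  #define P4_PEBS_CONFIG_MASK        0xFFLL

  #define p4_config_unpack_metric(v) (((u64)(v)) & P4_PEBS_CONFIG_METRIC_MASK)
  #define p4_config_unpack_pebs(v)   (((u64)(v)) & P4_PEBS_CONFIG_MASK)

  int main(void)
  {
          u64 config = 0xdeadbeef000000c5ULL;   /* made-up packed event config */

          /* Both results stay 64-bit wide now that the masks are LL literals. */
          printf("pebs:   %#llx\n", (unsigned long long)p4_config_unpack_pebs(config));   /* 0xc5 */
          printf("metric: %#llx\n", (unsigned long long)p4_config_unpack_metric(config)); /* 0x5  */

          return 0;
  }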
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 92c7e39a079f..3486e6660357 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
	 * So at moment let leave metrics turned on forever -- it's
	 * ok for now but need to be revisited!
	 *
-	 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
-	 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+	 * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, 0);
+	 * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, 0);
	 */
 }
 
@@ -910,8 +910,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
	 * asserted again and again
	 */
	(void)wrmsrl_safe(hwc->config_base,
-		(u64)(p4_config_unpack_cccr(hwc->config)) &
-			~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
+		p4_config_unpack_cccr(hwc->config) & ~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
 }
 
 static void p4_pmu_disable_all(void)
@@ -957,7 +956,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
	u64 escr_addr, cccr;
 
	bind = &p4_event_bind_map[idx];
-	escr_addr = (u64)bind->escr_msr[thread];
+	escr_addr = bind->escr_msr[thread];
 
	/*
	 * - we dont support cascaded counters yet
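
One last note on the escr_addr hunk: the dropped (u64) cast was
redundant because assigning an unsigned 32-bit MSR address to a u64
zero-extends under the usual C conversion rules. A tiny sketch (the
MSR value is a stand-in, not a real binding):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          uint32_t msr = 0x3a0;        /* stand-in for an ESCR MSR address */
          uint64_t escr_addr = msr;    /* implicit, well-defined zero-extension */

          printf("%#llx\n", (unsigned long long)escr_addr);   /* prints 0x3a0 */
          return 0;
  }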