author		Robert Richter <robert.richter@amd.com>	2010-02-26 11:20:55 -0500
committer	Robert Richter <robert.richter@amd.com>	2010-05-04 05:35:28 -0400
commit		8617f98c001d00b176422d707e6a67b88bcd7e0d (patch)
tree		e857f11dcbc6e650f4d0c30839099358c814e537 /arch/x86/oprofile/op_model_ppro.c
parent		83300ce0df6b72e156b386457aa0f0902b8c0a98 (diff)
oprofile/x86: return -EBUSY if counters are already reserved
In case a counter is already reserved by the watchdog or perf_event
subsystem, oprofile silently ignored that counter. This case is now
handled and oprofile_setup() reports an error.
Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'arch/x86/oprofile/op_model_ppro.c')
-rw-r--r--	arch/x86/oprofile/op_model_ppro.c	24
1 file changed, 13 insertions, 11 deletions
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index b07d25a52f02..1fd17cfb956b 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -46,21 +46,30 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
 	}
 }
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
 	int i;
 
 	for (i = 0; i < num_counters; i++) {
 		if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
-			continue;
+			goto fail;
 		if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
 			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
-			continue;
+			goto fail;
 		}
 		/* both registers must be reserved */
 		msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
 		msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+		continue;
+fail:
+		if (!counter_config[i].enabled)
+			continue;
+		op_x86_warn_reserved(i);
+		ppro_shutdown(msrs);
+		return -EBUSY;
 	}
+
+	return 0;
 }
 
 
@@ -96,15 +105,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 
 	/* clear all counters */
 	for (i = 0; i < num_counters; ++i) {
-		if (unlikely(!msrs->controls[i].addr)) {
-			if (counter_config[i].enabled && !smp_processor_id())
-				/*
-				 * counter is reserved, this is on all
-				 * cpus, so report only for cpu #0
-				 */
-				op_x86_warn_reserved(i);
+		if (!msrs->controls[i].addr)
 			continue;
-		}
 		rdmsrl(msrs->controls[i].addr, val);
 		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
 			op_x86_warn_in_use(i);
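
The behaviour change above (bail out with -EBUSY instead of silently skipping a reserved counter) can be seen in isolation in the small standalone C sketch below. It is not kernel code: reserve_counter(), reserve_evntsel(), release_counter(), shutdown_all() and the enabled[]/counter_addr[]/control_addr[] arrays are made-up stand-ins for the NMI-reservation helpers, ppro_shutdown() and struct op_msrs used in op_model_ppro.c; only the shape of the loop follows the patched ppro_fill_in_addresses().

/*
 * Standalone illustration of the reservation pattern introduced above:
 * reserve both "registers" for each counter slot, and if either
 * reservation fails for a counter the user actually enabled, roll back
 * and return -EBUSY instead of silently skipping that counter.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 2

static bool counter_taken[NUM_COUNTERS];		/* pretend reservation state */
static bool enabled[NUM_COUNTERS] = { true, true };	/* user-enabled counters */
static unsigned long counter_addr[NUM_COUNTERS];
static unsigned long control_addr[NUM_COUNTERS];

static bool reserve_counter(int i) { return !counter_taken[i]; }
static bool reserve_evntsel(int i) { (void)i; return true; }
static void release_counter(int i) { (void)i; /* undo reserve_counter() */ }

static void shutdown_all(void)
{
	/* release everything reserved so far (mirrors ppro_shutdown()) */
	for (int i = 0; i < NUM_COUNTERS; i++) {
		counter_addr[i] = 0;
		control_addr[i] = 0;
	}
}

static int fill_in_addresses(void)
{
	for (int i = 0; i < NUM_COUNTERS; i++) {
		if (!reserve_counter(i))
			goto fail;
		if (!reserve_evntsel(i)) {
			release_counter(i);
			goto fail;
		}
		/* both registers must be reserved */
		counter_addr[i] = 0xc1 + i;	/* placeholder "MSR" addresses */
		control_addr[i] = 0x186 + i;
		continue;
fail:
		if (!enabled[i])
			continue;	/* unused counter: skip it, as before */
		fprintf(stderr, "counter %d is already reserved\n", i);
		shutdown_all();
		return -EBUSY;
	}

	return 0;
}

int main(void)
{
	counter_taken[1] = true;	/* simulate a counter held by another user */

	int err = fill_in_addresses();
	printf("fill_in_addresses() = %d%s\n", err, err == -EBUSY ? " (-EBUSY)" : "");
	return err ? 1 : 0;
}

Run as-is, the second (already "reserved") counter hits the fail path and the function returns -EBUSY, mirroring the error that oprofile_setup() now reports.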