author     Robert Richter <robert.richter@amd.com>    2010-02-26 11:20:55 -0500
committer  Robert Richter <robert.richter@amd.com>    2010-05-04 05:35:28 -0400
commit     8617f98c001d00b176422d707e6a67b88bcd7e0d (patch)
tree       e857f11dcbc6e650f4d0c30839099358c814e537
parent     83300ce0df6b72e156b386457aa0f0902b8c0a98 (diff)
oprofile/x86: return -EBUSY if counters are already reserved
If a counter is already reserved by the watchdog or the perf_event
subsystem, oprofile used to ignore that counter silently. This case is
now handled, and oprofile_setup() reports an error.
Signed-off-by: Robert Richter <robert.richter@amd.com>
-rw-r--r--  arch/x86/oprofile/nmi_int.c       |  5
-rw-r--r--  arch/x86/oprofile/op_model_amd.c  | 24
-rw-r--r--  arch/x86/oprofile/op_model_p4.c   | 14
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 24
-rw-r--r--  arch/x86/oprofile/op_x86_model.h  |  2
5 files changed, 44 insertions, 25 deletions
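
Taken together, the hunks below change the fill_in_addresses model hook from void to int and make nmi_setup() propagate its result. The following sketch summarizes the caller-visible contract after the patch; it is illustrative only, and the allocate_msrs()/free_msrs() calls and the fail: cleanup label are assumptions about surrounding nmi_int.c code that the first hunk does not show.

```c
/*
 * Illustrative sketch, not the patch itself: after this change each
 * model's fill_in_addresses() returns 0 on success or -EBUSY when an
 * enabled counter's MSRs are already reserved (e.g. by the NMI
 * watchdog or the perf_event subsystem), and nmi_setup() passes that
 * error up so oprofile_setup() fails instead of silently profiling
 * with fewer counters.
 */
static int nmi_setup(void)
{
        int err = 0;

        if (!allocate_msrs())           /* assumed helper in nmi_int.c */
                return -ENOMEM;

        /* Assume saved/restored counters are the same on all CPUs */
        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
        if (err)
                goto fail;              /* e.g. -EBUSY from a reserved counter */

        /*
         * ... copy the MSR layout to the other CPUs, register the NMI
         * handler and start the counters (unchanged by this patch) ...
         */
        return 0;

fail:
        free_msrs();                    /* assumed cleanup path */
        return err;
}
```

Note that a reservation conflict is only fatal for counters the user actually enabled: per the fail: paths added in the model files below, a disabled counter whose MSRs cannot be reserved is still skipped silently.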
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index c0c21f200faf..9f001d904599 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -357,7 +357,10 @@ static int nmi_setup(void)
          */
 
         /* Assume saved/restored counters are the same on all CPUs */
-        model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+        err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
+        if (err)
+                goto fail;
+
         for_each_possible_cpu(cpu) {
                 if (!cpu)
                         continue;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 7e5886d54bd5..536d0b0b39a5 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -138,21 +138,30 @@ static void op_amd_shutdown(struct op_msrs const * const msrs)
         }
 }
 
-static void op_amd_fill_in_addresses(struct op_msrs * const msrs)
+static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
 {
         int i;
 
         for (i = 0; i < NUM_COUNTERS; i++) {
                 if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
-                        continue;
+                        goto fail;
                 if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
                         release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
-                        continue;
+                        goto fail;
                 }
                 /* both registers must be reserved */
                 msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
                 msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
+                continue;
+        fail:
+                if (!counter_config[i].enabled)
+                        continue;
+                op_x86_warn_reserved(i);
+                op_amd_shutdown(msrs);
+                return -EBUSY;
         }
+
+        return 0;
 }
 
 static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
@@ -172,15 +181,8 @@ static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
 
         /* clear all counters */
         for (i = 0; i < NUM_COUNTERS; ++i) {
-                if (unlikely(!msrs->controls[i].addr)) {
-                        if (counter_config[i].enabled && !smp_processor_id())
-                                /*
-                                 * counter is reserved, this is on all
-                                 * cpus, so report only for cpu #0
-                                 */
-                                op_x86_warn_reserved(i);
+                if (!msrs->controls[i].addr)
                         continue;
-                }
                 rdmsrl(msrs->controls[i].addr, val);
                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                         op_x86_warn_in_use(i);
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 7cc80df330d5..182558dd5515 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -404,7 +404,7 @@ static void p4_shutdown(struct op_msrs const * const msrs)
         }
 }
 
-static void p4_fill_in_addresses(struct op_msrs * const msrs)
+static int p4_fill_in_addresses(struct op_msrs * const msrs)
 {
         unsigned int i;
         unsigned int addr, cccraddr, stag;
@@ -486,6 +486,18 @@ static void p4_fill_in_addresses(struct op_msrs * const msrs)
                         msrs->controls[i++].addr = MSR_P4_CRU_ESCR5;
                 }
         }
+
+        for (i = 0; i < num_counters; ++i) {
+                if (!counter_config[i].enabled)
+                        continue;
+                if (msrs->controls[i].addr)
+                        continue;
+                op_x86_warn_reserved(i);
+                p4_shutdown(msrs);
+                return -EBUSY;
+        }
+
+        return 0;
 }
 
 
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index b07d25a52f02..1fd17cfb956b 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -46,21 +46,30 @@ static void ppro_shutdown(struct op_msrs const * const msrs)
         }
 }
 
-static void ppro_fill_in_addresses(struct op_msrs * const msrs)
+static int ppro_fill_in_addresses(struct op_msrs * const msrs)
 {
         int i;
 
         for (i = 0; i < num_counters; i++) {
                 if (!reserve_perfctr_nmi(MSR_P6_PERFCTR0 + i))
-                        continue;
+                        goto fail;
                 if (!reserve_evntsel_nmi(MSR_P6_EVNTSEL0 + i)) {
                         release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
-                        continue;
+                        goto fail;
                 }
                 /* both registers must be reserved */
                 msrs->counters[i].addr = MSR_P6_PERFCTR0 + i;
                 msrs->controls[i].addr = MSR_P6_EVNTSEL0 + i;
+                continue;
+        fail:
+                if (!counter_config[i].enabled)
+                        continue;
+                op_x86_warn_reserved(i);
+                ppro_shutdown(msrs);
+                return -EBUSY;
         }
+
+        return 0;
 }
 
 
@@ -96,15 +105,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
 
         /* clear all counters */
         for (i = 0; i < num_counters; ++i) {
-                if (unlikely(!msrs->controls[i].addr)) {
-                        if (counter_config[i].enabled && !smp_processor_id())
-                                /*
-                                 * counter is reserved, this is on all
-                                 * cpus, so report only for cpu #0
-                                 */
-                                op_x86_warn_reserved(i);
+                if (!msrs->controls[i].addr)
                         continue;
-                }
                 rdmsrl(msrs->controls[i].addr, val);
                 if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                         op_x86_warn_in_use(i);
diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
index ff82a755edd4..551401398fba 100644
--- a/arch/x86/oprofile/op_x86_model.h
+++ b/arch/x86/oprofile/op_x86_model.h
@@ -41,7 +41,7 @@ struct op_x86_model_spec {
         u16 event_mask;
         int (*init)(struct oprofile_operations *ops);
         void (*exit)(void);
-        void (*fill_in_addresses)(struct op_msrs * const msrs);
+        int (*fill_in_addresses)(struct op_msrs * const msrs);
         void (*setup_ctrs)(struct op_x86_model_spec const *model,
                         struct op_msrs const * const msrs);
         int (*check_ctrs)(struct pt_regs * const regs,
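
With the op_x86_model.h change above, every op_x86_model_spec must now provide an int-returning fill_in_addresses; the patch updates the AMD, P4, and PPro specs accordingly. The fragment below is a hypothetical example of the resulting shape only: the op_example_* names are invented for illustration and do not exist in the tree.

```c
/* Hypothetical model, for illustration only. */
static int op_example_fill_in_addresses(struct op_msrs * const msrs)
{
        /*
         * Reserve the perfctr/evntsel MSR pairs here.  If an MSR needed
         * by an *enabled* counter is already taken, undo the partial
         * reservation and report the conflict with -EBUSY.
         */
        return 0;                       /* or -EBUSY on conflict */
}

static struct op_x86_model_spec op_example_spec = {
        .num_counters           = 4,
        .num_controls           = 4,
        .fill_in_addresses      = &op_example_fill_in_addresses,
        /* .setup_ctrs, .check_ctrs, ... keep their existing signatures */
};
```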
