author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-19 08:22:50 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-22 08:18:26 -0400
commit		6809b6ea73f7291f2e495d40397f1172c9caa77e (patch)
tree		3653223bfd80867056da5e4f57be9512c67d706f /arch/x86/kernel
parent		5553be2620ac901c21a25657bd5b59f73254e6d5 (diff)
perf, x86: Less disastrous PEBS/BTS buffer allocation failure
Currently PEBS/BTS buffers are allocated when we instantiate the first
event; when this fails, everything fails. This is a problem because
especially BTS tries to allocate a rather large buffer (64K), which
can easily fail.

This patch changes the logic such that when either buffer allocation
fails, we simply don't allow events that would use these facilities,
but continue functioning for all other events.

This logic comes from a much larger patch proposed by Stephane.

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <20101019134808.354429461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
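To make the before/after behaviour concrete, here is a minimal userspace-style
sketch of the new allocation policy. This is an illustration only, not the
kernel code: the helper name, the PEBS buffer size, and the use of malloc()
in place of the per-CPU kernel allocations are all invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative flags mirroring x86_pmu.bts_active / x86_pmu.pebs_active. */
    static int bts_active, pebs_active;
    static void *bts_buf, *pebs_buf;

    static int reserve_buffers_sketch(void)
    {
        int bts_err = 0, pebs_err = 0;

        /* Old policy: any failed allocation failed the whole reservation. */

        /* New policy: a failed allocation only disables its own facility. */
        bts_buf = malloc(64 * 1024);    /* the rather large BTS buffer */
        if (!bts_buf)
            bts_err = 1;

        pebs_buf = malloc(4096);        /* example PEBS buffer size */
        if (!pebs_buf)
            pebs_err = 1;

        bts_active = !bts_err;
        pebs_active = !pebs_err;

        return 0;   /* reservation itself no longer reports failure */
    }

    int main(void)
    {
        reserve_buffers_sketch();
        printf("bts_active=%d pebs_active=%d\n", bts_active, pebs_active);
        free(bts_buf);
        free(pebs_buf);
        return 0;
    }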
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	5
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_ds.c	58
2 files changed, 47 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f369c53315a..61e78f65106 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -238,6 +238,7 @@ struct x86_pmu {
 	 * Intel DebugStore bits
 	 */
 	int		bts, pebs;
+	int		bts_active, pebs_active;
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
@@ -478,7 +479,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
 	    (hwc->sample_period == 1)) {
 		/* BTS is not supported by this architecture. */
-		if (!x86_pmu.bts)
+		if (!x86_pmu.bts_active)
 			return -EOPNOTSUPP;
 
 		/* BTS is currently only allowed for user-mode. */
@@ -497,7 +498,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 	int precise = 0;
 
 	/* Support for constant skid */
-	if (x86_pmu.pebs) {
+	if (x86_pmu.pebs_active) {
 		precise++;
 
 		/* Support for IP fixup */
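The x86_pmu_hw_config() hunk above gates the precise-skid capability count on
pebs_active rather than raw PEBS hardware support. Condensed into a standalone
sketch (the ip_fixup argument is a stand-in for the kernel's separate IP-fixup
capability check, which this hunk only references in a comment):

    #include <stdio.h>

    /* Sketch: maximum precise_ip level a user may request. An attr.precise_ip
     * above this value is rejected, per the hunk above. */
    static int max_precise_sketch(int pebs_active, int ip_fixup)
    {
        int precise = 0;

        if (pebs_active) {
            precise++;          /* constant skid supported */
            if (ip_fixup)
                precise++;      /* exact IP via fixup */
        }
        return precise;
    }

    int main(void)
    {
        /* With PEBS disabled by a failed allocation, no precise level remains. */
        printf("pebs off: %d, pebs on + fixup: %d\n",
               max_precise_sketch(0, 1), max_precise_sketch(1, 1));
        return 0;
    }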
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3c86f4d2f02..05c7db68277 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -193,36 +193,66 @@ static void release_ds_buffers(void)
 
 static int reserve_ds_buffers(void)
 {
-	int cpu, err = 0;
+	int bts_err = 0, pebs_err = 0;
+	int cpu;
+
+	x86_pmu.bts_active = 0;
+	x86_pmu.pebs_active = 0;
 
 	if (!x86_pmu.bts && !x86_pmu.pebs)
 		return 0;
 
+	if (!x86_pmu.bts)
+		bts_err = 1;
+
+	if (!x86_pmu.pebs)
+		pebs_err = 1;
+
 	get_online_cpus();
 
 	for_each_possible_cpu(cpu) {
-		if (alloc_ds_buffer(cpu))
-			break;
+		if (alloc_ds_buffer(cpu)) {
+			bts_err = 1;
+			pebs_err = 1;
+		}
 
-		if (alloc_bts_buffer(cpu))
-			break;
+		if (!bts_err && alloc_bts_buffer(cpu))
+			bts_err = 1;
+
+		if (!pebs_err && alloc_pebs_buffer(cpu))
+			pebs_err = 1;
 
-		if (alloc_pebs_buffer(cpu))
-			break;
+		if (bts_err && pebs_err)
+			break;
+	}
+
+	if (bts_err) {
+		for_each_possible_cpu(cpu)
+			release_bts_buffer(cpu);
+	}
 
-		err = 0;
+	if (pebs_err) {
+		for_each_possible_cpu(cpu)
+			release_pebs_buffer(cpu);
 	}
 
-	if (err)
-		release_ds_buffers();
-	else {
+	if (bts_err && pebs_err) {
+		for_each_possible_cpu(cpu)
+			release_ds_buffer(cpu);
+	} else {
+		if (x86_pmu.bts && !bts_err)
+			x86_pmu.bts_active = 1;
+
+		if (x86_pmu.pebs && !pebs_err)
+			x86_pmu.pebs_active = 1;
+
 		for_each_online_cpu(cpu)
 			init_debug_store_on_cpu(cpu);
 	}
 
 	put_online_cpus();
 
-	return err;
+	return 0;
 }
 
 /*
@@ -287,7 +317,7 @@ static int intel_pmu_drain_bts_buffer(void)
 	if (!event)
 		return 0;
 
-	if (!ds)
+	if (!x86_pmu.bts_active)
 		return 0;
 
 	at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
@@ -557,7 +587,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	struct pebs_record_core *at, *top;
 	int n;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs_active)
 		return;
 
 	at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
@@ -599,7 +629,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	u64 status = 0;
 	int bit, n;
 
-	if (!ds || !x86_pmu.pebs)
+	if (!x86_pmu.pebs_active)
 		return;
 
 	at = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
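From userspace, the visible effect is that a BTS-style request now fails with
EOPNOTSUPP when its buffer could not be allocated, while other events keep
working, instead of PMU setup failing wholesale. A hypothetical demonstration
(illustrative only; it uses the BTS selection shown in the x86_setup_perfctr()
hunk, and the raw syscall because perf_event_open() has no glibc wrapper):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdio.h>
    #include <errno.h>

    int main(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        /* Branch events with period 1 are the BTS path per the hunk above. */
        attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
        attr.sample_period = 1;
        attr.exclude_kernel = 1;    /* BTS is user-mode only, per the hunk */
        attr.exclude_hv = 1;

        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0 && errno == EOPNOTSUPP)
            printf("BTS unavailable (e.g. buffer allocation failed); "
                   "other events remain usable\n");
        return 0;
    }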