about · summary · refs · log · tree · commit · diff · stats
path: root/arch/x86/kernel/cpu/perf_event.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2010-10-19 08:22:50 -0400
committerIngo Molnar <mingo@elte.hu>2010-10-22 08:18:26 -0400
commit6809b6ea73f7291f2e495d40397f1172c9caa77e (patch)
tree3653223bfd80867056da5e4f57be9512c67d706f /arch/x86/kernel/cpu/perf_event.c
parent5553be2620ac901c21a25657bd5b59f73254e6d5 (diff)
perf, x86: Less disastrous PEBS/BTS buffer allocation failure
Currently PEBS/BTS buffers are allocated when we instantiate the first event; when this fails, everything fails. This is a problem because esp. BTS tries to allocate a rather large buffer (64K), which can easily fail.

This patch changes the logic such that when either buffer allocation fails, we simply don't allow events that would use these facilities, but continue functioning for all other events.

This logic comes from a much larger patch proposed by Stephane.

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <20101019134808.354429461@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--arch/x86/kernel/cpu/perf_event.c5
1 files changed, 3 insertions, 2 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f369c53315a..61e78f65106 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -238,6 +238,7 @@ struct x86_pmu {
 	 * Intel DebugStore bits
 	 */
 	int		bts, pebs;
+	int		bts_active, pebs_active;
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
@@ -478,7 +479,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
 	    (hwc->sample_period == 1)) {
 		/* BTS is not supported by this architecture. */
-		if (!x86_pmu.bts)
+		if (!x86_pmu.bts_active)
 			return -EOPNOTSUPP;

 		/* BTS is currently only allowed for user-mode. */
@@ -497,7 +498,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 	int precise = 0;

 	/* Support for constant skid */
-	if (x86_pmu.pebs) {
+	if (x86_pmu.pebs_active) {
 		precise++;

 		/* Support for IP fixup */