author	Alexander Shishkin <alexander.shishkin@linux.intel.com>	2016-03-02 06:24:14 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-03-08 06:18:31 -0500
commit	927a5570855836e5d5859a80ce7e91e963545e8f (patch)
tree	0eed4ff4664a465d7a6d9bb81413c4d04ee9fd8c
parent	b9461ba85f11de61d11ad4e13c806ff174ddf577 (diff)
perf/core: Fix perf_sched_count derailment
The error path in perf_event_open() is such that asking for a sampling
event on a PMU that doesn't generate interrupts will end up in dropping
the perf_sched_count even though it hasn't been incremented for this
event yet. Given a sufficient amount of these calls, we'll end up
disabling scheduler's jump label even though we'd still have active
events in the system, thereby facilitating the arrival of the infernal
regions upon us.

I'm fixing this by moving account_event() inside perf_event_alloc().

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <stable@vger.kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/1456917854-29427-1-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/events/core.c | 7
1 file changed, 3 insertions(+), 4 deletions(-)
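Why moving the call matters: account_event() increments the bookkeeping that unaccount_event() later decrements from _free_event(). If accounting only happens after several can-fail checks in perf_event_open(), any error path that reaches free_event() decrements a count that was never incremented. The following user-space sketch models that pairing with a plain integer in place of the kernel's perf_sched_count static key; the struct, helpers, and the failure condition are simplified assumptions for illustration, not the actual kernel implementation:

/* Minimal model of the accounting symmetry (simplified, not kernel code). */
#include <stdio.h>
#include <stdlib.h>

static int sched_count;			/* stands in for perf_sched_count */

struct event {
	int sampling;
};

static void account_event(struct event *e)   { (void)e; sched_count++; }
static void unaccount_event(struct event *e) { (void)e; sched_count--; }

/* Accounting inside allocation: every event that later reaches
 * free_event() has been accounted exactly once. */
static struct event *event_alloc(int sampling)
{
	struct event *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->sampling = sampling;
	account_event(e);		/* symmetric to unaccount_event() below */
	return e;
}

static void free_event(struct event *e)
{
	unaccount_event(e);		/* always balanced with account_event() */
	free(e);
}

/* Error path shaped like perf_event_open(): a late check fails (say, a
 * sampling event on a PMU without interrupts) and the event is freed. */
static int event_open(int sampling, int pmu_has_interrupts)
{
	struct event *e = event_alloc(sampling);

	if (!e)
		return -1;
	if (sampling && !pmu_has_interrupts) {
		free_event(e);		/* count returns to its prior value */
		return -1;
	}
	free_event(e);
	return 0;
}

int main(void)
{
	event_open(1, 0);		/* failing open: no net change */
	printf("sched_count = %d\n", sched_count);	/* prints 0, not -1 */
	return 0;
}

With the old placement, allocation would not account and a failing open would still free the event, driving the count negative; enough such failures would disable the scheduler jump label while real events were still live, which is the derailment the patch below removes.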
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5dcc0bd08d11..b7231498de47 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8000,6 +8000,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		}
 	}
 
+	/* symmetric to unaccount_event() in _free_event() */
+	account_event(event);
+
 	return event;
 
 err_per_task:
@@ -8363,8 +8366,6 @@ SYSCALL_DEFINE5(perf_event_open,
 		}
 	}
 
-	account_event(event);
-
 	/*
 	 * Special case software events and allow them to be part of
 	 * any hardware group.
@@ -8661,8 +8662,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 	/* Mark owner so we could distinguish it from user events. */
 	event->owner = TASK_TOMBSTONE;
 
-	account_event(event);
-
 	ctx = find_get_context(event->pmu, task, event);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);