path: root/kernel/events
author     Jiri Olsa <jolsa@redhat.com>           2014-04-07 05:04:08 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2014-05-19 08:44:55 -0400
commit     39af6b1678afa5880dda7e375cf3f9d395087f6d (patch)
tree       bfed90a00897a1f4588a1ed8163a138d341b7eaa /kernel/events
parent     0819b2e30ccb93edf04876237b6205eef84ec8d2 (diff)
perf: Prevent false warning in perf_swevent_add
The perf cpu offline callback takes down all cpu context events and
releases swhash->swevent_hlist.

This could race with a task context software event being scheduled on
this cpu via perf_swevent_add, while the cpu hotplug code has already
cleaned up the event's data. The race happens in the gap between the
cpu notifier code and the cpu actually being taken down. Note that only
cpu ctx events are terminated in the perf cpu hotplug code.

It's easily reproduced with:

  $ perf record -e faults perf bench sched pipe

while putting one of the cpus offline:

  # echo 0 > /sys/devices/system/cpu/cpu1/online

Console emits the following warning:

  WARNING: CPU: 1 PID: 2845 at kernel/events/core.c:5672 perf_swevent_add+0x18d/0x1a0()
  Modules linked in:
  CPU: 1 PID: 2845 Comm: sched-pipe Tainted: G W 3.14.0+ #256
  Hardware name: Intel Corporation Montevina platform/To be filled by O.E.M., BIOS AMVACRB1.86C.0066.B00.0805070703 05/07/2008
   0000000000000009 ffff880077233ab8 ffffffff81665a23 0000000000200005
   0000000000000000 ffff880077233af8 ffffffff8104732c 0000000000000046
   ffff88007467c800 0000000000000002 ffff88007a9cf2a0 0000000000000001
  Call Trace:
   [<ffffffff81665a23>] dump_stack+0x4f/0x7c
   [<ffffffff8104732c>] warn_slowpath_common+0x8c/0xc0
   [<ffffffff8104737a>] warn_slowpath_null+0x1a/0x20
   [<ffffffff8110fb3d>] perf_swevent_add+0x18d/0x1a0
   [<ffffffff811162ae>] event_sched_in.isra.75+0x9e/0x1f0
   [<ffffffff8111646a>] group_sched_in+0x6a/0x1f0
   [<ffffffff81083dd5>] ? sched_clock_local+0x25/0xa0
   [<ffffffff811167e6>] ctx_sched_in+0x1f6/0x450
   [<ffffffff8111757b>] perf_event_sched_in+0x6b/0xa0
   [<ffffffff81117a4b>] perf_event_context_sched_in+0x7b/0xc0
   [<ffffffff81117ece>] __perf_event_task_sched_in+0x43e/0x460
   [<ffffffff81096f1e>] ? put_lock_stats.isra.18+0xe/0x30
   [<ffffffff8107b3c8>] finish_task_switch+0xb8/0x100
   [<ffffffff8166a7de>] __schedule+0x30e/0xad0
   [<ffffffff81172dd2>] ? pipe_read+0x3e2/0x560
   [<ffffffff8166b45e>] ? preempt_schedule_irq+0x3e/0x70
   [<ffffffff8166b45e>] ? preempt_schedule_irq+0x3e/0x70
   [<ffffffff8166b464>] preempt_schedule_irq+0x44/0x70
   [<ffffffff816707f0>] retint_kernel+0x20/0x30
   [<ffffffff8109e60a>] ? lockdep_sys_exit+0x1a/0x90
   [<ffffffff812a4234>] lockdep_sys_exit_thunk+0x35/0x67
   [<ffffffff81679321>] ? sysret_check+0x5/0x56

Fix this by tracking the cpu hotplug state and emitting the WARN only
if the current cpu is initialized properly.

Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: stable@vger.kernel.org
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1396861448-10097-1-git-send-email-jolsa@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
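For illustration only, below is a minimal, self-contained user-space C
sketch of the pattern the fix uses: a per-cpu "online" flag that the cpu
init/exit paths flip, and a failed-lookup path that only warns while that
flag is set. The names fake_swhash, lookup_head and add_event are invented
here; only the online flag and the warn-only-when-online check mirror the
actual change to kernel/events/core.c shown in the diff further down.

/*
 * Illustrative sketch only -- not kernel code. Models the fix: keep an
 * "online" flag next to the per-cpu hash state and treat a failed head
 * lookup as a bug only while the cpu is still marked online.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_swhash {		/* stands in for struct swevent_htable */
	void *hlist;		/* stands in for swhash->swevent_hlist */
	bool online;		/* mirrors the new swhash->online field */
};

static void *lookup_head(struct fake_swhash *swhash)
{
	/* stands in for find_swevent_head(); fails once the hlist is gone */
	return swhash->hlist;
}

static int add_event(struct fake_swhash *swhash)
{
	void *head = lookup_head(swhash);

	if (!head) {
		/* only complain when the cpu was not being unplugged */
		if (swhash->online)
			fprintf(stderr, "WARN: missing swevent head\n");
		return -1;
	}
	/* the real code would hlist_add_head_rcu() the event here */
	return 0;
}

int main(void)
{
	struct fake_swhash cpu1 = { .hlist = NULL, .online = false };

	add_event(&cpu1);	/* cpu just went offline: fail silently */

	cpu1.online = true;
	add_event(&cpu1);	/* same failure on an online cpu: warn */

	return 0;
}

In the kernel diff below, the flag is flipped while holding
swhash->hlist_mutex in perf_event_init_cpu() and perf_event_exit_cpu(),
so a failed head lookup on a cpu that is being unplugged no longer
triggers the warning.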
Diffstat (limited to 'kernel/events')
-rw-r--r--   kernel/events/core.c   13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1d1ec6453a08..feb1329ca331 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5419,6 +5419,9 @@ struct swevent_htable {
 
 	/* Recursion avoidance in each contexts */
 	int				recursion[PERF_NR_CONTEXTS];
+
+	/* Keeps track of cpu being initialized/exited */
+	bool				online;
 };
 
 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
@@ -5665,8 +5668,14 @@ static int perf_swevent_add(struct perf_event *event, int flags)
 	hwc->state = !(flags & PERF_EF_START);
 
 	head = find_swevent_head(swhash, event);
-	if (WARN_ON_ONCE(!head))
+	if (!head) {
+		/*
+		 * We can race with cpu hotplug code. Do not
+		 * WARN if the cpu just got unplugged.
+		 */
+		WARN_ON_ONCE(swhash->online);
 		return -EINVAL;
+	}
 
 	hlist_add_head_rcu(&event->hlist_entry, head);
 
@@ -7845,6 +7854,7 @@ static void perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = true;
 	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
@@ -7902,6 +7912,7 @@ static void perf_event_exit_cpu(int cpu)
 	perf_event_exit_cpu_context(cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
+	swhash->online = false;
 	swevent_hlist_release(swhash);
 	mutex_unlock(&swhash->hlist_mutex);
 }