aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-11-03 10:44:04 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-11-03 10:44:04 -0400
commit4536e4d1d21c8172402a2217b0fa1880665ace36 (patch)
tree0bcf19fda146cb4afbdd6487218fb6ac16b80fcd
parent43672a0784707d795556b1f93925da8b8e797d03 (diff)
Revert "perf: Add PM notifiers to fix CPU hotplug races"
This reverts commit 144060fee07e9c22e179d00819c83c86fbcbf82c. It causes a resume regression for Andi on his Acer Aspire 1830T post 3.1. The screen just stays black after wakeup. Also, it really looks like the wrong way to suspend and resume perf events: I think they should be done as part of the CPU suspend and resume, rather than as a notifier that does smp_call_function(). Reported-by: Andi Kleen <andi@firstfloor.org> Acked-by: Ingo Molnar <mingo@elte.hu> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Rafael J. Wysocki <rjw@sisk.pl> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--kernel/events/core.c97
1 files changed, 2 insertions, 95 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 12a0287e0358..e1253faa34dd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -29,7 +29,6 @@
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
-#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -6853,7 +6852,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;

 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6942,14 +6941,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;

-	/*
-	 * Ignore suspend/resume action, the perf_pm_notifier will
-	 * take care of that.
-	 */
-	if (action & CPU_TASKS_FROZEN)
-		return NOTIFY_OK;
-
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {

 	case CPU_UP_PREPARE:
 	case CPU_DOWN_FAILED:
@@ -6968,90 +6960,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }

-static void perf_pm_resume_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-		if (ctx)
-			ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		perf_event_sched_in(cpuctx, ctx, current);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_resume_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_suspend_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
-	switch (action) {
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		return perf_resume();
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		return perf_suspend();
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static struct notifier_block perf_pm_notifier = {
-	.notifier_call = perf_pm,
-};
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -7066,7 +6974,6 @@ void __init perf_event_init(void)
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
 	register_reboot_notifier(&perf_reboot_notifier);
-	register_pm_notifier(&perf_pm_notifier);

 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);