Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c          |  74
-rw-r--r--  kernel/events/core.c  |  97
-rw-r--r--  kernel/freezer.c      |   2
-rw-r--r--  kernel/power/qos.c    |   3
-rw-r--r--  kernel/sched.c        |   2
5 files changed, 78 insertions(+), 100 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 12b7458f23b1..aa39dd7a3846 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,6 +15,7 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 #include <linux/gfp.h>
+#include <linux/suspend.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -476,6 +477,79 @@ static int alloc_frozen_cpus(void)
 	return 0;
 }
 core_initcall(alloc_frozen_cpus);
+
+/*
+ * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
+ * hotplug when tasks are about to be frozen. Also, don't allow the freezer
+ * to continue until any currently running CPU hotplug operation gets
+ * completed.
+ * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
+ * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
+ * CPU hotplug path and released only after it is complete. Thus, we
+ * (and hence the freezer) will block here until any currently running CPU
+ * hotplug operation gets completed.
+ */
+void cpu_hotplug_disable_before_freeze(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 1;
+	cpu_maps_update_done();
+}
+
+
+/*
+ * When tasks have been thawed, re-enable regular CPU hotplug (which had been
+ * disabled while beginning to freeze tasks).
+ */
+void cpu_hotplug_enable_after_thaw(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	cpu_maps_update_done();
+}
+
+/*
+ * When callbacks for CPU hotplug notifications are being executed, we must
+ * ensure that the state of the system with respect to the tasks being frozen
+ * or not, as reported by the notification, remains unchanged *throughout the
+ * duration* of the execution of the callbacks.
+ * Hence we need to prevent the freezer from racing with regular CPU hotplug.
+ *
+ * This synchronization is implemented by mutually excluding regular CPU
+ * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
+ * Hibernate notifications.
+ */
+static int
+cpu_hotplug_pm_callback(struct notifier_block *nb,
+			unsigned long action, void *ptr)
+{
+	switch (action) {
+
+	case PM_SUSPEND_PREPARE:
+	case PM_HIBERNATION_PREPARE:
+		cpu_hotplug_disable_before_freeze();
+		break;
+
+	case PM_POST_SUSPEND:
+	case PM_POST_HIBERNATION:
+		cpu_hotplug_enable_after_thaw();
+		break;
+
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+int cpu_hotplug_pm_sync_init(void)
+{
+	pm_notifier(cpu_hotplug_pm_callback, 0);
+	return 0;
+}
+core_initcall(cpu_hotplug_pm_sync_init);
+
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
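The comment above cpu_hotplug_disable_before_freeze() leans on a locking invariant the hunk itself does not show: cpu_maps_update_begin()/cpu_maps_update_done() take cpu_add_remove_lock, the same mutex the regular hotplug entry points hold for the whole duration of an operation. A minimal sketch of that arrangement, reconstructed from kernel/cpu.c of the same era (helper bodies abridged; treat the details as an assumption, not part of this patch):

/* Sketch, not part of the patch: the lock behind cpu_maps_update_*(). */
static DEFINE_MUTEX(cpu_add_remove_lock);

void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * The regular hotplug entry point holds the same mutex end to end and
 * honours the flag, so the freezer's "disable" both waits for in-flight
 * operations and fences off new ones.
 */
int cpu_down(unsigned int cpu)
{
	int err = -EBUSY;

	cpu_maps_update_begin();
	if (!cpu_hotplug_disabled)
		err = _cpu_down(cpu, 0);	/* actual teardown, elided */
	cpu_maps_update_done();
	return err;
}

Since pm_notifier(cpu_hotplug_pm_callback, 0) registers at default priority, the disable runs during PM_SUSPEND_PREPARE, before the freezer begins freezing tasks.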
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 12a0287e0358..e1253faa34dd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -29,7 +29,6 @@
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
-#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -6853,7 +6852,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
 
 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
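Dropping the !swhash->swevent_hlist test works because the hotplug-down path now runs on every suspend as well, and it always releases the hlist before the CPU can come back up, so by CPU_UP_PREPARE the pointer is NULL whenever the refcount is still positive. The release side of that invariant looks roughly like this (reconstructed; assumed to match kernel/events/core.c, not shown in the hunk):

/* Sketch, not part of the patch: the release side of the invariant. */
static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swhash->swevent_hlist;

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}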
@@ -6942,14 +6941,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
-	/*
-	 * Ignore suspend/resume action, the perf_pm_notifier will
-	 * take care of that.
-	 */
-	if (action & CPU_TASKS_FROZEN)
-		return NOTIFY_OK;
-
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {
 
 	case CPU_UP_PREPARE:
 	case CPU_DOWN_FAILED:
@@ -6968,90 +6960,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static void perf_pm_resume_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-		if (ctx)
-			ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		perf_event_sched_in(cpuctx, ctx, current);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_resume_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_suspend_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
-	switch (action) {
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		return perf_resume();
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		return perf_suspend();
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static struct notifier_block perf_pm_notifier = {
-	.notifier_call = perf_pm,
-};
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -7066,7 +6974,6 @@ void __init perf_event_init(void)
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
 	register_reboot_notifier(&perf_reboot_notifier);
-	register_pm_notifier(&perf_pm_notifier);
 
 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
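With perf_pm_notifier gone, perf_cpu_notify must itself handle the hotplug events generated during suspend/resume. Those arrive with CPU_TASKS_FROZEN ORed into the action, and masking it folds each frozen action onto its ordinary counterpart, so one switch covers both. A standalone illustration (the constant values are assumed to mirror include/linux/cpu.h):

#include <stdio.h>

/* Values assumed to mirror include/linux/cpu.h, for illustration only. */
#define CPU_UP_PREPARE		0x0003
#define CPU_TASKS_FROZEN	0x0010
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)

int main(void)
{
	unsigned long action = CPU_UP_PREPARE_FROZEN;	/* seen during resume */

	/* The single switch in perf_cpu_notify() relies on this identity: */
	if ((action & ~CPU_TASKS_FROZEN) == CPU_UP_PREPARE)
		printf("frozen variant handled as CPU_UP_PREPARE\n");
	return 0;
}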
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 66a594e8ad2f..7b01de98bb6a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -67,7 +67,7 @@ static void fake_signal_wake_up(struct task_struct *p)
 	unsigned long flags;
 
 	spin_lock_irqsave(&p->sighand->siglock, flags);
-	signal_wake_up(p, 1);
+	signal_wake_up(p, 0);
 	spin_unlock_irqrestore(&p->sighand->siglock, flags);
 }
 
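The freezer.c one-liner flips signal_wake_up()'s second argument from 1 to 0. That argument decides whether the fake wakeup also kicks tasks in TASK_STOPPED/TASK_TRACED; with 0, only interruptible sleepers are woken, so the freezer no longer disturbs stopped and traced tasks. A sketch of the callee (kernel/signal.c of the period, reconstructed from memory; treat the details as an assumption):

/* Sketch of signal_wake_up(), not part of this patch. */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means "wake it up in the stopped/traced case".
	 * Passing resume == 0, as fake_signal_wake_up() now does, omits it.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;

	if (!wake_up_state(t, mask))
		kick_process(t);
}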
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 1c1797dd1d1d..5167d996cd02 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -386,8 +386,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
 		filp->private_data = req;
 
-		if (filp->private_data)
-			return 0;
+		return 0;
 	}
 	return -EPERM;
 }
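The qos.c hunk deletes a branch that could never be false: filp->private_data was assigned req immediately above, and req was already NULL-checked after allocation, so the -EPERM fallthrough was unreachable from inside the block. The surrounding function, reconstructed for context (assumed shape; only the last few lines appear in the hunk):

static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
	long pm_qos_class;

	pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
	if (pm_qos_class >= 0) {
		struct pm_qos_request *req;

		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req)
			return -ENOMEM;		/* so req can't be NULL below */

		pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
		filp->private_data = req;

		return 0;	/* the removed NULL test here was always true */
	}
	return -EPERM;
}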
diff --git a/kernel/sched.c b/kernel/sched.c
index d87c6e5d4e8c..0e9344a71be3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7087,8 +7087,6 @@ static int __init isolated_cpu_setup(char *str)
 
 __setup("isolcpus=", isolated_cpu_setup);
 
-#define SD_NODES_PER_DOMAIN 16
-
 #ifdef CONFIG_NUMA
 
 /**