Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c	55
1 file changed, 23 insertions(+), 32 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b5e4ab2d427e..198a38883e64 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -133,6 +133,27 @@ static void cpu_hotplug_done(void)
 	mutex_unlock(&cpu_hotplug.lock);
 }
 
+/*
+ * Wait for currently running CPU hotplug operations to complete (if any) and
+ * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
+ * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
+ * hotplug path before performing hotplug operations. So acquiring that lock
+ * guarantees mutual exclusion from any currently running hotplug operations.
+ */
+void cpu_hotplug_disable(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 1;
+	cpu_maps_update_done();
+}
+
+void cpu_hotplug_enable(void)
+{
+	cpu_maps_update_begin();
+	cpu_hotplug_disabled = 0;
+	cpu_maps_update_done();
+}
+
 #else /* #if CONFIG_HOTPLUG_CPU */
 static void cpu_hotplug_begin(void) {}
 static void cpu_hotplug_done(void) {}
@@ -541,36 +562,6 @@ static int __init alloc_frozen_cpus(void)
 core_initcall(alloc_frozen_cpus);
 
 /*
- * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
- * hotplug when tasks are about to be frozen. Also, don't allow the freezer
- * to continue until any currently running CPU hotplug operation gets
- * completed.
- * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
- * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
- * CPU hotplug path and released only after it is complete. Thus, we
- * (and hence the freezer) will block here until any currently running CPU
- * hotplug operation gets completed.
- */
-void cpu_hotplug_disable_before_freeze(void)
-{
-	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 1;
-	cpu_maps_update_done();
-}
-
-
-/*
- * When tasks have been thawed, re-enable regular CPU hotplug (which had been
- * disabled while beginning to freeze tasks).
- */
-void cpu_hotplug_enable_after_thaw(void)
-{
-	cpu_maps_update_begin();
-	cpu_hotplug_disabled = 0;
-	cpu_maps_update_done();
-}
-
-/*
  * When callbacks for CPU hotplug notifications are being executed, we must
  * ensure that the state of the system with respect to the tasks being frozen
  * or not, as reported by the notification, remains unchanged *throughout the
@@ -589,12 +580,12 @@ cpu_hotplug_pm_callback(struct notifier_block *nb,
 
 	case PM_SUSPEND_PREPARE:
 	case PM_HIBERNATION_PREPARE:
-		cpu_hotplug_disable_before_freeze();
+		cpu_hotplug_disable();
		break;
 
 	case PM_POST_SUSPEND:
 	case PM_POST_HIBERNATION:
-		cpu_hotplug_enable_after_thaw();
+		cpu_hotplug_enable();
 		break;
 
 	default:
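
A minimal usage sketch of the new generic helpers, for readers of this patch; it is illustrative only and not part of the diff above. It assumes the declarations of cpu_hotplug_disable()/cpu_hotplug_enable() are visible to callers (e.g. via linux/cpu.h, which this view, limited to kernel/, does not show), and example_keep_cpus_stable() is a made-up caller name. Per the comment in the first hunk, cpu_maps_update_begin() takes 'cpu_add_remove_lock', the same lock held across a hotplug operation, so the disable call blocks until any in-flight operation finishes and then prevents further sysfs-initiated hotplug until re-enabled.

#include <linux/cpu.h>

/* Illustrative sketch, not part of this patch. */
static void example_keep_cpus_stable(void)
{
	/*
	 * Blocks until any running hotplug operation completes
	 * ('cpu_add_remove_lock' is held for the whole operation),
	 * then sets cpu_hotplug_disabled so new requests are refused.
	 */
	cpu_hotplug_disable();

	/* ... work that must see an unchanging set of online CPUs ... */

	cpu_hotplug_enable();
}

The PM notifier in the last hunk follows the same pattern: disable on PM_SUSPEND_PREPARE/PM_HIBERNATION_PREPARE, re-enable on PM_POST_SUSPEND/PM_POST_HIBERNATION.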