about summary refs log tree commit diff stats
path: root/kernel/power
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/Kconfig    |  12
-rw-r--r--  kernel/power/Makefile   |   1
-rw-r--r--  kernel/power/console.c  |  27
-rw-r--r--  kernel/power/disk.c     |   4
-rw-r--r--  kernel/power/pm.c       | 205
-rw-r--r--  kernel/power/process.c  |  29
-rw-r--r--  kernel/power/snapshot.c |  41
7 files changed, 75 insertions(+), 244 deletions(-)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 79833170bb9c..b45da40e8d25 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -19,16 +19,6 @@ config PM
19 will issue the hlt instruction if nothing is to be done, thereby 19 will issue the hlt instruction if nothing is to be done, thereby
20 sending the processor to sleep and saving power. 20 sending the processor to sleep and saving power.
21 21
22config PM_LEGACY
23 bool "Legacy Power Management API (DEPRECATED)"
24 depends on PM
25 default n
26 ---help---
27 Support for pm_register() and friends. This old API is obsoleted
28 by the driver model.
29
30 If unsure, say N.
31
32config PM_DEBUG 22config PM_DEBUG
33 bool "Power Management Debug Support" 23 bool "Power Management Debug Support"
34 depends on PM 24 depends on PM
@@ -190,7 +180,7 @@ config APM_EMULATION
190 notification of APM "events" (e.g. battery status change). 180 notification of APM "events" (e.g. battery status change).
191 181
192 In order to use APM, you will need supporting software. For location 182 In order to use APM, you will need supporting software. For location
193 and more information, read <file:Documentation/pm.txt> and the 183 and more information, read <file:Documentation/power/pm.txt> and the
194 Battery Powered Linux mini-HOWTO, available from 184 Battery Powered Linux mini-HOWTO, available from
195 <http://www.tldp.org/docs.html#howto>. 185 <http://www.tldp.org/docs.html#howto>.
196 186
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index f7dfff28ecdb..597823b5b700 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,6 @@ EXTRA_CFLAGS += -DDEBUG
4endif 4endif
5 5
6obj-y := main.o 6obj-y := main.o
7obj-$(CONFIG_PM_LEGACY) += pm.o
8obj-$(CONFIG_PM_SLEEP) += process.o console.o 7obj-$(CONFIG_PM_SLEEP) += process.o console.o
9obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o 8obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o
10 9
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 89bcf4973ee5..b8628be2a465 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -7,17 +7,39 @@
7#include <linux/vt_kern.h> 7#include <linux/vt_kern.h>
8#include <linux/kbd_kern.h> 8#include <linux/kbd_kern.h>
9#include <linux/console.h> 9#include <linux/console.h>
10#include <linux/module.h>
10#include "power.h" 11#include "power.h"
11 12
12#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE) 13#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
13#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) 14#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
14 15
15static int orig_fgconsole, orig_kmsg; 16static int orig_fgconsole, orig_kmsg;
17static int disable_vt_switch;
18
19/*
20 * Normally during a suspend, we allocate a new console and switch to it.
21 * When we resume, we switch back to the original console. This switch
22 * can be slow, so on systems where the framebuffer can handle restoration
23 * of video registers anyways, there's little point in doing the console
24 * switch. This function allows you to disable it by passing it '0'.
25 */
26void pm_set_vt_switch(int do_switch)
27{
28 acquire_console_sem();
29 disable_vt_switch = !do_switch;
30 release_console_sem();
31}
32EXPORT_SYMBOL(pm_set_vt_switch);
16 33
17int pm_prepare_console(void) 34int pm_prepare_console(void)
18{ 35{
19 acquire_console_sem(); 36 acquire_console_sem();
20 37
38 if (disable_vt_switch) {
39 release_console_sem();
40 return 0;
41 }
42
21 orig_fgconsole = fg_console; 43 orig_fgconsole = fg_console;
22 44
23 if (vc_allocate(SUSPEND_CONSOLE)) { 45 if (vc_allocate(SUSPEND_CONSOLE)) {
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
50void pm_restore_console(void) 72void pm_restore_console(void)
51{ 73{
52 acquire_console_sem(); 74 acquire_console_sem();
75 if (disable_vt_switch) {
76 release_console_sem();
77 return;
78 }
53 set_console(orig_fgconsole); 79 set_console(orig_fgconsole);
54 release_console_sem(); 80 release_console_sem();
55 kmsg_redirect = orig_kmsg; 81 kmsg_redirect = orig_kmsg;
56 return;
57} 82}
58#endif 83#endif
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 859a8e59773a..14a656cdc652 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -391,7 +391,7 @@ int hibernation_platform_enter(void)
391 goto Close; 391 goto Close;
392 392
393 suspend_console(); 393 suspend_console();
394 error = device_suspend(PMSG_SUSPEND); 394 error = device_suspend(PMSG_HIBERNATE);
395 if (error) 395 if (error)
396 goto Resume_console; 396 goto Resume_console;
397 397
@@ -404,7 +404,7 @@ int hibernation_platform_enter(void)
404 goto Finish; 404 goto Finish;
405 405
406 local_irq_disable(); 406 local_irq_disable();
407 error = device_power_down(PMSG_SUSPEND); 407 error = device_power_down(PMSG_HIBERNATE);
408 if (!error) { 408 if (!error) {
409 hibernation_ops->enter(); 409 hibernation_ops->enter();
410 /* We should never get here */ 410 /* We should never get here */
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
deleted file mode 100644
index 60c73fa670d5..000000000000
--- a/kernel/power/pm.c
+++ /dev/null
@@ -1,205 +0,0 @@
1/*
2 * pm.c - Power management interface
3 *
4 * Copyright (C) 2000 Andrew Henroid
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/mm.h>
24#include <linux/slab.h>
25#include <linux/pm.h>
26#include <linux/pm_legacy.h>
27#include <linux/interrupt.h>
28#include <linux/mutex.h>
29
30/*
31 * Locking notes:
32 * pm_devs_lock can be a semaphore providing pm ops are not called
33 * from an interrupt handler (already a bad idea so no change here). Each
34 * change must be protected so that an unlink of an entry doesn't clash
35 * with a pm send - which is permitted to sleep in the current architecture
36 *
37 * Module unloads clashing with pm events now work out safely, the module
38 * unload path will block until the event has been sent. It may well block
39 * until a resume but that will be fine.
40 */
41
42static DEFINE_MUTEX(pm_devs_lock);
43static LIST_HEAD(pm_devs);
44
45/**
46 * pm_register - register a device with power management
47 * @type: device type
48 * @id: device ID
49 * @callback: callback function
50 *
51 * Add a device to the list of devices that wish to be notified about
52 * power management events. A &pm_dev structure is returned on success,
53 * on failure the return is %NULL.
54 *
55 * The callback function will be called in process context and
56 * it may sleep.
57 */
58
59struct pm_dev *pm_register(pm_dev_t type,
60 unsigned long id,
61 pm_callback callback)
62{
63 struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
64 if (dev) {
65 dev->type = type;
66 dev->id = id;
67 dev->callback = callback;
68
69 mutex_lock(&pm_devs_lock);
70 list_add(&dev->entry, &pm_devs);
71 mutex_unlock(&pm_devs_lock);
72 }
73 return dev;
74}
75
76/**
77 * pm_send - send request to a single device
78 * @dev: device to send to
79 * @rqst: power management request
80 * @data: data for the callback
81 *
82 * Issue a power management request to a given device. The
83 * %PM_SUSPEND and %PM_RESUME events are handled specially. The
84 * data field must hold the intended next state. No call is made
85 * if the state matches.
86 *
87 * BUGS: what stops two power management requests occurring in parallel
88 * and conflicting.
89 *
90 * WARNING: Calling pm_send directly is not generally recommended, in
91 * particular there is no locking against the pm_dev going away. The
92 * caller must maintain all needed locking or have 'inside knowledge'
93 * on the safety. Also remember that this function is not locked against
94 * pm_unregister. This means that you must handle SMP races on callback
95 * execution and unload yourself.
96 */
97
98static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
99{
100 int status = 0;
101 unsigned long prev_state, next_state;
102
103 if (in_interrupt())
104 BUG();
105
106 switch (rqst) {
107 case PM_SUSPEND:
108 case PM_RESUME:
109 prev_state = dev->state;
110 next_state = (unsigned long) data;
111 if (prev_state != next_state) {
112 if (dev->callback)
113 status = (*dev->callback)(dev, rqst, data);
114 if (!status) {
115 dev->state = next_state;
116 dev->prev_state = prev_state;
117 }
118 }
119 else {
120 dev->prev_state = prev_state;
121 }
122 break;
123 default:
124 if (dev->callback)
125 status = (*dev->callback)(dev, rqst, data);
126 break;
127 }
128 return status;
129}
130
131/*
132 * Undo incomplete request
133 */
134static void pm_undo_all(struct pm_dev *last)
135{
136 struct list_head *entry = last->entry.prev;
137 while (entry != &pm_devs) {
138 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
139 if (dev->state != dev->prev_state) {
140 /* previous state was zero (running) resume or
141 * previous state was non-zero (suspended) suspend
142 */
143 pm_request_t undo = (dev->prev_state
144 ? PM_SUSPEND:PM_RESUME);
145 pm_send(dev, undo, (void*) dev->prev_state);
146 }
147 entry = entry->prev;
148 }
149}
150
151/**
152 * pm_send_all - send request to all managed devices
153 * @rqst: power management request
154 * @data: data for the callback
155 *
156 * Issue a power management request to a all devices. The
157 * %PM_SUSPEND events are handled specially. Any device is
158 * permitted to fail a suspend by returning a non zero (error)
159 * value from its callback function. If any device vetoes a
160 * suspend request then all other devices that have suspended
161 * during the processing of this request are restored to their
162 * previous state.
163 *
164 * WARNING: This function takes the pm_devs_lock. The lock is not dropped until
165 * the callbacks have completed. This prevents races against pm locking
166 * functions, races against module unload pm_unregister code. It does
167 * mean however that you must not issue pm_ functions within the callback
168 * or you will deadlock and users will hate you.
169 *
170 * Zero is returned on success. If a suspend fails then the status
171 * from the device that vetoes the suspend is returned.
172 *
173 * BUGS: what stops two power management requests occurring in parallel
174 * and conflicting.
175 */
176
177int pm_send_all(pm_request_t rqst, void *data)
178{
179 struct list_head *entry;
180
181 mutex_lock(&pm_devs_lock);
182 entry = pm_devs.next;
183 while (entry != &pm_devs) {
184 struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
185 if (dev->callback) {
186 int status = pm_send(dev, rqst, data);
187 if (status) {
188 /* return devices to previous state on
189 * failed suspend request
190 */
191 if (rqst == PM_SUSPEND)
192 pm_undo_all(dev);
193 mutex_unlock(&pm_devs_lock);
194 return status;
195 }
196 }
197 entry = entry->next;
198 }
199 mutex_unlock(&pm_devs_lock);
200 return 0;
201}
202
203EXPORT_SYMBOL(pm_register);
204EXPORT_SYMBOL(pm_send_all);
205
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7c2118f9597f..f1d0b345c9ba 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -75,22 +75,15 @@ void refrigerator(void)
75 __set_current_state(save); 75 __set_current_state(save);
76} 76}
77 77
78static void fake_signal_wake_up(struct task_struct *p, int resume) 78static void fake_signal_wake_up(struct task_struct *p)
79{ 79{
80 unsigned long flags; 80 unsigned long flags;
81 81
82 spin_lock_irqsave(&p->sighand->siglock, flags); 82 spin_lock_irqsave(&p->sighand->siglock, flags);
83 signal_wake_up(p, resume); 83 signal_wake_up(p, 0);
84 spin_unlock_irqrestore(&p->sighand->siglock, flags); 84 spin_unlock_irqrestore(&p->sighand->siglock, flags);
85} 85}
86 86
87static void send_fake_signal(struct task_struct *p)
88{
89 if (task_is_stopped(p))
90 force_sig_specific(SIGSTOP, p);
91 fake_signal_wake_up(p, task_is_stopped(p));
92}
93
94static int has_mm(struct task_struct *p) 87static int has_mm(struct task_struct *p)
95{ 88{
96 return (p->mm && !(p->flags & PF_BORROWED_MM)); 89 return (p->mm && !(p->flags & PF_BORROWED_MM));
@@ -121,7 +114,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
121 if (freezing(p)) { 114 if (freezing(p)) {
122 if (has_mm(p)) { 115 if (has_mm(p)) {
123 if (!signal_pending(p)) 116 if (!signal_pending(p))
124 fake_signal_wake_up(p, 0); 117 fake_signal_wake_up(p);
125 } else { 118 } else {
126 if (with_mm_only) 119 if (with_mm_only)
127 ret = 0; 120 ret = 0;
@@ -135,7 +128,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
135 } else { 128 } else {
136 if (has_mm(p)) { 129 if (has_mm(p)) {
137 set_freeze_flag(p); 130 set_freeze_flag(p);
138 send_fake_signal(p); 131 fake_signal_wake_up(p);
139 } else { 132 } else {
140 if (with_mm_only) { 133 if (with_mm_only) {
141 ret = 0; 134 ret = 0;
@@ -182,15 +175,17 @@ static int try_to_freeze_tasks(int freeze_user_space)
182 if (frozen(p) || !freezeable(p)) 175 if (frozen(p) || !freezeable(p))
183 continue; 176 continue;
184 177
185 if (task_is_traced(p) && frozen(p->parent)) {
186 cancel_freezing(p);
187 continue;
188 }
189
190 if (!freeze_task(p, freeze_user_space)) 178 if (!freeze_task(p, freeze_user_space))
191 continue; 179 continue;
192 180
193 if (!freezer_should_skip(p)) 181 /*
182 * Now that we've done set_freeze_flag, don't
183 * perturb a task in TASK_STOPPED or TASK_TRACED.
184 * It is "frozen enough". If the task does wake
185 * up, it will immediately call try_to_freeze.
186 */
187 if (!task_is_stopped_or_traced(p) &&
188 !freezer_should_skip(p))
194 todo++; 189 todo++;
195 } while_each_thread(g, p); 190 } while_each_thread(g, p);
196 read_unlock(&tasklist_lock); 191 read_unlock(&tasklist_lock);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 72a020cabb4c..5f91a07c4eac 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -447,7 +447,7 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
447 * of @bm->cur_zone_bm are updated. 447 * of @bm->cur_zone_bm are updated.
448 */ 448 */
449 449
450static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn, 450static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
451 void **addr, unsigned int *bit_nr) 451 void **addr, unsigned int *bit_nr)
452{ 452{
453 struct zone_bitmap *zone_bm; 453 struct zone_bitmap *zone_bm;
@@ -461,7 +461,8 @@ static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
461 while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) { 461 while (pfn < zone_bm->start_pfn || pfn >= zone_bm->end_pfn) {
462 zone_bm = zone_bm->next; 462 zone_bm = zone_bm->next;
463 463
464 BUG_ON(!zone_bm); 464 if (!zone_bm)
465 return -EFAULT;
465 } 466 }
466 bm->cur.zone_bm = zone_bm; 467 bm->cur.zone_bm = zone_bm;
467 } 468 }
@@ -479,23 +480,40 @@ static void memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
479 pfn -= bb->start_pfn; 480 pfn -= bb->start_pfn;
480 *bit_nr = pfn % BM_BITS_PER_CHUNK; 481 *bit_nr = pfn % BM_BITS_PER_CHUNK;
481 *addr = bb->data + pfn / BM_BITS_PER_CHUNK; 482 *addr = bb->data + pfn / BM_BITS_PER_CHUNK;
483 return 0;
482} 484}
483 485
484static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn) 486static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
485{ 487{
486 void *addr; 488 void *addr;
487 unsigned int bit; 489 unsigned int bit;
490 int error;
488 491
489 memory_bm_find_bit(bm, pfn, &addr, &bit); 492 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
493 BUG_ON(error);
490 set_bit(bit, addr); 494 set_bit(bit, addr);
491} 495}
492 496
497static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
498{
499 void *addr;
500 unsigned int bit;
501 int error;
502
503 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
504 if (!error)
505 set_bit(bit, addr);
506 return error;
507}
508
493static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn) 509static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
494{ 510{
495 void *addr; 511 void *addr;
496 unsigned int bit; 512 unsigned int bit;
513 int error;
497 514
498 memory_bm_find_bit(bm, pfn, &addr, &bit); 515 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
516 BUG_ON(error);
499 clear_bit(bit, addr); 517 clear_bit(bit, addr);
500} 518}
501 519
@@ -503,8 +521,10 @@ static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
503{ 521{
504 void *addr; 522 void *addr;
505 unsigned int bit; 523 unsigned int bit;
524 int error;
506 525
507 memory_bm_find_bit(bm, pfn, &addr, &bit); 526 error = memory_bm_find_bit(bm, pfn, &addr, &bit);
527 BUG_ON(error);
508 return test_bit(bit, addr); 528 return test_bit(bit, addr);
509} 529}
510 530
@@ -709,8 +729,15 @@ static void mark_nosave_pages(struct memory_bitmap *bm)
709 region->end_pfn << PAGE_SHIFT); 729 region->end_pfn << PAGE_SHIFT);
710 730
711 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++) 731 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
712 if (pfn_valid(pfn)) 732 if (pfn_valid(pfn)) {
713 memory_bm_set_bit(bm, pfn); 733 /*
734 * It is safe to ignore the result of
735 * mem_bm_set_bit_check() here, since we won't
736 * touch the PFNs for which the error is
737 * returned anyway.
738 */
739 mem_bm_set_bit_check(bm, pfn);
740 }
714 } 741 }
715} 742}
716 743