-rw-r--r--Documentation/power/devices.txt34
-rw-r--r--arch/alpha/include/asm/suspend.h6
-rw-r--r--arch/arm/include/asm/suspend.h4
-rw-r--r--arch/ia64/include/asm/suspend.h1
-rw-r--r--arch/m68k/include/asm/suspend.h6
-rw-r--r--arch/mips/include/asm/suspend.h6
-rw-r--r--arch/s390/include/asm/suspend.h5
-rw-r--r--arch/um/include/asm/suspend.h4
-rw-r--r--arch/x86/kernel/acpi/sleep.c2
-rw-r--r--arch/x86/kernel/apm_32.c14
-rw-r--r--arch/x86/power/Makefile2
-rw-r--r--arch/x86/power/cpu.c (renamed from arch/x86/power/cpu_64.c)165
-rw-r--r--arch/x86/power/cpu_32.c148
-rw-r--r--drivers/base/platform.c36
-rw-r--r--drivers/base/power/main.c94
-rw-r--r--drivers/base/sys.c16
-rw-r--r--drivers/xen/manage.c16
-rw-r--r--include/linux/device.h5
-rw-r--r--include/linux/interrupt.h6
-rw-r--r--include/linux/pm.h11
-rw-r--r--include/linux/suspend.h18
-rw-r--r--kernel/kexec.c14
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/Makefile5
-rw-r--r--kernel/power/hibernate.c (renamed from kernel/power/disk.c)34
-rw-r--r--kernel/power/hibernate_nvs.c135
-rw-r--r--kernel/power/main.c521
-rw-r--r--kernel/power/power.h25
-rw-r--r--kernel/power/snapshot.c80
-rw-r--r--kernel/power/suspend.c300
-rw-r--r--kernel/power/suspend_test.c187
-rw-r--r--kernel/power/swsusp.c198
-rw-r--r--mm/vmscan.c4
33 files changed, 984 insertions, 1122 deletions
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 421e7d00ffd0..c9abbd86bc18 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -75,9 +75,6 @@ may need to apply in domain-specific ways to their devices:
75struct bus_type { 75struct bus_type {
76 ... 76 ...
77 int (*suspend)(struct device *dev, pm_message_t state); 77 int (*suspend)(struct device *dev, pm_message_t state);
78 int (*suspend_late)(struct device *dev, pm_message_t state);
79
80 int (*resume_early)(struct device *dev);
81 int (*resume)(struct device *dev); 78 int (*resume)(struct device *dev);
82}; 79};
83 80
@@ -226,20 +223,7 @@ The phases are seen by driver notifications issued in this order:
226 223
227 This call should handle parts of device suspend logic that require 224 This call should handle parts of device suspend logic that require
228 sleeping. It probably does work to quiesce the device which hasn't 225 sleeping. It probably does work to quiesce the device which hasn't
229 been abstracted into class.suspend() or bus.suspend_late(). 226 been abstracted into class.suspend().
230
231 3 bus.suspend_late(dev, message) is called with IRQs disabled, and
232 with only one CPU active. Until the bus.resume_early() phase
233 completes (see later), IRQs are not enabled again. This method
234 won't be exposed by all busses; for message based busses like USB,
235 I2C, or SPI, device interactions normally require IRQs. This bus
236 call may be morphed into a driver call with bus-specific parameters.
237
238 This call might save low level hardware state that might otherwise
239 be lost in the upcoming low power state, and actually put the
240 device into a low power state ... so that in some cases the device
241 may stay partly usable until this late. This "late" call may also
242 help when coping with hardware that behaves badly.
243 227
244The pm_message_t parameter is currently used to refine those semantics 228The pm_message_t parameter is currently used to refine those semantics
245(described later). 229(described later).
@@ -351,19 +335,11 @@ devices processing each phase's calls before the next phase begins.
351 335
352The phases are seen by driver notifications issued in this order: 336The phases are seen by driver notifications issued in this order:
353 337
354 1 bus.resume_early(dev) is called with IRQs disabled, and with 338 1 bus.resume(dev) reverses the effects of bus.suspend(). This may
355 only one CPU active. As with bus.suspend_late(), this method 339 be morphed into a device driver call with bus-specific parameters;
356 won't be supported on busses that require IRQs in order to 340 implementations may sleep.
357 interact with devices.
358
359 This reverses the effects of bus.suspend_late().
360
361 2 bus.resume(dev) is called next. This may be morphed into a device
362 driver call with bus-specific parameters; implementations may sleep.
363
364 This reverses the effects of bus.suspend().
365 341
366 3 class.resume(dev) is called for devices associated with a class 342 2 class.resume(dev) is called for devices associated with a class
367 that has such a method. Implementations may sleep. 343 that has such a method. Implementations may sleep.
368 344
369 This reverses the effects of class.suspend(), and would usually 345 This reverses the effects of class.suspend(), and would usually
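For illustration only (not part of the patch): with suspend_late()/resume_early() removed from struct bus_type, a bus that still needs IRQs-off hooks would supply them through the dev_pm_ops pointer that remains in the structure. A minimal sketch, assuming a hypothetical "foo" bus with empty callback bodies:

    #include <linux/device.h>
    #include <linux/pm.h>

    /* Hypothetical bus providing the equivalent of the removed
     * suspend_late()/resume_early() hooks via dev_pm_ops. */
    static int foo_bus_suspend(struct device *dev)		/* may sleep */
    {
            return 0;
    }

    static int foo_bus_suspend_noirq(struct device *dev)	/* IRQs off, one CPU */
    {
            return 0;
    }

    static int foo_bus_resume_noirq(struct device *dev)	/* IRQs still off */
    {
            return 0;
    }

    static int foo_bus_resume(struct device *dev)		/* may sleep */
    {
            return 0;
    }

    static struct dev_pm_ops foo_bus_pm_ops = {
            .suspend	= foo_bus_suspend,
            .suspend_noirq	= foo_bus_suspend_noirq,
            .resume_noirq	= foo_bus_resume_noirq,
            .resume		= foo_bus_resume,
    };

    struct bus_type foo_bus_type = {
            .name	= "foo",
            .pm	= &foo_bus_pm_ops,
    };

The .suspend_noirq/.resume_noirq members take over the role that the documentation used to describe for bus.suspend_late()/bus.resume_early().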
diff --git a/arch/alpha/include/asm/suspend.h b/arch/alpha/include/asm/suspend.h
deleted file mode 100644
index c7042d575851..000000000000
--- a/arch/alpha/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ALPHA_SUSPEND_H
2#define __ALPHA_SUSPEND_H
3
4/* Dummy include. */
5
6#endif /* __ALPHA_SUSPEND_H */
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
deleted file mode 100644
index cf0d0bdee74d..000000000000
--- a/arch/arm/include/asm/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef _ASMARM_SUSPEND_H
2#define _ASMARM_SUSPEND_H
3
4#endif
diff --git a/arch/ia64/include/asm/suspend.h b/arch/ia64/include/asm/suspend.h
deleted file mode 100644
index b05bbb6074e2..000000000000
--- a/arch/ia64/include/asm/suspend.h
+++ /dev/null
@@ -1 +0,0 @@
1/* dummy (must be non-empty to prevent prejudicial removal...) */
diff --git a/arch/m68k/include/asm/suspend.h b/arch/m68k/include/asm/suspend.h
deleted file mode 100644
index 57b3ddb4d269..000000000000
--- a/arch/m68k/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _M68K_SUSPEND_H
2#define _M68K_SUSPEND_H
3
4/* Dummy include. */
5
6#endif /* _M68K_SUSPEND_H */
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 2562f8f9be0e..000000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SUSPEND_H
2#define __ASM_SUSPEND_H
3
4/* Somewhen... Maybe :-) */
5
6#endif /* __ASM_SUSPEND_H */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644
index 1f34580e67a7..000000000000
--- a/arch/s390/include/asm/suspend.h
+++ /dev/null
@@ -1,5 +0,0 @@
1#ifndef __ASM_S390_SUSPEND_H
2#define __ASM_S390_SUSPEND_H
3
4#endif
5
diff --git a/arch/um/include/asm/suspend.h b/arch/um/include/asm/suspend.h
deleted file mode 100644
index f4e8e007f468..000000000000
--- a/arch/um/include/asm/suspend.h
+++ /dev/null
@@ -1,4 +0,0 @@
1#ifndef __UM_SUSPEND_H
2#define __UM_SUSPEND_H
3
4#endif
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 7c243a2c5115..ca93638ba430 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -104,7 +104,7 @@ int acpi_save_state_mem(void)
104 initial_gs = per_cpu_offset(smp_processor_id()); 104 initial_gs = per_cpu_offset(smp_processor_id());
105#endif 105#endif
106 initial_code = (unsigned long)wakeup_long64; 106 initial_code = (unsigned long)wakeup_long64;
107 saved_magic = 0x123456789abcdef0; 107 saved_magic = 0x123456789abcdef0L;
108#endif /* CONFIG_64BIT */ 108#endif /* CONFIG_64BIT */
109 109
110 return 0; 110 return 0;
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 49e0939bac42..79302e9a33a4 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1233,9 +1233,9 @@ static int suspend(int vetoable)
1233 int err; 1233 int err;
1234 struct apm_user *as; 1234 struct apm_user *as;
1235 1235
1236 device_suspend(PMSG_SUSPEND); 1236 dpm_suspend_start(PMSG_SUSPEND);
1237 1237
1238 device_power_down(PMSG_SUSPEND); 1238 dpm_suspend_noirq(PMSG_SUSPEND);
1239 1239
1240 local_irq_disable(); 1240 local_irq_disable();
1241 sysdev_suspend(PMSG_SUSPEND); 1241 sysdev_suspend(PMSG_SUSPEND);
@@ -1259,9 +1259,9 @@ static int suspend(int vetoable)
1259 sysdev_resume(); 1259 sysdev_resume();
1260 local_irq_enable(); 1260 local_irq_enable();
1261 1261
1262 device_power_up(PMSG_RESUME); 1262 dpm_resume_noirq(PMSG_RESUME);
1263 1263
1264 device_resume(PMSG_RESUME); 1264 dpm_resume_end(PMSG_RESUME);
1265 queue_event(APM_NORMAL_RESUME, NULL); 1265 queue_event(APM_NORMAL_RESUME, NULL);
1266 spin_lock(&user_list_lock); 1266 spin_lock(&user_list_lock);
1267 for (as = user_list; as != NULL; as = as->next) { 1267 for (as = user_list; as != NULL; as = as->next) {
@@ -1277,7 +1277,7 @@ static void standby(void)
1277{ 1277{
1278 int err; 1278 int err;
1279 1279
1280 device_power_down(PMSG_SUSPEND); 1280 dpm_suspend_noirq(PMSG_SUSPEND);
1281 1281
1282 local_irq_disable(); 1282 local_irq_disable();
1283 sysdev_suspend(PMSG_SUSPEND); 1283 sysdev_suspend(PMSG_SUSPEND);
@@ -1291,7 +1291,7 @@ static void standby(void)
1291 sysdev_resume(); 1291 sysdev_resume();
1292 local_irq_enable(); 1292 local_irq_enable();
1293 1293
1294 device_power_up(PMSG_RESUME); 1294 dpm_resume_noirq(PMSG_RESUME);
1295} 1295}
1296 1296
1297static apm_event_t get_event(void) 1297static apm_event_t get_event(void)
@@ -1376,7 +1376,7 @@ static void check_events(void)
1376 ignore_bounce = 1; 1376 ignore_bounce = 1;
1377 if ((event != APM_NORMAL_RESUME) 1377 if ((event != APM_NORMAL_RESUME)
1378 || (ignore_normal_resume == 0)) { 1378 || (ignore_normal_resume == 0)) {
1379 device_resume(PMSG_RESUME); 1379 dpm_resume_end(PMSG_RESUME);
1380 queue_event(event, NULL); 1380 queue_event(event, NULL);
1381 } 1381 }
1382 ignore_normal_resume = 0; 1382 ignore_normal_resume = 0;
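The apm_32.c hunks above call the renamed entry points in the order the PM core expects. A condensed sketch of that ordering, not code from the patch; enter_low_power_state() is a made-up placeholder for the APM BIOS call:

    #include <linux/pm.h>
    #include <linux/interrupt.h>

    /* Hypothetical placeholder for the platform/firmware low-power call. */
    static void enter_low_power_state(void) { }

    static int example_suspend(void)
    {
            int err;

            err = dpm_suspend_start(PMSG_SUSPEND);	/* ->prepare() + ->suspend(), may sleep */
            if (err)
                    return err;

            err = dpm_suspend_noirq(PMSG_SUSPEND);	/* disable device IRQs, run "noirq" callbacks */
            if (err)
                    goto resume_devices;		/* dpm_suspend_noirq() already undid its own work */

            local_irq_disable();
            err = sysdev_suspend(PMSG_SUSPEND);	/* system devices, IRQs off */
            if (!err) {
                    enter_low_power_state();
                    sysdev_resume();
            }
            local_irq_enable();

            dpm_resume_noirq(PMSG_RESUME);		/* "noirq" callbacks, re-enable device IRQs */
    resume_devices:
            dpm_resume_end(PMSG_RESUME);		/* ->resume() + ->complete() */
            return err;
    }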
diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile
index 58b32db33125..de2abbd07544 100644
--- a/arch/x86/power/Makefile
+++ b/arch/x86/power/Makefile
@@ -3,5 +3,5 @@
3nostackp := $(call cc-option, -fno-stack-protector) 3nostackp := $(call cc-option, -fno-stack-protector)
4CFLAGS_cpu_$(BITS).o := $(nostackp) 4CFLAGS_cpu_$(BITS).o := $(nostackp)
5 5
6obj-$(CONFIG_PM_SLEEP) += cpu_$(BITS).o 6obj-$(CONFIG_PM_SLEEP) += cpu.o
7obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o 7obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o
diff --git a/arch/x86/power/cpu_64.c b/arch/x86/power/cpu.c
index 5343540f2607..d277ef1eea51 100644
--- a/arch/x86/power/cpu_64.c
+++ b/arch/x86/power/cpu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Suspend and hibernation support for x86-64 2 * Suspend support specific for i386/x86-64.
3 * 3 *
4 * Distribute under GPLv2 4 * Distribute under GPLv2
5 * 5 *
@@ -8,18 +8,28 @@
8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org> 8 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
9 */ 9 */
10 10
11#include <linux/smp.h>
12#include <linux/suspend.h> 11#include <linux/suspend.h>
13#include <asm/proto.h> 12#include <linux/smp.h>
14#include <asm/page.h> 13
15#include <asm/pgtable.h> 14#include <asm/pgtable.h>
15#include <asm/proto.h>
16#include <asm/mtrr.h> 16#include <asm/mtrr.h>
17#include <asm/page.h>
18#include <asm/mce.h>
17#include <asm/xcr.h> 19#include <asm/xcr.h>
18#include <asm/suspend.h> 20#include <asm/suspend.h>
19 21
20static void fix_processor_context(void); 22#ifdef CONFIG_X86_32
23static struct saved_context saved_context;
21 24
25unsigned long saved_context_ebx;
26unsigned long saved_context_esp, saved_context_ebp;
27unsigned long saved_context_esi, saved_context_edi;
28unsigned long saved_context_eflags;
29#else
30/* CONFIG_X86_64 */
22struct saved_context saved_context; 31struct saved_context saved_context;
32#endif
23 33
24/** 34/**
25 * __save_processor_state - save CPU registers before creating a 35 * __save_processor_state - save CPU registers before creating a
@@ -38,19 +48,35 @@ struct saved_context saved_context;
38 */ 48 */
39static void __save_processor_state(struct saved_context *ctxt) 49static void __save_processor_state(struct saved_context *ctxt)
40{ 50{
51#ifdef CONFIG_X86_32
52 mtrr_save_fixed_ranges(NULL);
53#endif
41 kernel_fpu_begin(); 54 kernel_fpu_begin();
42 55
43 /* 56 /*
44 * descriptor tables 57 * descriptor tables
45 */ 58 */
59#ifdef CONFIG_X86_32
60 store_gdt(&ctxt->gdt);
61 store_idt(&ctxt->idt);
62#else
63/* CONFIG_X86_64 */
46 store_gdt((struct desc_ptr *)&ctxt->gdt_limit); 64 store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
47 store_idt((struct desc_ptr *)&ctxt->idt_limit); 65 store_idt((struct desc_ptr *)&ctxt->idt_limit);
66#endif
48 store_tr(ctxt->tr); 67 store_tr(ctxt->tr);
49 68
50 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ 69 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
51 /* 70 /*
52 * segment registers 71 * segment registers
53 */ 72 */
73#ifdef CONFIG_X86_32
74 savesegment(es, ctxt->es);
75 savesegment(fs, ctxt->fs);
76 savesegment(gs, ctxt->gs);
77 savesegment(ss, ctxt->ss);
78#else
79/* CONFIG_X86_64 */
54 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); 80 asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
55 asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); 81 asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
56 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); 82 asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
@@ -62,30 +88,87 @@ static void __save_processor_state(struct saved_context *ctxt)
62 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 88 rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
63 mtrr_save_fixed_ranges(NULL); 89 mtrr_save_fixed_ranges(NULL);
64 90
91 rdmsrl(MSR_EFER, ctxt->efer);
92#endif
93
65 /* 94 /*
66 * control registers 95 * control registers
67 */ 96 */
68 rdmsrl(MSR_EFER, ctxt->efer);
69 ctxt->cr0 = read_cr0(); 97 ctxt->cr0 = read_cr0();
70 ctxt->cr2 = read_cr2(); 98 ctxt->cr2 = read_cr2();
71 ctxt->cr3 = read_cr3(); 99 ctxt->cr3 = read_cr3();
100#ifdef CONFIG_X86_32
101 ctxt->cr4 = read_cr4_safe();
102#else
103/* CONFIG_X86_64 */
72 ctxt->cr4 = read_cr4(); 104 ctxt->cr4 = read_cr4();
73 ctxt->cr8 = read_cr8(); 105 ctxt->cr8 = read_cr8();
106#endif
74} 107}
75 108
109/* Needed by apm.c */
76void save_processor_state(void) 110void save_processor_state(void)
77{ 111{
78 __save_processor_state(&saved_context); 112 __save_processor_state(&saved_context);
79} 113}
114#ifdef CONFIG_X86_32
115EXPORT_SYMBOL(save_processor_state);
116#endif
80 117
81static void do_fpu_end(void) 118static void do_fpu_end(void)
82{ 119{
83 /* 120 /*
84 * Restore FPU regs if necessary 121 * Restore FPU regs if necessary.
85 */ 122 */
86 kernel_fpu_end(); 123 kernel_fpu_end();
87} 124}
88 125
126static void fix_processor_context(void)
127{
128 int cpu = smp_processor_id();
129 struct tss_struct *t = &per_cpu(init_tss, cpu);
130
131 set_tss_desc(cpu, t); /*
132 * This just modifies memory; should not be
133 * necessary. But... This is necessary, because
134 * 386 hardware has concept of busy TSS or some
135 * similar stupidity.
136 */
137
138#ifdef CONFIG_X86_64
139 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
140
141 syscall_init(); /* This sets MSR_*STAR and related */
142#endif
143 load_TR_desc(); /* This does ltr */
144 load_LDT(&current->active_mm->context); /* This does lldt */
145
146 /*
147 * Now maybe reload the debug registers
148 */
149 if (current->thread.debugreg7) {
150#ifdef CONFIG_X86_32
151 set_debugreg(current->thread.debugreg0, 0);
152 set_debugreg(current->thread.debugreg1, 1);
153 set_debugreg(current->thread.debugreg2, 2);
154 set_debugreg(current->thread.debugreg3, 3);
155 /* no 4 and 5 */
156 set_debugreg(current->thread.debugreg6, 6);
157 set_debugreg(current->thread.debugreg7, 7);
158#else
159 /* CONFIG_X86_64 */
160 loaddebug(&current->thread, 0);
161 loaddebug(&current->thread, 1);
162 loaddebug(&current->thread, 2);
163 loaddebug(&current->thread, 3);
164 /* no 4 and 5 */
165 loaddebug(&current->thread, 6);
166 loaddebug(&current->thread, 7);
167#endif
168 }
169
170}
171
89/** 172/**
90 * __restore_processor_state - restore the contents of CPU registers saved 173 * __restore_processor_state - restore the contents of CPU registers saved
91 * by __save_processor_state() 174 * by __save_processor_state()
@@ -96,9 +179,16 @@ static void __restore_processor_state(struct saved_context *ctxt)
96 /* 179 /*
97 * control registers 180 * control registers
98 */ 181 */
182 /* cr4 was introduced in the Pentium CPU */
183#ifdef CONFIG_X86_32
184 if (ctxt->cr4)
185 write_cr4(ctxt->cr4);
186#else
187/* CONFIG X86_64 */
99 wrmsrl(MSR_EFER, ctxt->efer); 188 wrmsrl(MSR_EFER, ctxt->efer);
100 write_cr8(ctxt->cr8); 189 write_cr8(ctxt->cr8);
101 write_cr4(ctxt->cr4); 190 write_cr4(ctxt->cr4);
191#endif
102 write_cr3(ctxt->cr3); 192 write_cr3(ctxt->cr3);
103 write_cr2(ctxt->cr2); 193 write_cr2(ctxt->cr2);
104 write_cr0(ctxt->cr0); 194 write_cr0(ctxt->cr0);
@@ -107,13 +197,31 @@ static void __restore_processor_state(struct saved_context *ctxt)
107 * now restore the descriptor tables to their proper values 197 * now restore the descriptor tables to their proper values
108 * ltr is done i fix_processor_context(). 198 * ltr is done i fix_processor_context().
109 */ 199 */
200#ifdef CONFIG_X86_32
201 load_gdt(&ctxt->gdt);
202 load_idt(&ctxt->idt);
203#else
204/* CONFIG_X86_64 */
110 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit); 205 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
111 load_idt((const struct desc_ptr *)&ctxt->idt_limit); 206 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
112 207#endif
113 208
114 /* 209 /*
115 * segment registers 210 * segment registers
116 */ 211 */
212#ifdef CONFIG_X86_32
213 loadsegment(es, ctxt->es);
214 loadsegment(fs, ctxt->fs);
215 loadsegment(gs, ctxt->gs);
216 loadsegment(ss, ctxt->ss);
217
218 /*
219 * sysenter MSRs
220 */
221 if (boot_cpu_has(X86_FEATURE_SEP))
222 enable_sep_cpu();
223#else
224/* CONFIG_X86_64 */
117 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); 225 asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
118 asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); 226 asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
119 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); 227 asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
@@ -123,6 +231,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
123 wrmsrl(MSR_FS_BASE, ctxt->fs_base); 231 wrmsrl(MSR_FS_BASE, ctxt->fs_base);
124 wrmsrl(MSR_GS_BASE, ctxt->gs_base); 232 wrmsrl(MSR_GS_BASE, ctxt->gs_base);
125 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); 233 wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
234#endif
126 235
127 /* 236 /*
128 * restore XCR0 for xsave capable cpu's. 237 * restore XCR0 for xsave capable cpu's.
@@ -134,41 +243,17 @@ static void __restore_processor_state(struct saved_context *ctxt)
134 243
135 do_fpu_end(); 244 do_fpu_end();
136 mtrr_ap_init(); 245 mtrr_ap_init();
246
247#ifdef CONFIG_X86_32
248 mcheck_init(&boot_cpu_data);
249#endif
137} 250}
138 251
252/* Needed by apm.c */
139void restore_processor_state(void) 253void restore_processor_state(void)
140{ 254{
141 __restore_processor_state(&saved_context); 255 __restore_processor_state(&saved_context);
142} 256}
143 257#ifdef CONFIG_X86_32
144static void fix_processor_context(void) 258EXPORT_SYMBOL(restore_processor_state);
145{ 259#endif
146 int cpu = smp_processor_id();
147 struct tss_struct *t = &per_cpu(init_tss, cpu);
148
149 /*
150 * This just modifies memory; should not be necessary. But... This
151 * is necessary, because 386 hardware has concept of busy TSS or some
152 * similar stupidity.
153 */
154 set_tss_desc(cpu, t);
155
156 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
157
158 syscall_init(); /* This sets MSR_*STAR and related */
159 load_TR_desc(); /* This does ltr */
160 load_LDT(&current->active_mm->context); /* This does lldt */
161
162 /*
163 * Now maybe reload the debug registers
164 */
165 if (current->thread.debugreg7){
166 loaddebug(&current->thread, 0);
167 loaddebug(&current->thread, 1);
168 loaddebug(&current->thread, 2);
169 loaddebug(&current->thread, 3);
170 /* no 4 and 5 */
171 loaddebug(&current->thread, 6);
172 loaddebug(&current->thread, 7);
173 }
174}
diff --git a/arch/x86/power/cpu_32.c b/arch/x86/power/cpu_32.c
deleted file mode 100644
index ce702c5b3a2c..000000000000
--- a/arch/x86/power/cpu_32.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * Suspend support specific for i386.
3 *
4 * Distribute under GPLv2
5 *
6 * Copyright (c) 2002 Pavel Machek <pavel@suse.cz>
7 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
8 */
9
10#include <linux/module.h>
11#include <linux/suspend.h>
12#include <asm/mtrr.h>
13#include <asm/mce.h>
14#include <asm/xcr.h>
15#include <asm/suspend.h>
16
17static struct saved_context saved_context;
18
19unsigned long saved_context_ebx;
20unsigned long saved_context_esp, saved_context_ebp;
21unsigned long saved_context_esi, saved_context_edi;
22unsigned long saved_context_eflags;
23
24static void __save_processor_state(struct saved_context *ctxt)
25{
26 mtrr_save_fixed_ranges(NULL);
27 kernel_fpu_begin();
28
29 /*
30 * descriptor tables
31 */
32 store_gdt(&ctxt->gdt);
33 store_idt(&ctxt->idt);
34 store_tr(ctxt->tr);
35
36 /*
37 * segment registers
38 */
39 savesegment(es, ctxt->es);
40 savesegment(fs, ctxt->fs);
41 savesegment(gs, ctxt->gs);
42 savesegment(ss, ctxt->ss);
43
44 /*
45 * control registers
46 */
47 ctxt->cr0 = read_cr0();
48 ctxt->cr2 = read_cr2();
49 ctxt->cr3 = read_cr3();
50 ctxt->cr4 = read_cr4_safe();
51}
52
53/* Needed by apm.c */
54void save_processor_state(void)
55{
56 __save_processor_state(&saved_context);
57}
58EXPORT_SYMBOL(save_processor_state);
59
60static void do_fpu_end(void)
61{
62 /*
63 * Restore FPU regs if necessary.
64 */
65 kernel_fpu_end();
66}
67
68static void fix_processor_context(void)
69{
70 int cpu = smp_processor_id();
71 struct tss_struct *t = &per_cpu(init_tss, cpu);
72
73 set_tss_desc(cpu, t); /*
74 * This just modifies memory; should not be
75 * necessary. But... This is necessary, because
76 * 386 hardware has concept of busy TSS or some
77 * similar stupidity.
78 */
79
80 load_TR_desc(); /* This does ltr */
81 load_LDT(&current->active_mm->context); /* This does lldt */
82
83 /*
84 * Now maybe reload the debug registers
85 */
86 if (current->thread.debugreg7) {
87 set_debugreg(current->thread.debugreg0, 0);
88 set_debugreg(current->thread.debugreg1, 1);
89 set_debugreg(current->thread.debugreg2, 2);
90 set_debugreg(current->thread.debugreg3, 3);
91 /* no 4 and 5 */
92 set_debugreg(current->thread.debugreg6, 6);
93 set_debugreg(current->thread.debugreg7, 7);
94 }
95
96}
97
98static void __restore_processor_state(struct saved_context *ctxt)
99{
100 /*
101 * control registers
102 */
103 /* cr4 was introduced in the Pentium CPU */
104 if (ctxt->cr4)
105 write_cr4(ctxt->cr4);
106 write_cr3(ctxt->cr3);
107 write_cr2(ctxt->cr2);
108 write_cr0(ctxt->cr0);
109
110 /*
111 * now restore the descriptor tables to their proper values
112 * ltr is done i fix_processor_context().
113 */
114 load_gdt(&ctxt->gdt);
115 load_idt(&ctxt->idt);
116
117 /*
118 * segment registers
119 */
120 loadsegment(es, ctxt->es);
121 loadsegment(fs, ctxt->fs);
122 loadsegment(gs, ctxt->gs);
123 loadsegment(ss, ctxt->ss);
124
125 /*
126 * sysenter MSRs
127 */
128 if (boot_cpu_has(X86_FEATURE_SEP))
129 enable_sep_cpu();
130
131 /*
132 * restore XCR0 for xsave capable cpu's.
133 */
134 if (cpu_has_xsave)
135 xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
136
137 fix_processor_context();
138 do_fpu_end();
139 mtrr_ap_init();
140 mcheck_init(&boot_cpu_data);
141}
142
143/* Needed by apm.c */
144void restore_processor_state(void)
145{
146 __restore_processor_state(&saved_context);
147}
148EXPORT_SYMBOL(restore_processor_state);
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8b4708e06244..ead3f64c41d0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -469,22 +469,6 @@ static void platform_drv_shutdown(struct device *_dev)
469 drv->shutdown(dev); 469 drv->shutdown(dev);
470} 470}
471 471
472static int platform_drv_suspend(struct device *_dev, pm_message_t state)
473{
474 struct platform_driver *drv = to_platform_driver(_dev->driver);
475 struct platform_device *dev = to_platform_device(_dev);
476
477 return drv->suspend(dev, state);
478}
479
480static int platform_drv_resume(struct device *_dev)
481{
482 struct platform_driver *drv = to_platform_driver(_dev->driver);
483 struct platform_device *dev = to_platform_device(_dev);
484
485 return drv->resume(dev);
486}
487
488/** 472/**
489 * platform_driver_register 473 * platform_driver_register
490 * @drv: platform driver structure 474 * @drv: platform driver structure
@@ -498,10 +482,10 @@ int platform_driver_register(struct platform_driver *drv)
498 drv->driver.remove = platform_drv_remove; 482 drv->driver.remove = platform_drv_remove;
499 if (drv->shutdown) 483 if (drv->shutdown)
500 drv->driver.shutdown = platform_drv_shutdown; 484 drv->driver.shutdown = platform_drv_shutdown;
501 if (drv->suspend) 485 if (drv->suspend || drv->resume)
502 drv->driver.suspend = platform_drv_suspend; 486 pr_warning("Platform driver '%s' needs updating - please use "
503 if (drv->resume) 487 "dev_pm_ops\n", drv->driver.name);
504 drv->driver.resume = platform_drv_resume; 488
505 return driver_register(&drv->driver); 489 return driver_register(&drv->driver);
506} 490}
507EXPORT_SYMBOL_GPL(platform_driver_register); 491EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -633,10 +617,12 @@ static int platform_match(struct device *dev, struct device_driver *drv)
633 617
634static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 618static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
635{ 619{
620 struct platform_driver *pdrv = to_platform_driver(dev->driver);
621 struct platform_device *pdev = to_platform_device(dev);
636 int ret = 0; 622 int ret = 0;
637 623
638 if (dev->driver && dev->driver->suspend) 624 if (dev->driver && pdrv->suspend)
639 ret = dev->driver->suspend(dev, mesg); 625 ret = pdrv->suspend(pdev, mesg);
640 626
641 return ret; 627 return ret;
642} 628}
@@ -667,10 +653,12 @@ static int platform_legacy_resume_early(struct device *dev)
667 653
668static int platform_legacy_resume(struct device *dev) 654static int platform_legacy_resume(struct device *dev)
669{ 655{
656 struct platform_driver *pdrv = to_platform_driver(dev->driver);
657 struct platform_device *pdev = to_platform_device(dev);
670 int ret = 0; 658 int ret = 0;
671 659
672 if (dev->driver && dev->driver->resume) 660 if (dev->driver && pdrv->resume)
673 ret = dev->driver->resume(dev); 661 ret = pdrv->resume(pdev);
674 662
675 return ret; 663 return ret;
676} 664}
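platform_driver_register() now only warns when a driver still sets the legacy suspend/resume members, pointing at dev_pm_ops instead. A minimal sketch of what that conversion might look like for a hypothetical "foo" platform driver (names and bodies invented):

    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int foo_suspend(struct device *dev)
    {
            /* quiesce the device; may sleep */
            return 0;
    }

    static int foo_resume(struct device *dev)
    {
            /* re-initialize the device; may sleep */
            return 0;
    }

    static struct dev_pm_ops foo_pm_ops = {
            .suspend = foo_suspend,
            .resume  = foo_resume,
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name	= "foo",
                    .owner	= THIS_MODULE,
                    .pm	= &foo_pm_ops,	/* instead of legacy .suspend/.resume */
            },
            /* .probe/.remove omitted */
    };

With .pm set, platform_legacy_suspend()/platform_legacy_resume() are never used for this driver and the "needs updating" warning is not triggered.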
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3e4bc699bc0f..fae725458981 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -315,13 +315,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
315/*------------------------- Resume routines -------------------------*/ 315/*------------------------- Resume routines -------------------------*/
316 316
317/** 317/**
318 * resume_device_noirq - Power on one device (early resume). 318 * device_resume_noirq - Power on one device (early resume).
319 * @dev: Device. 319 * @dev: Device.
320 * @state: PM transition of the system being carried out. 320 * @state: PM transition of the system being carried out.
321 * 321 *
322 * Must be called with interrupts disabled. 322 * Must be called with interrupts disabled.
323 */ 323 */
324static int resume_device_noirq(struct device *dev, pm_message_t state) 324static int device_resume_noirq(struct device *dev, pm_message_t state)
325{ 325{
326 int error = 0; 326 int error = 0;
327 327
@@ -334,9 +334,6 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
334 if (dev->bus->pm) { 334 if (dev->bus->pm) {
335 pm_dev_dbg(dev, state, "EARLY "); 335 pm_dev_dbg(dev, state, "EARLY ");
336 error = pm_noirq_op(dev, dev->bus->pm, state); 336 error = pm_noirq_op(dev, dev->bus->pm, state);
337 } else if (dev->bus->resume_early) {
338 pm_dev_dbg(dev, state, "legacy EARLY ");
339 error = dev->bus->resume_early(dev);
340 } 337 }
341 End: 338 End:
342 TRACE_RESUME(error); 339 TRACE_RESUME(error);
@@ -344,16 +341,16 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
344} 341}
345 342
346/** 343/**
347 * dpm_power_up - Power on all regular (non-sysdev) devices. 344 * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
348 * @state: PM transition of the system being carried out. 345 * @state: PM transition of the system being carried out.
349 * 346 *
350 * Execute the appropriate "noirq resume" callback for all devices marked 347 * Call the "noirq" resume handlers for all devices marked as
351 * as DPM_OFF_IRQ. 348 * DPM_OFF_IRQ and enable device drivers to receive interrupts.
352 * 349 *
353 * Must be called under dpm_list_mtx. Device drivers should not receive 350 * Must be called under dpm_list_mtx. Device drivers should not receive
354 * interrupts while it's being executed. 351 * interrupts while it's being executed.
355 */ 352 */
356static void dpm_power_up(pm_message_t state) 353void dpm_resume_noirq(pm_message_t state)
357{ 354{
358 struct device *dev; 355 struct device *dev;
359 356
@@ -363,33 +360,21 @@ static void dpm_power_up(pm_message_t state)
363 int error; 360 int error;
364 361
365 dev->power.status = DPM_OFF; 362 dev->power.status = DPM_OFF;
366 error = resume_device_noirq(dev, state); 363 error = device_resume_noirq(dev, state);
367 if (error) 364 if (error)
368 pm_dev_err(dev, state, " early", error); 365 pm_dev_err(dev, state, " early", error);
369 } 366 }
370 mutex_unlock(&dpm_list_mtx); 367 mutex_unlock(&dpm_list_mtx);
371}
372
373/**
374 * device_power_up - Turn on all devices that need special attention.
375 * @state: PM transition of the system being carried out.
376 *
377 * Call the "early" resume handlers and enable device drivers to receive
378 * interrupts.
379 */
380void device_power_up(pm_message_t state)
381{
382 dpm_power_up(state);
383 resume_device_irqs(); 368 resume_device_irqs();
384} 369}
385EXPORT_SYMBOL_GPL(device_power_up); 370EXPORT_SYMBOL_GPL(dpm_resume_noirq);
386 371
387/** 372/**
388 * resume_device - Restore state for one device. 373 * device_resume - Restore state for one device.
389 * @dev: Device. 374 * @dev: Device.
390 * @state: PM transition of the system being carried out. 375 * @state: PM transition of the system being carried out.
391 */ 376 */
392static int resume_device(struct device *dev, pm_message_t state) 377static int device_resume(struct device *dev, pm_message_t state)
393{ 378{
394 int error = 0; 379 int error = 0;
395 380
@@ -414,9 +399,6 @@ static int resume_device(struct device *dev, pm_message_t state)
414 if (dev->type->pm) { 399 if (dev->type->pm) {
415 pm_dev_dbg(dev, state, "type "); 400 pm_dev_dbg(dev, state, "type ");
416 error = pm_op(dev, dev->type->pm, state); 401 error = pm_op(dev, dev->type->pm, state);
417 } else if (dev->type->resume) {
418 pm_dev_dbg(dev, state, "legacy type ");
419 error = dev->type->resume(dev);
420 } 402 }
421 if (error) 403 if (error)
422 goto End; 404 goto End;
@@ -462,7 +444,7 @@ static void dpm_resume(pm_message_t state)
462 dev->power.status = DPM_RESUMING; 444 dev->power.status = DPM_RESUMING;
463 mutex_unlock(&dpm_list_mtx); 445 mutex_unlock(&dpm_list_mtx);
464 446
465 error = resume_device(dev, state); 447 error = device_resume(dev, state);
466 448
467 mutex_lock(&dpm_list_mtx); 449 mutex_lock(&dpm_list_mtx);
468 if (error) 450 if (error)
@@ -480,11 +462,11 @@ static void dpm_resume(pm_message_t state)
480} 462}
481 463
482/** 464/**
483 * complete_device - Complete a PM transition for given device 465 * device_complete - Complete a PM transition for given device
484 * @dev: Device. 466 * @dev: Device.
485 * @state: PM transition of the system being carried out. 467 * @state: PM transition of the system being carried out.
486 */ 468 */
487static void complete_device(struct device *dev, pm_message_t state) 469static void device_complete(struct device *dev, pm_message_t state)
488{ 470{
489 down(&dev->sem); 471 down(&dev->sem);
490 472
@@ -527,7 +509,7 @@ static void dpm_complete(pm_message_t state)
527 dev->power.status = DPM_ON; 509 dev->power.status = DPM_ON;
528 mutex_unlock(&dpm_list_mtx); 510 mutex_unlock(&dpm_list_mtx);
529 511
530 complete_device(dev, state); 512 device_complete(dev, state);
531 513
532 mutex_lock(&dpm_list_mtx); 514 mutex_lock(&dpm_list_mtx);
533 } 515 }
@@ -540,19 +522,19 @@ static void dpm_complete(pm_message_t state)
540} 522}
541 523
542/** 524/**
543 * device_resume - Restore state of each device in system. 525 * dpm_resume_end - Restore state of each device in system.
544 * @state: PM transition of the system being carried out. 526 * @state: PM transition of the system being carried out.
545 * 527 *
546 * Resume all the devices, unlock them all, and allow new 528 * Resume all the devices, unlock them all, and allow new
547 * devices to be registered once again. 529 * devices to be registered once again.
548 */ 530 */
549void device_resume(pm_message_t state) 531void dpm_resume_end(pm_message_t state)
550{ 532{
551 might_sleep(); 533 might_sleep();
552 dpm_resume(state); 534 dpm_resume(state);
553 dpm_complete(state); 535 dpm_complete(state);
554} 536}
555EXPORT_SYMBOL_GPL(device_resume); 537EXPORT_SYMBOL_GPL(dpm_resume_end);
556 538
557 539
558/*------------------------- Suspend routines -------------------------*/ 540/*------------------------- Suspend routines -------------------------*/
@@ -577,13 +559,13 @@ static pm_message_t resume_event(pm_message_t sleep_state)
577} 559}
578 560
579/** 561/**
580 * suspend_device_noirq - Shut down one device (late suspend). 562 * device_suspend_noirq - Shut down one device (late suspend).
581 * @dev: Device. 563 * @dev: Device.
582 * @state: PM transition of the system being carried out. 564 * @state: PM transition of the system being carried out.
583 * 565 *
584 * This is called with interrupts off and only a single CPU running. 566 * This is called with interrupts off and only a single CPU running.
585 */ 567 */
586static int suspend_device_noirq(struct device *dev, pm_message_t state) 568static int device_suspend_noirq(struct device *dev, pm_message_t state)
587{ 569{
588 int error = 0; 570 int error = 0;
589 571
@@ -593,24 +575,20 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
593 if (dev->bus->pm) { 575 if (dev->bus->pm) {
594 pm_dev_dbg(dev, state, "LATE "); 576 pm_dev_dbg(dev, state, "LATE ");
595 error = pm_noirq_op(dev, dev->bus->pm, state); 577 error = pm_noirq_op(dev, dev->bus->pm, state);
596 } else if (dev->bus->suspend_late) {
597 pm_dev_dbg(dev, state, "legacy LATE ");
598 error = dev->bus->suspend_late(dev, state);
599 suspend_report_result(dev->bus->suspend_late, error);
600 } 578 }
601 return error; 579 return error;
602} 580}
603 581
604/** 582/**
605 * device_power_down - Shut down special devices. 583 * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
606 * @state: PM transition of the system being carried out. 584 * @state: PM transition of the system being carried out.
607 * 585 *
608 * Prevent device drivers from receiving interrupts and call the "late" 586 * Prevent device drivers from receiving interrupts and call the "noirq"
609 * suspend handlers. 587 * suspend handlers.
610 * 588 *
611 * Must be called under dpm_list_mtx. 589 * Must be called under dpm_list_mtx.
612 */ 590 */
613int device_power_down(pm_message_t state) 591int dpm_suspend_noirq(pm_message_t state)
614{ 592{
615 struct device *dev; 593 struct device *dev;
616 int error = 0; 594 int error = 0;
@@ -618,7 +596,7 @@ int device_power_down(pm_message_t state)
618 suspend_device_irqs(); 596 suspend_device_irqs();
619 mutex_lock(&dpm_list_mtx); 597 mutex_lock(&dpm_list_mtx);
620 list_for_each_entry_reverse(dev, &dpm_list, power.entry) { 598 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
621 error = suspend_device_noirq(dev, state); 599 error = device_suspend_noirq(dev, state);
622 if (error) { 600 if (error) {
623 pm_dev_err(dev, state, " late", error); 601 pm_dev_err(dev, state, " late", error);
624 break; 602 break;
@@ -627,17 +605,17 @@ int device_power_down(pm_message_t state)
627 } 605 }
628 mutex_unlock(&dpm_list_mtx); 606 mutex_unlock(&dpm_list_mtx);
629 if (error) 607 if (error)
630 device_power_up(resume_event(state)); 608 dpm_resume_noirq(resume_event(state));
631 return error; 609 return error;
632} 610}
633EXPORT_SYMBOL_GPL(device_power_down); 611EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
634 612
635/** 613/**
636 * suspend_device - Save state of one device. 614 * device_suspend - Save state of one device.
637 * @dev: Device. 615 * @dev: Device.
638 * @state: PM transition of the system being carried out. 616 * @state: PM transition of the system being carried out.
639 */ 617 */
640static int suspend_device(struct device *dev, pm_message_t state) 618static int device_suspend(struct device *dev, pm_message_t state)
641{ 619{
642 int error = 0; 620 int error = 0;
643 621
@@ -660,10 +638,6 @@ static int suspend_device(struct device *dev, pm_message_t state)
660 if (dev->type->pm) { 638 if (dev->type->pm) {
661 pm_dev_dbg(dev, state, "type "); 639 pm_dev_dbg(dev, state, "type ");
662 error = pm_op(dev, dev->type->pm, state); 640 error = pm_op(dev, dev->type->pm, state);
663 } else if (dev->type->suspend) {
664 pm_dev_dbg(dev, state, "legacy type ");
665 error = dev->type->suspend(dev, state);
666 suspend_report_result(dev->type->suspend, error);
667 } 641 }
668 if (error) 642 if (error)
669 goto End; 643 goto End;
@@ -704,7 +678,7 @@ static int dpm_suspend(pm_message_t state)
704 get_device(dev); 678 get_device(dev);
705 mutex_unlock(&dpm_list_mtx); 679 mutex_unlock(&dpm_list_mtx);
706 680
707 error = suspend_device(dev, state); 681 error = device_suspend(dev, state);
708 682
709 mutex_lock(&dpm_list_mtx); 683 mutex_lock(&dpm_list_mtx);
710 if (error) { 684 if (error) {
@@ -723,11 +697,11 @@ static int dpm_suspend(pm_message_t state)
723} 697}
724 698
725/** 699/**
726 * prepare_device - Execute the ->prepare() callback(s) for given device. 700 * device_prepare - Execute the ->prepare() callback(s) for given device.
727 * @dev: Device. 701 * @dev: Device.
728 * @state: PM transition of the system being carried out. 702 * @state: PM transition of the system being carried out.
729 */ 703 */
730static int prepare_device(struct device *dev, pm_message_t state) 704static int device_prepare(struct device *dev, pm_message_t state)
731{ 705{
732 int error = 0; 706 int error = 0;
733 707
@@ -781,7 +755,7 @@ static int dpm_prepare(pm_message_t state)
781 dev->power.status = DPM_PREPARING; 755 dev->power.status = DPM_PREPARING;
782 mutex_unlock(&dpm_list_mtx); 756 mutex_unlock(&dpm_list_mtx);
783 757
784 error = prepare_device(dev, state); 758 error = device_prepare(dev, state);
785 759
786 mutex_lock(&dpm_list_mtx); 760 mutex_lock(&dpm_list_mtx);
787 if (error) { 761 if (error) {
@@ -807,12 +781,12 @@ static int dpm_prepare(pm_message_t state)
807} 781}
808 782
809/** 783/**
810 * device_suspend - Save state and stop all devices in system. 784 * dpm_suspend_start - Save state and stop all devices in system.
811 * @state: PM transition of the system being carried out. 785 * @state: PM transition of the system being carried out.
812 * 786 *
813 * Prepare and suspend all devices. 787 * Prepare and suspend all devices.
814 */ 788 */
815int device_suspend(pm_message_t state) 789int dpm_suspend_start(pm_message_t state)
816{ 790{
817 int error; 791 int error;
818 792
@@ -822,7 +796,7 @@ int device_suspend(pm_message_t state)
822 error = dpm_suspend(state); 796 error = dpm_suspend(state);
823 return error; 797 return error;
824} 798}
825EXPORT_SYMBOL_GPL(device_suspend); 799EXPORT_SYMBOL_GPL(dpm_suspend_start);
826 800
827void __suspend_report_result(const char *function, void *fn, int ret) 801void __suspend_report_result(const char *function, void *fn, int ret)
828{ 802{
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3236b434b964..9742a78c9fe4 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -343,11 +343,15 @@ static void __sysdev_resume(struct sys_device *dev)
343 /* First, call the class-specific one */ 343 /* First, call the class-specific one */
344 if (cls->resume) 344 if (cls->resume)
345 cls->resume(dev); 345 cls->resume(dev);
346 WARN_ONCE(!irqs_disabled(),
347 "Interrupts enabled after %pF\n", cls->resume);
346 348
347 /* Call auxillary drivers next. */ 349 /* Call auxillary drivers next. */
348 list_for_each_entry(drv, &cls->drivers, entry) { 350 list_for_each_entry(drv, &cls->drivers, entry) {
349 if (drv->resume) 351 if (drv->resume)
350 drv->resume(dev); 352 drv->resume(dev);
353 WARN_ONCE(!irqs_disabled(),
354 "Interrupts enabled after %pF\n", drv->resume);
351 } 355 }
352} 356}
353 357
@@ -377,6 +381,9 @@ int sysdev_suspend(pm_message_t state)
377 if (ret) 381 if (ret)
378 return ret; 382 return ret;
379 383
384 WARN_ONCE(!irqs_disabled(),
385 "Interrupts enabled while suspending system devices\n");
386
380 pr_debug("Suspending System Devices\n"); 387 pr_debug("Suspending System Devices\n");
381 388
382 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { 389 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
@@ -393,6 +400,9 @@ int sysdev_suspend(pm_message_t state)
393 if (ret) 400 if (ret)
394 goto aux_driver; 401 goto aux_driver;
395 } 402 }
403 WARN_ONCE(!irqs_disabled(),
404 "Interrupts enabled after %pF\n",
405 drv->suspend);
396 } 406 }
397 407
398 /* Now call the generic one */ 408 /* Now call the generic one */
@@ -400,6 +410,9 @@ int sysdev_suspend(pm_message_t state)
400 ret = cls->suspend(sysdev, state); 410 ret = cls->suspend(sysdev, state);
401 if (ret) 411 if (ret)
402 goto cls_driver; 412 goto cls_driver;
413 WARN_ONCE(!irqs_disabled(),
414 "Interrupts enabled after %pF\n",
415 cls->suspend);
403 } 416 }
404 } 417 }
405 } 418 }
@@ -452,6 +465,9 @@ int sysdev_resume(void)
452{ 465{
453 struct sysdev_class *cls; 466 struct sysdev_class *cls;
454 467
468 WARN_ONCE(!irqs_disabled(),
469 "Interrupts enabled while resuming system devices\n");
470
455 pr_debug("Resuming System Devices\n"); 471 pr_debug("Resuming System Devices\n");
456 472
457 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { 473 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {
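The WARN_ONCE() additions fire whenever a sysdev class or auxiliary driver callback returns with interrupts enabled. A sketch, using the callback signatures visible in the calls above, of a hypothetical sysdev driver that keeps IRQs off as required (registration via sysdev_driver_register() is omitted; header and struct layout assumed from the sysdev API of this era):

    #include <linux/sysdev.h>
    #include <linux/pm.h>

    /* Hypothetical sysdev driver: both callbacks run with IRQs disabled
     * and must not re-enable them, or the new WARN_ONCE() checks fire. */
    static int foo_sysdev_suspend(struct sys_device *dev, pm_message_t state)
    {
            /* save per-device state; no sleeping, no IRQ enabling */
            return 0;
    }

    static int foo_sysdev_resume(struct sys_device *dev)
    {
            /* restore per-device state; no sleeping, no IRQ enabling */
            return 0;
    }

    static struct sysdev_driver foo_sysdev_driver = {
            .suspend = foo_sysdev_suspend,
            .resume  = foo_sysdev_resume,
    };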
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index fddc2025dece..10d03d7931c4 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -43,7 +43,7 @@ static int xen_suspend(void *data)
43 if (err) { 43 if (err) {
44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n", 44 printk(KERN_ERR "xen_suspend: sysdev_suspend failed: %d\n",
45 err); 45 err);
46 device_power_up(PMSG_RESUME); 46 dpm_resume_noirq(PMSG_RESUME);
47 return err; 47 return err;
48 } 48 }
49 49
@@ -69,7 +69,7 @@ static int xen_suspend(void *data)
69 } 69 }
70 70
71 sysdev_resume(); 71 sysdev_resume();
72 device_power_up(PMSG_RESUME); 72 dpm_resume_noirq(PMSG_RESUME);
73 73
74 return 0; 74 return 0;
75} 75}
@@ -92,18 +92,18 @@ static void do_suspend(void)
92 } 92 }
93#endif 93#endif
94 94
95 err = device_suspend(PMSG_SUSPEND); 95 err = dpm_suspend_start(PMSG_SUSPEND);
96 if (err) { 96 if (err) {
97 printk(KERN_ERR "xen suspend: device_suspend %d\n", err); 97 printk(KERN_ERR "xen suspend: dpm_suspend_start %d\n", err);
98 goto out; 98 goto out;
99 } 99 }
100 100
101 printk(KERN_DEBUG "suspending xenstore...\n"); 101 printk(KERN_DEBUG "suspending xenstore...\n");
102 xs_suspend(); 102 xs_suspend();
103 103
104 err = device_power_down(PMSG_SUSPEND); 104 err = dpm_suspend_noirq(PMSG_SUSPEND);
105 if (err) { 105 if (err) {
106 printk(KERN_ERR "device_power_down failed: %d\n", err); 106 printk(KERN_ERR "dpm_suspend_noirq failed: %d\n", err);
107 goto resume_devices; 107 goto resume_devices;
108 } 108 }
109 109
@@ -119,10 +119,10 @@ static void do_suspend(void)
119 } else 119 } else
120 xs_suspend_cancel(); 120 xs_suspend_cancel();
121 121
122 device_power_up(PMSG_RESUME); 122 dpm_resume_noirq(PMSG_RESUME);
123 123
124resume_devices: 124resume_devices:
125 device_resume(PMSG_RESUME); 125 dpm_resume_end(PMSG_RESUME);
126 126
127 /* Make sure timer events get retriggered on all CPUs */ 127 /* Make sure timer events get retriggered on all CPUs */
128 clock_was_set(); 128 clock_was_set();
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d5c197bad45..a4a7b10aaa48 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -62,8 +62,6 @@ struct bus_type {
62 void (*shutdown)(struct device *dev); 62 void (*shutdown)(struct device *dev);
63 63
64 int (*suspend)(struct device *dev, pm_message_t state); 64 int (*suspend)(struct device *dev, pm_message_t state);
65 int (*suspend_late)(struct device *dev, pm_message_t state);
66 int (*resume_early)(struct device *dev);
67 int (*resume)(struct device *dev); 65 int (*resume)(struct device *dev);
68 66
69 struct dev_pm_ops *pm; 67 struct dev_pm_ops *pm;
@@ -291,9 +289,6 @@ struct device_type {
291 int (*uevent)(struct device *dev, struct kobj_uevent_env *env); 289 int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
292 void (*release)(struct device *dev); 290 void (*release)(struct device *dev);
293 291
294 int (*suspend)(struct device *dev, pm_message_t state);
295 int (*resume)(struct device *dev);
296
297 struct dev_pm_ops *pm; 292 struct dev_pm_ops *pm;
298}; 293};
299 294
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ff374ceface0..c41e812e9d5e 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -183,6 +183,7 @@ extern void disable_irq(unsigned int irq);
183extern void enable_irq(unsigned int irq); 183extern void enable_irq(unsigned int irq);
184 184
185/* The following three functions are for the core kernel use only. */ 185/* The following three functions are for the core kernel use only. */
186#ifdef CONFIG_GENERIC_HARDIRQS
186extern void suspend_device_irqs(void); 187extern void suspend_device_irqs(void);
187extern void resume_device_irqs(void); 188extern void resume_device_irqs(void);
188#ifdef CONFIG_PM_SLEEP 189#ifdef CONFIG_PM_SLEEP
@@ -190,6 +191,11 @@ extern int check_wakeup_irqs(void);
190#else 191#else
191static inline int check_wakeup_irqs(void) { return 0; } 192static inline int check_wakeup_irqs(void) { return 0; }
192#endif 193#endif
194#else
195static inline void suspend_device_irqs(void) { };
196static inline void resume_device_irqs(void) { };
197static inline int check_wakeup_irqs(void) { return 0; }
198#endif
193 199
194#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 200#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
195 201
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1d4e2d289821..b3f74764a586 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -382,14 +382,13 @@ struct dev_pm_info {
382#ifdef CONFIG_PM_SLEEP 382#ifdef CONFIG_PM_SLEEP
383extern void device_pm_lock(void); 383extern void device_pm_lock(void);
384extern int sysdev_resume(void); 384extern int sysdev_resume(void);
385extern void device_power_up(pm_message_t state); 385extern void dpm_resume_noirq(pm_message_t state);
386extern void device_resume(pm_message_t state); 386extern void dpm_resume_end(pm_message_t state);
387 387
388extern void device_pm_unlock(void); 388extern void device_pm_unlock(void);
389extern int sysdev_suspend(pm_message_t state); 389extern int sysdev_suspend(pm_message_t state);
390extern int device_power_down(pm_message_t state); 390extern int dpm_suspend_noirq(pm_message_t state);
391extern int device_suspend(pm_message_t state); 391extern int dpm_suspend_start(pm_message_t state);
392extern int device_prepare_suspend(pm_message_t state);
393 392
394extern void __suspend_report_result(const char *function, void *fn, int ret); 393extern void __suspend_report_result(const char *function, void *fn, int ret);
395 394
@@ -403,7 +402,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
403#define device_pm_lock() do {} while (0) 402#define device_pm_lock() do {} while (0)
404#define device_pm_unlock() do {} while (0) 403#define device_pm_unlock() do {} while (0)
405 404
406static inline int device_suspend(pm_message_t state) 405static inline int dpm_suspend_start(pm_message_t state)
407{ 406{
408 return 0; 407 return 0;
409} 408}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 795032edfc46..cd15df6c63cd 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,11 +245,6 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
245 245
246extern void hibernation_set_ops(struct platform_hibernation_ops *ops); 246extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
247extern int hibernate(void); 247extern int hibernate(void);
248extern int hibernate_nvs_register(unsigned long start, unsigned long size);
249extern int hibernate_nvs_alloc(void);
250extern void hibernate_nvs_free(void);
251extern void hibernate_nvs_save(void);
252extern void hibernate_nvs_restore(void);
253extern bool system_entering_hibernation(void); 248extern bool system_entering_hibernation(void);
254#else /* CONFIG_HIBERNATION */ 249#else /* CONFIG_HIBERNATION */
255static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } 250static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
@@ -258,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {}
258 253
259static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} 254static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
260static inline int hibernate(void) { return -ENOSYS; } 255static inline int hibernate(void) { return -ENOSYS; }
256static inline bool system_entering_hibernation(void) { return false; }
257#endif /* CONFIG_HIBERNATION */
258
259#ifdef CONFIG_HIBERNATION_NVS
260extern int hibernate_nvs_register(unsigned long start, unsigned long size);
261extern int hibernate_nvs_alloc(void);
262extern void hibernate_nvs_free(void);
263extern void hibernate_nvs_save(void);
264extern void hibernate_nvs_restore(void);
265#else /* CONFIG_HIBERNATION_NVS */
261static inline int hibernate_nvs_register(unsigned long a, unsigned long b) 266static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
262{ 267{
263 return 0; 268 return 0;
@@ -266,8 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; }
266static inline void hibernate_nvs_free(void) {} 271static inline void hibernate_nvs_free(void) {}
267static inline void hibernate_nvs_save(void) {} 272static inline void hibernate_nvs_save(void) {}
268static inline void hibernate_nvs_restore(void) {} 273static inline void hibernate_nvs_restore(void) {}
269static inline bool system_entering_hibernation(void) { return false; } 274#endif /* CONFIG_HIBERNATION_NVS */
270#endif /* CONFIG_HIBERNATION */
271 275
272#ifdef CONFIG_PM_SLEEP 276#ifdef CONFIG_PM_SLEEP
273void save_processor_state(void); 277void save_processor_state(void);
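The NVS helpers keep their old signatures but now depend on CONFIG_HIBERNATION_NVS rather than CONFIG_HIBERNATION. A minimal sketch of the registration step, with made-up start/size values and a hypothetical init hook; the remaining helpers (alloc/save/restore/free) are invoked around the hibernation transition by the code that owns the region:

    #include <linux/init.h>
    #include <linux/suspend.h>

    /* Hypothetical platform init: remember a firmware NVS region so the
     * hibernation core can preserve it across suspend-to-disk.
     * 0x000f0000/0x10000 are example values only. */
    static int __init foo_platform_nvs_init(void)
    {
            return hibernate_nvs_register(0x000f0000, 0x10000);
    }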
diff --git a/kernel/kexec.c b/kernel/kexec.c
index e4983770913b..ae1c35201cc8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1448,17 +1448,17 @@ int kernel_kexec(void)
1448 goto Restore_console; 1448 goto Restore_console;
1449 } 1449 }
1450 suspend_console(); 1450 suspend_console();
1451 error = device_suspend(PMSG_FREEZE); 1451 error = dpm_suspend_start(PMSG_FREEZE);
1452 if (error) 1452 if (error)
1453 goto Resume_console; 1453 goto Resume_console;
1454 /* At this point, device_suspend() has been called, 1454 /* At this point, dpm_suspend_start() has been called,
1455 * but *not* device_power_down(). We *must* 1455 * but *not* dpm_suspend_noirq(). We *must* call
1456 * device_power_down() now. Otherwise, drivers for 1456 * dpm_suspend_noirq() now. Otherwise, drivers for
1457 * some devices (e.g. interrupt controllers) become 1457 * some devices (e.g. interrupt controllers) become
1458 * desynchronized with the actual state of the 1458 * desynchronized with the actual state of the
1459 * hardware at resume time, and evil weirdness ensues. 1459 * hardware at resume time, and evil weirdness ensues.
1460 */ 1460 */
1461 error = device_power_down(PMSG_FREEZE); 1461 error = dpm_suspend_noirq(PMSG_FREEZE);
1462 if (error) 1462 if (error)
1463 goto Resume_devices; 1463 goto Resume_devices;
1464 error = disable_nonboot_cpus(); 1464 error = disable_nonboot_cpus();
@@ -1486,9 +1486,9 @@ int kernel_kexec(void)
1486 local_irq_enable(); 1486 local_irq_enable();
1487 Enable_cpus: 1487 Enable_cpus:
1488 enable_nonboot_cpus(); 1488 enable_nonboot_cpus();
1489 device_power_up(PMSG_RESTORE); 1489 dpm_resume_noirq(PMSG_RESTORE);
1490 Resume_devices: 1490 Resume_devices:
1491 device_resume(PMSG_RESTORE); 1491 dpm_resume_end(PMSG_RESTORE);
1492 Resume_console: 1492 Resume_console:
1493 resume_console(); 1493 resume_console();
1494 thaw_processes(); 1494 thaw_processes();
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 23bd4daeb96b..72067cbdb37f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -116,9 +116,13 @@ config SUSPEND_FREEZER
116 116
117 Turning OFF this setting is NOT recommended! If in doubt, say Y. 117 Turning OFF this setting is NOT recommended! If in doubt, say Y.
118 118
119config HIBERNATION_NVS
120 bool
121
119config HIBERNATION 122config HIBERNATION
120 bool "Hibernation (aka 'suspend to disk')" 123 bool "Hibernation (aka 'suspend to disk')"
121 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE 124 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
125 select HIBERNATION_NVS if HAS_IOMEM
122 ---help--- 126 ---help---
123 Enable the suspend to disk (STD) functionality, which is usually 127 Enable the suspend to disk (STD) functionality, which is usually
124 called "hibernation" in user interfaces. STD checkpoints the 128 called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 720ea4f781bd..c3b81c30e5d5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -6,6 +6,9 @@ endif
6obj-$(CONFIG_PM) += main.o 6obj-$(CONFIG_PM) += main.o
7obj-$(CONFIG_PM_SLEEP) += console.o 7obj-$(CONFIG_PM_SLEEP) += console.o
8obj-$(CONFIG_FREEZER) += process.o 8obj-$(CONFIG_FREEZER) += process.o
9obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o 9obj-$(CONFIG_SUSPEND) += suspend.o
10obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
11obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
12obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
10 13
11obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o 14obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/hibernate.c
index 5cb080e7eebd..81d2e7464893 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/hibernate.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * kernel/power/disk.c - Suspend-to-disk support. 2 * kernel/power/hibernate.c - Hibernation (a.k.a suspend-to-disk) support.
3 * 3 *
4 * Copyright (c) 2003 Patrick Mochel 4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab 5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> 6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz>
7 * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
7 * 8 *
8 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
9 *
10 */ 10 */
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
@@ -215,13 +215,13 @@ static int create_image(int platform_mode)
215 if (error) 215 if (error)
216 return error; 216 return error;
217 217
218 /* At this point, device_suspend() has been called, but *not* 218 /* At this point, dpm_suspend_start() has been called, but *not*
219 * device_power_down(). We *must* call device_power_down() now. 219 * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
220 * Otherwise, drivers for some devices (e.g. interrupt controllers) 220 * Otherwise, drivers for some devices (e.g. interrupt controllers)
221 * become desynchronized with the actual state of the hardware 221 * become desynchronized with the actual state of the hardware
222 * at resume time, and evil weirdness ensues. 222 * at resume time, and evil weirdness ensues.
223 */ 223 */
224 error = device_power_down(PMSG_FREEZE); 224 error = dpm_suspend_noirq(PMSG_FREEZE);
225 if (error) { 225 if (error) {
226 printk(KERN_ERR "PM: Some devices failed to power down, " 226 printk(KERN_ERR "PM: Some devices failed to power down, "
227 "aborting hibernation\n"); 227 "aborting hibernation\n");
@@ -262,7 +262,7 @@ static int create_image(int platform_mode)
262 262
263 Power_up: 263 Power_up:
264 sysdev_resume(); 264 sysdev_resume();
265 /* NOTE: device_power_up() is just a resume() for devices 265 /* NOTE: dpm_resume_noirq() is just a resume() for devices
266 * that suspended with irqs off ... no overall powerup. 266 * that suspended with irqs off ... no overall powerup.
267 */ 267 */
268 268
@@ -275,7 +275,7 @@ static int create_image(int platform_mode)
275 Platform_finish: 275 Platform_finish:
276 platform_finish(platform_mode); 276 platform_finish(platform_mode);
277 277
278 device_power_up(in_suspend ? 278 dpm_resume_noirq(in_suspend ?
279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
280 280
281 return error; 281 return error;
@@ -304,7 +304,7 @@ int hibernation_snapshot(int platform_mode)
304 goto Close; 304 goto Close;
305 305
306 suspend_console(); 306 suspend_console();
307 error = device_suspend(PMSG_FREEZE); 307 error = dpm_suspend_start(PMSG_FREEZE);
308 if (error) 308 if (error)
309 goto Recover_platform; 309 goto Recover_platform;
310 310
@@ -315,7 +315,7 @@ int hibernation_snapshot(int platform_mode)
315 /* Control returns here after successful restore */ 315 /* Control returns here after successful restore */
316 316
317 Resume_devices: 317 Resume_devices:
318 device_resume(in_suspend ? 318 dpm_resume_end(in_suspend ?
319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
320 resume_console(); 320 resume_console();
321 Close: 321 Close:
@@ -339,7 +339,7 @@ static int resume_target_kernel(bool platform_mode)
339{ 339{
340 int error; 340 int error;
341 341
342 error = device_power_down(PMSG_QUIESCE); 342 error = dpm_suspend_noirq(PMSG_QUIESCE);
343 if (error) { 343 if (error) {
344 printk(KERN_ERR "PM: Some devices failed to power down, " 344 printk(KERN_ERR "PM: Some devices failed to power down, "
345 "aborting resume\n"); 345 "aborting resume\n");
@@ -394,7 +394,7 @@ static int resume_target_kernel(bool platform_mode)
394 Cleanup: 394 Cleanup:
395 platform_restore_cleanup(platform_mode); 395 platform_restore_cleanup(platform_mode);
396 396
397 device_power_up(PMSG_RECOVER); 397 dpm_resume_noirq(PMSG_RECOVER);
398 398
399 return error; 399 return error;
400} 400}
@@ -414,10 +414,10 @@ int hibernation_restore(int platform_mode)
414 414
415 pm_prepare_console(); 415 pm_prepare_console();
416 suspend_console(); 416 suspend_console();
417 error = device_suspend(PMSG_QUIESCE); 417 error = dpm_suspend_start(PMSG_QUIESCE);
418 if (!error) { 418 if (!error) {
419 error = resume_target_kernel(platform_mode); 419 error = resume_target_kernel(platform_mode);
420 device_resume(PMSG_RECOVER); 420 dpm_resume_end(PMSG_RECOVER);
421 } 421 }
422 resume_console(); 422 resume_console();
423 pm_restore_console(); 423 pm_restore_console();
@@ -447,14 +447,14 @@ int hibernation_platform_enter(void)
447 447
448 entering_platform_hibernation = true; 448 entering_platform_hibernation = true;
449 suspend_console(); 449 suspend_console();
450 error = device_suspend(PMSG_HIBERNATE); 450 error = dpm_suspend_start(PMSG_HIBERNATE);
451 if (error) { 451 if (error) {
452 if (hibernation_ops->recover) 452 if (hibernation_ops->recover)
453 hibernation_ops->recover(); 453 hibernation_ops->recover();
454 goto Resume_devices; 454 goto Resume_devices;
455 } 455 }
456 456
457 error = device_power_down(PMSG_HIBERNATE); 457 error = dpm_suspend_noirq(PMSG_HIBERNATE);
458 if (error) 458 if (error)
459 goto Resume_devices; 459 goto Resume_devices;
460 460
@@ -479,11 +479,11 @@ int hibernation_platform_enter(void)
479 Platofrm_finish: 479 Platofrm_finish:
480 hibernation_ops->finish(); 480 hibernation_ops->finish();
481 481
 482 device_power_up(PMSG_RESTORE); 482 dpm_resume_noirq(PMSG_RESTORE);
483 483
484 Resume_devices: 484 Resume_devices:
485 entering_platform_hibernation = false; 485 entering_platform_hibernation = false;
486 device_resume(PMSG_RESTORE); 486 dpm_resume_end(PMSG_RESTORE);
487 resume_console(); 487 resume_console();
488 488
489 Close: 489 Close:
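
Taken together, the call renames in this file follow one fixed mapping from the old device PM entry points to the new dpm_* ones (the same mapping is applied in kernel/power/suspend.c further below):

	device_suspend(msg)      ->  dpm_suspend_start(msg)
	device_power_down(msg)   ->  dpm_suspend_noirq(msg)
	device_power_up(msg)     ->  dpm_resume_noirq(msg)
	device_resume(msg)       ->  dpm_resume_end(msg)
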
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
new file mode 100644
index 000000000000..39ac698ef836
--- /dev/null
+++ b/kernel/power/hibernate_nvs.c
@@ -0,0 +1,135 @@
1/*
2 * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
3 *
4 * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/mm.h>
13#include <linux/suspend.h>
14
15/*
16 * Platforms, like ACPI, may want us to save some memory used by them during
17 * hibernation and to restore the contents of this memory during the subsequent
18 * resume. The code below implements a mechanism allowing us to do that.
19 */
20
21struct nvs_page {
22 unsigned long phys_start;
23 unsigned int size;
24 void *kaddr;
25 void *data;
26 struct list_head node;
27};
28
29static LIST_HEAD(nvs_list);
30
31/**
32 * hibernate_nvs_register - register platform NVS memory region to save
33 * @start - physical address of the region
34 * @size - size of the region
35 *
36 * The NVS region need not be page-aligned (both ends) and we arrange
37 * things so that the data from page-aligned addresses in this region will
38 * be copied into separate RAM pages.
39 */
40int hibernate_nvs_register(unsigned long start, unsigned long size)
41{
42 struct nvs_page *entry, *next;
43
44 while (size > 0) {
45 unsigned int nr_bytes;
46
47 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
48 if (!entry)
49 goto Error;
50
51 list_add_tail(&entry->node, &nvs_list);
52 entry->phys_start = start;
53 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
54 entry->size = (size < nr_bytes) ? size : nr_bytes;
55
56 start += entry->size;
57 size -= entry->size;
58 }
59 return 0;
60
61 Error:
62 list_for_each_entry_safe(entry, next, &nvs_list, node) {
63 list_del(&entry->node);
64 kfree(entry);
65 }
66 return -ENOMEM;
67}
68
69/**
70 * hibernate_nvs_free - free data pages allocated for saving NVS regions
71 */
72void hibernate_nvs_free(void)
73{
74 struct nvs_page *entry;
75
76 list_for_each_entry(entry, &nvs_list, node)
77 if (entry->data) {
78 free_page((unsigned long)entry->data);
79 entry->data = NULL;
80 if (entry->kaddr) {
81 iounmap(entry->kaddr);
82 entry->kaddr = NULL;
83 }
84 }
85}
86
87/**
88 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
89 */
90int hibernate_nvs_alloc(void)
91{
92 struct nvs_page *entry;
93
94 list_for_each_entry(entry, &nvs_list, node) {
95 entry->data = (void *)__get_free_page(GFP_KERNEL);
96 if (!entry->data) {
97 hibernate_nvs_free();
98 return -ENOMEM;
99 }
100 }
101 return 0;
102}
103
104/**
105 * hibernate_nvs_save - save NVS memory regions
106 */
107void hibernate_nvs_save(void)
108{
109 struct nvs_page *entry;
110
111 printk(KERN_INFO "PM: Saving platform NVS memory\n");
112
113 list_for_each_entry(entry, &nvs_list, node)
114 if (entry->data) {
115 entry->kaddr = ioremap(entry->phys_start, entry->size);
116 memcpy(entry->data, entry->kaddr, entry->size);
117 }
118}
119
120/**
121 * hibernate_nvs_restore - restore NVS memory regions
122 *
123 * This function is going to be called with interrupts disabled, so it
124 * cannot iounmap the virtual addresses used to access the NVS region.
125 */
126void hibernate_nvs_restore(void)
127{
128 struct nvs_page *entry;
129
130 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
131
132 list_for_each_entry(entry, &nvs_list, node)
133 if (entry->data)
134 memcpy(entry->kaddr, entry->data, entry->size);
135}
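
The hibernate_nvs_* helpers above are meant to be driven by platform code (ACPI is the motivating user named in the header comment). A minimal sketch of the intended call sequence, assuming hypothetical platform_nvs_* wrappers and a made-up physical range:

#include <linux/suspend.h>	/* hibernate_nvs_* declarations assumed here */

static int __init platform_nvs_init(void)
{
	/* Example region only; a real platform gets this from firmware. */
	return hibernate_nvs_register(0xfed00000, 0x1000);
}

static int platform_nvs_pre_snapshot(void)
{
	int error = hibernate_nvs_alloc();	/* backing RAM pages */

	if (!error)
		hibernate_nvs_save();		/* copy NVS contents into them */
	return error;
}

static void platform_nvs_restore_noirq(void)
{
	hibernate_nvs_restore();	/* runs with interrupts disabled */
}

static void platform_nvs_finish(void)
{
	hibernate_nvs_free();		/* mappings may be dropped here */
}

Keeping hibernate_nvs_restore() and hibernate_nvs_free() as separate steps mirrors the comment above: restore is called with interrupts disabled, so unmapping the kaddr mappings has to wait for the later free step.
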
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 868028280d13..f710e36930cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,20 +8,9 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/module.h>
12#include <linux/suspend.h>
13#include <linux/kobject.h> 11#include <linux/kobject.h>
14#include <linux/string.h> 12#include <linux/string.h>
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/kmod.h>
18#include <linux/init.h>
19#include <linux/console.h>
20#include <linux/cpu.h>
21#include <linux/resume-trace.h> 13#include <linux/resume-trace.h>
22#include <linux/freezer.h>
23#include <linux/vmstat.h>
24#include <linux/syscalls.h>
25 14
26#include "power.h" 15#include "power.h"
27 16
@@ -119,373 +108,6 @@ power_attr(pm_test);
119 108
120#endif /* CONFIG_PM_SLEEP */ 109#endif /* CONFIG_PM_SLEEP */
121 110
122#ifdef CONFIG_SUSPEND
123
124static int suspend_test(int level)
125{
126#ifdef CONFIG_PM_DEBUG
127 if (pm_test_level == level) {
128 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
129 mdelay(5000);
130 return 1;
131 }
132#endif /* !CONFIG_PM_DEBUG */
133 return 0;
134}
135
136#ifdef CONFIG_PM_TEST_SUSPEND
137
138/*
139 * We test the system suspend code by setting an RTC wakealarm a short
140 * time in the future, then suspending. Suspending the devices won't
141 * normally take long ... some systems only need a few milliseconds.
142 *
143 * The time it takes is system-specific though, so when we test this
144 * during system bootup we allow a LOT of time.
145 */
146#define TEST_SUSPEND_SECONDS 5
147
148static unsigned long suspend_test_start_time;
149
150static void suspend_test_start(void)
151{
152 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
153 * What we want is a hardware counter that will work correctly even
154 * during the irqs-are-off stages of the suspend/resume cycle...
155 */
156 suspend_test_start_time = jiffies;
157}
158
159static void suspend_test_finish(const char *label)
160{
161 long nj = jiffies - suspend_test_start_time;
162 unsigned msec;
163
164 msec = jiffies_to_msecs(abs(nj));
165 pr_info("PM: %s took %d.%03d seconds\n", label,
166 msec / 1000, msec % 1000);
167
168 /* Warning on suspend means the RTC alarm period needs to be
169 * larger -- the system was sooo slooowwww to suspend that the
170 * alarm (should have) fired before the system went to sleep!
171 *
172 * Warning on either suspend or resume also means the system
173 * has some performance issues. The stack dump of a WARN_ON
174 * is more likely to get the right attention than a printk...
175 */
176 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
177}
178
179#else
180
181static void suspend_test_start(void)
182{
183}
184
185static void suspend_test_finish(const char *label)
186{
187}
188
189#endif
190
191/* This is just an arbitrary number */
192#define FREE_PAGE_NUMBER (100)
193
194static struct platform_suspend_ops *suspend_ops;
195
196/**
197 * suspend_set_ops - Set the global suspend method table.
198 * @ops: Pointer to ops structure.
199 */
200
201void suspend_set_ops(struct platform_suspend_ops *ops)
202{
203 mutex_lock(&pm_mutex);
204 suspend_ops = ops;
205 mutex_unlock(&pm_mutex);
206}
207
208/**
209 * suspend_valid_only_mem - generic memory-only valid callback
210 *
211 * Platform drivers that implement mem suspend only and only need
212 * to check for that in their .valid callback can use this instead
213 * of rolling their own .valid callback.
214 */
215int suspend_valid_only_mem(suspend_state_t state)
216{
217 return state == PM_SUSPEND_MEM;
218}
219
220/**
221 * suspend_prepare - Do prep work before entering low-power state.
222 *
223 * This is common code that is called for each state that we're entering.
224 * Run suspend notifiers, allocate a console and stop all processes.
225 */
226static int suspend_prepare(void)
227{
228 int error;
229 unsigned int free_pages;
230
231 if (!suspend_ops || !suspend_ops->enter)
232 return -EPERM;
233
234 pm_prepare_console();
235
236 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
237 if (error)
238 goto Finish;
239
240 error = usermodehelper_disable();
241 if (error)
242 goto Finish;
243
244 if (suspend_freeze_processes()) {
245 error = -EAGAIN;
246 goto Thaw;
247 }
248
249 free_pages = global_page_state(NR_FREE_PAGES);
250 if (free_pages < FREE_PAGE_NUMBER) {
251 pr_debug("PM: free some memory\n");
252 shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
253 if (nr_free_pages() < FREE_PAGE_NUMBER) {
254 error = -ENOMEM;
255 printk(KERN_ERR "PM: No enough memory\n");
256 }
257 }
258 if (!error)
259 return 0;
260
261 Thaw:
262 suspend_thaw_processes();
263 usermodehelper_enable();
264 Finish:
265 pm_notifier_call_chain(PM_POST_SUSPEND);
266 pm_restore_console();
267 return error;
268}
269
270/* default implementation */
271void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
272{
273 local_irq_disable();
274}
275
276/* default implementation */
277void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
278{
279 local_irq_enable();
280}
281
282/**
283 * suspend_enter - enter the desired system sleep state.
284 * @state: state to enter
285 *
286 * This function should be called after devices have been suspended.
287 */
288static int suspend_enter(suspend_state_t state)
289{
290 int error;
291
292 if (suspend_ops->prepare) {
293 error = suspend_ops->prepare();
294 if (error)
295 return error;
296 }
297
298 error = device_power_down(PMSG_SUSPEND);
299 if (error) {
300 printk(KERN_ERR "PM: Some devices failed to power down\n");
301 goto Platfrom_finish;
302 }
303
304 if (suspend_ops->prepare_late) {
305 error = suspend_ops->prepare_late();
306 if (error)
307 goto Power_up_devices;
308 }
309
310 if (suspend_test(TEST_PLATFORM))
311 goto Platform_wake;
312
313 error = disable_nonboot_cpus();
314 if (error || suspend_test(TEST_CPUS))
315 goto Enable_cpus;
316
317 arch_suspend_disable_irqs();
318 BUG_ON(!irqs_disabled());
319
320 error = sysdev_suspend(PMSG_SUSPEND);
321 if (!error) {
322 if (!suspend_test(TEST_CORE))
323 error = suspend_ops->enter(state);
324 sysdev_resume();
325 }
326
327 arch_suspend_enable_irqs();
328 BUG_ON(irqs_disabled());
329
330 Enable_cpus:
331 enable_nonboot_cpus();
332
333 Platform_wake:
334 if (suspend_ops->wake)
335 suspend_ops->wake();
336
337 Power_up_devices:
338 device_power_up(PMSG_RESUME);
339
340 Platfrom_finish:
341 if (suspend_ops->finish)
342 suspend_ops->finish();
343
344 return error;
345}
346
347/**
348 * suspend_devices_and_enter - suspend devices and enter the desired system
349 * sleep state.
350 * @state: state to enter
351 */
352int suspend_devices_and_enter(suspend_state_t state)
353{
354 int error;
355
356 if (!suspend_ops)
357 return -ENOSYS;
358
359 if (suspend_ops->begin) {
360 error = suspend_ops->begin(state);
361 if (error)
362 goto Close;
363 }
364 suspend_console();
365 suspend_test_start();
366 error = device_suspend(PMSG_SUSPEND);
367 if (error) {
368 printk(KERN_ERR "PM: Some devices failed to suspend\n");
369 goto Recover_platform;
370 }
371 suspend_test_finish("suspend devices");
372 if (suspend_test(TEST_DEVICES))
373 goto Recover_platform;
374
375 suspend_enter(state);
376
377 Resume_devices:
378 suspend_test_start();
379 device_resume(PMSG_RESUME);
380 suspend_test_finish("resume devices");
381 resume_console();
382 Close:
383 if (suspend_ops->end)
384 suspend_ops->end();
385 return error;
386
387 Recover_platform:
388 if (suspend_ops->recover)
389 suspend_ops->recover();
390 goto Resume_devices;
391}
392
393/**
394 * suspend_finish - Do final work before exiting suspend sequence.
395 *
396 * Call platform code to clean up, restart processes, and free the
397 * console that we've allocated. This is not called for suspend-to-disk.
398 */
399static void suspend_finish(void)
400{
401 suspend_thaw_processes();
402 usermodehelper_enable();
403 pm_notifier_call_chain(PM_POST_SUSPEND);
404 pm_restore_console();
405}
406
407
408
409
410static const char * const pm_states[PM_SUSPEND_MAX] = {
411 [PM_SUSPEND_STANDBY] = "standby",
412 [PM_SUSPEND_MEM] = "mem",
413};
414
415static inline int valid_state(suspend_state_t state)
416{
417 /* All states need lowlevel support and need to be valid
418 * to the lowlevel implementation, no valid callback
419 * implies that none are valid. */
420 if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
421 return 0;
422 return 1;
423}
424
425
426/**
427 * enter_state - Do common work of entering low-power state.
428 * @state: pm_state structure for state we're entering.
429 *
430 * Make sure we're the only ones trying to enter a sleep state. Fail
431 * if someone has beat us to it, since we don't want anything weird to
432 * happen when we wake up.
433 * Then, do the setup for suspend, enter the state, and cleaup (after
434 * we've woken up).
435 */
436static int enter_state(suspend_state_t state)
437{
438 int error;
439
440 if (!valid_state(state))
441 return -ENODEV;
442
443 if (!mutex_trylock(&pm_mutex))
444 return -EBUSY;
445
446 printk(KERN_INFO "PM: Syncing filesystems ... ");
447 sys_sync();
448 printk("done.\n");
449
450 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
451 error = suspend_prepare();
452 if (error)
453 goto Unlock;
454
455 if (suspend_test(TEST_FREEZER))
456 goto Finish;
457
458 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
459 error = suspend_devices_and_enter(state);
460
461 Finish:
462 pr_debug("PM: Finishing wakeup.\n");
463 suspend_finish();
464 Unlock:
465 mutex_unlock(&pm_mutex);
466 return error;
467}
468
469
470/**
471 * pm_suspend - Externally visible function for suspending system.
472 * @state: Enumerated value of state to enter.
473 *
474 * Determine whether or not value is within range, get state
475 * structure, and enter (above).
476 */
477
478int pm_suspend(suspend_state_t state)
479{
480 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
481 return enter_state(state);
482 return -EINVAL;
483}
484
485EXPORT_SYMBOL(pm_suspend);
486
487#endif /* CONFIG_SUSPEND */
488
489struct kobject *power_kobj; 111struct kobject *power_kobj;
490 112
491/** 113/**
@@ -498,7 +120,6 @@ struct kobject *power_kobj;
498 * store() accepts one of those strings, translates it into the 120 * store() accepts one of those strings, translates it into the
499 * proper enumerated value, and initiates a suspend transition. 121 * proper enumerated value, and initiates a suspend transition.
500 */ 122 */
501
502static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 123static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
503 char *buf) 124 char *buf)
504{ 125{
@@ -596,7 +217,6 @@ static struct attribute_group attr_group = {
596 .attrs = g, 217 .attrs = g,
597}; 218};
598 219
599
600static int __init pm_init(void) 220static int __init pm_init(void)
601{ 221{
602 power_kobj = kobject_create_and_add("power", NULL); 222 power_kobj = kobject_create_and_add("power", NULL);
@@ -606,144 +226,3 @@ static int __init pm_init(void)
606} 226}
607 227
608core_initcall(pm_init); 228core_initcall(pm_init);
609
610
611#ifdef CONFIG_PM_TEST_SUSPEND
612
613#include <linux/rtc.h>
614
615/*
616 * To test system suspend, we need a hands-off mechanism to resume the
617 * system. RTCs wake alarms are a common self-contained mechanism.
618 */
619
620static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
621{
622 static char err_readtime[] __initdata =
623 KERN_ERR "PM: can't read %s time, err %d\n";
624 static char err_wakealarm [] __initdata =
625 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
626 static char err_suspend[] __initdata =
627 KERN_ERR "PM: suspend test failed, error %d\n";
628 static char info_test[] __initdata =
629 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
630
631 unsigned long now;
632 struct rtc_wkalrm alm;
633 int status;
634
635 /* this may fail if the RTC hasn't been initialized */
636 status = rtc_read_time(rtc, &alm.time);
637 if (status < 0) {
638 printk(err_readtime, dev_name(&rtc->dev), status);
639 return;
640 }
641 rtc_tm_to_time(&alm.time, &now);
642
643 memset(&alm, 0, sizeof alm);
644 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
645 alm.enabled = true;
646
647 status = rtc_set_alarm(rtc, &alm);
648 if (status < 0) {
649 printk(err_wakealarm, dev_name(&rtc->dev), status);
650 return;
651 }
652
653 if (state == PM_SUSPEND_MEM) {
654 printk(info_test, pm_states[state]);
655 status = pm_suspend(state);
656 if (status == -ENODEV)
657 state = PM_SUSPEND_STANDBY;
658 }
659 if (state == PM_SUSPEND_STANDBY) {
660 printk(info_test, pm_states[state]);
661 status = pm_suspend(state);
662 }
663 if (status < 0)
664 printk(err_suspend, status);
665
666 /* Some platforms can't detect that the alarm triggered the
667 * wakeup, or (accordingly) disable it after it afterwards.
668 * It's supposed to give oneshot behavior; cope.
669 */
670 alm.enabled = false;
671 rtc_set_alarm(rtc, &alm);
672}
673
674static int __init has_wakealarm(struct device *dev, void *name_ptr)
675{
676 struct rtc_device *candidate = to_rtc_device(dev);
677
678 if (!candidate->ops->set_alarm)
679 return 0;
680 if (!device_may_wakeup(candidate->dev.parent))
681 return 0;
682
683 *(const char **)name_ptr = dev_name(dev);
684 return 1;
685}
686
687/*
688 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
689 * at startup time. They're normally disabled, for faster boot and because
690 * we can't know which states really work on this particular system.
691 */
692static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
693
694static char warn_bad_state[] __initdata =
695 KERN_WARNING "PM: can't test '%s' suspend state\n";
696
697static int __init setup_test_suspend(char *value)
698{
699 unsigned i;
700
701 /* "=mem" ==> "mem" */
702 value++;
703 for (i = 0; i < PM_SUSPEND_MAX; i++) {
704 if (!pm_states[i])
705 continue;
706 if (strcmp(pm_states[i], value) != 0)
707 continue;
708 test_state = (__force suspend_state_t) i;
709 return 0;
710 }
711 printk(warn_bad_state, value);
712 return 0;
713}
714__setup("test_suspend", setup_test_suspend);
715
716static int __init test_suspend(void)
717{
718 static char warn_no_rtc[] __initdata =
719 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
720
721 char *pony = NULL;
722 struct rtc_device *rtc = NULL;
723
724 /* PM is initialized by now; is that state testable? */
725 if (test_state == PM_SUSPEND_ON)
726 goto done;
727 if (!valid_state(test_state)) {
728 printk(warn_bad_state, pm_states[test_state]);
729 goto done;
730 }
731
732 /* RTCs have initialized by now too ... can we use one? */
733 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
734 if (pony)
735 rtc = rtc_class_open(pony);
736 if (!rtc) {
737 printk(warn_no_rtc);
738 goto done;
739 }
740
741 /* go for it */
742 test_wakealarm(rtc, test_state);
743 rtc_class_close(rtc);
744done:
745 return 0;
746}
747late_initcall(test_suspend);
748
749#endif /* CONFIG_PM_TEST_SUSPEND */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46b5ec7a3afb..26d5a26f82e3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -45,7 +45,7 @@ static inline char *check_image_kernel(struct swsusp_info *info)
45 */ 45 */
46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) 46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
47 47
48/* kernel/power/disk.c */ 48/* kernel/power/hibernate.c */
49extern int hibernation_snapshot(int platform_mode); 49extern int hibernation_snapshot(int platform_mode);
50extern int hibernation_restore(int platform_mode); 50extern int hibernation_restore(int platform_mode);
51extern int hibernation_platform_enter(void); 51extern int hibernation_platform_enter(void);
@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
74 74
75extern int create_basic_memory_bitmaps(void); 75extern int create_basic_memory_bitmaps(void);
76extern void free_basic_memory_bitmaps(void); 76extern void free_basic_memory_bitmaps(void);
77extern unsigned int count_data_pages(void); 77extern int swsusp_shrink_memory(void);
78 78
79/** 79/**
80 * Auxiliary structure used for reading the snapshot image data and 80 * Auxiliary structure used for reading the snapshot image data and
@@ -147,9 +147,8 @@ extern int swsusp_swap_in_use(void);
147 */ 147 */
148#define SF_PLATFORM_MODE 1 148#define SF_PLATFORM_MODE 1
149 149
150/* kernel/power/disk.c */ 150/* kernel/power/hibernate.c */
151extern int swsusp_check(void); 151extern int swsusp_check(void);
152extern int swsusp_shrink_memory(void);
153extern void swsusp_free(void); 152extern void swsusp_free(void);
154extern int swsusp_read(unsigned int *flags_p); 153extern int swsusp_read(unsigned int *flags_p);
155extern int swsusp_write(unsigned int flags); 154extern int swsusp_write(unsigned int flags);
@@ -161,22 +160,36 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
161 unsigned int, char *); 160 unsigned int, char *);
162 161
163#ifdef CONFIG_SUSPEND 162#ifdef CONFIG_SUSPEND
164/* kernel/power/main.c */ 163/* kernel/power/suspend.c */
164extern const char *const pm_states[];
165
166extern bool valid_state(suspend_state_t state);
165extern int suspend_devices_and_enter(suspend_state_t state); 167extern int suspend_devices_and_enter(suspend_state_t state);
168extern int enter_state(suspend_state_t state);
166#else /* !CONFIG_SUSPEND */ 169#else /* !CONFIG_SUSPEND */
167static inline int suspend_devices_and_enter(suspend_state_t state) 170static inline int suspend_devices_and_enter(suspend_state_t state)
168{ 171{
169 return -ENOSYS; 172 return -ENOSYS;
170} 173}
174static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
175static inline bool valid_state(suspend_state_t state) { return false; }
171#endif /* !CONFIG_SUSPEND */ 176#endif /* !CONFIG_SUSPEND */
172 177
178#ifdef CONFIG_PM_TEST_SUSPEND
179/* kernel/power/suspend_test.c */
180extern void suspend_test_start(void);
181extern void suspend_test_finish(const char *label);
182#else /* !CONFIG_PM_TEST_SUSPEND */
183static inline void suspend_test_start(void) {}
184static inline void suspend_test_finish(const char *label) {}
185#endif /* !CONFIG_PM_TEST_SUSPEND */
186
173#ifdef CONFIG_PM_SLEEP 187#ifdef CONFIG_PM_SLEEP
174/* kernel/power/main.c */ 188/* kernel/power/main.c */
175extern int pm_notifier_call_chain(unsigned long val); 189extern int pm_notifier_call_chain(unsigned long val);
176#endif 190#endif
177 191
178#ifdef CONFIG_HIGHMEM 192#ifdef CONFIG_HIGHMEM
179unsigned int count_highmem_pages(void);
180int restore_highmem(void); 193int restore_highmem(void);
181#else 194#else
182static inline unsigned int count_highmem_pages(void) { return 0; } 195static inline unsigned int count_highmem_pages(void) { return 0; }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 33e2e4a819f9..523a451b45d3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,6 +39,14 @@ static int swsusp_page_is_free(struct page *);
39static void swsusp_set_page_forbidden(struct page *); 39static void swsusp_set_page_forbidden(struct page *);
40static void swsusp_unset_page_forbidden(struct page *); 40static void swsusp_unset_page_forbidden(struct page *);
41 41
42/*
43 * Preferred image size in bytes (tunable via /sys/power/image_size).
44 * When it is set to N, swsusp will do its best to ensure the image
45 * size will not exceed N bytes, but if that is impossible, it will
46 * try to create the smallest image possible.
47 */
48unsigned long image_size = 500 * 1024 * 1024;
49
42/* List of PBEs needed for restoring the pages that were allocated before 50/* List of PBEs needed for restoring the pages that were allocated before
43 * the suspend and included in the suspend image, but have also been 51 * the suspend and included in the suspend image, but have also been
44 * allocated by the "resume" kernel, so their contents cannot be written 52 * allocated by the "resume" kernel, so their contents cannot be written
@@ -840,7 +848,7 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
840 * pages. 848 * pages.
841 */ 849 */
842 850
843unsigned int count_highmem_pages(void) 851static unsigned int count_highmem_pages(void)
844{ 852{
845 struct zone *zone; 853 struct zone *zone;
846 unsigned int n = 0; 854 unsigned int n = 0;
@@ -902,7 +910,7 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
902 * pages. 910 * pages.
903 */ 911 */
904 912
905unsigned int count_data_pages(void) 913static unsigned int count_data_pages(void)
906{ 914{
907 struct zone *zone; 915 struct zone *zone;
908 unsigned long pfn, max_zone_pfn; 916 unsigned long pfn, max_zone_pfn;
@@ -1058,6 +1066,74 @@ void swsusp_free(void)
1058 buffer = NULL; 1066 buffer = NULL;
1059} 1067}
1060 1068
1069/**
1070 * swsusp_shrink_memory - Try to free as much memory as needed
1071 *
1072 * ... but do not OOM-kill anyone
1073 *
1074 * Notice: all userland should be stopped before it is called, or
1075 * livelock is possible.
1076 */
1077
1078#define SHRINK_BITE 10000
1079static inline unsigned long __shrink_memory(long tmp)
1080{
1081 if (tmp > SHRINK_BITE)
1082 tmp = SHRINK_BITE;
1083 return shrink_all_memory(tmp);
1084}
1085
1086int swsusp_shrink_memory(void)
1087{
1088 long tmp;
1089 struct zone *zone;
1090 unsigned long pages = 0;
1091 unsigned int i = 0;
1092 char *p = "-\\|/";
1093 struct timeval start, stop;
1094
1095 printk(KERN_INFO "PM: Shrinking memory... ");
1096 do_gettimeofday(&start);
1097 do {
1098 long size, highmem_size;
1099
1100 highmem_size = count_highmem_pages();
1101 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
1102 tmp = size;
1103 size += highmem_size;
1104 for_each_populated_zone(zone) {
1105 tmp += snapshot_additional_pages(zone);
1106 if (is_highmem(zone)) {
1107 highmem_size -=
1108 zone_page_state(zone, NR_FREE_PAGES);
1109 } else {
1110 tmp -= zone_page_state(zone, NR_FREE_PAGES);
1111 tmp += zone->lowmem_reserve[ZONE_NORMAL];
1112 }
1113 }
1114
1115 if (highmem_size < 0)
1116 highmem_size = 0;
1117
1118 tmp += highmem_size;
1119 if (tmp > 0) {
1120 tmp = __shrink_memory(tmp);
1121 if (!tmp)
1122 return -ENOMEM;
1123 pages += tmp;
1124 } else if (size > image_size / PAGE_SIZE) {
1125 tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
1126 pages += tmp;
1127 }
1128 printk("\b%c", p[i++%4]);
1129 } while (tmp > 0);
1130 do_gettimeofday(&stop);
1131 printk("\bdone (%lu pages freed)\n", pages);
1132 swsusp_show_speed(&start, &stop, pages, "Freed");
1133
1134 return 0;
1135}
1136
1061#ifdef CONFIG_HIGHMEM 1137#ifdef CONFIG_HIGHMEM
1062/** 1138/**
1063 * count_pages_for_highmem - compute the number of non-highmem pages 1139 * count_pages_for_highmem - compute the number of non-highmem pages
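
For reference, each pass of the do/while loop above reduces to two shrink targets: make the snapshot fit into currently free memory at all, and otherwise push the number of saveable pages under image_size. A simplified sketch of that decision (assumption: the per-zone reserve and highmem bookkeeping are already folded into the needed/free_now inputs):

static long pages_to_free(long needed, long free_now, long image_pages)
{
	if (needed > free_now)		/* snapshot will not fit into free RAM */
		return needed - free_now;
	if (needed > image_pages)	/* snapshot exceeds the preferred image size */
		return needed - image_pages;
	return 0;			/* nothing left to free */
}

With the default image_size of 500 MB and 4 KB pages, image_pages is 128000; a snapshot of, say, 150000 saveable pages that already fits into free memory still causes shrink_all_memory() to be asked for the remaining 22000 pages, issued in SHRINK_BITE (10000-page) chunks.
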
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
new file mode 100644
index 000000000000..6f10dfc2d3e9
--- /dev/null
+++ b/kernel/power/suspend.c
@@ -0,0 +1,300 @@
1/*
2 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/string.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/console.h>
16#include <linux/cpu.h>
17#include <linux/syscalls.h>
18
19#include "power.h"
20
21const char *const pm_states[PM_SUSPEND_MAX] = {
22 [PM_SUSPEND_STANDBY] = "standby",
23 [PM_SUSPEND_MEM] = "mem",
24};
25
26static struct platform_suspend_ops *suspend_ops;
27
28/**
29 * suspend_set_ops - Set the global suspend method table.
30 * @ops: Pointer to ops structure.
31 */
32void suspend_set_ops(struct platform_suspend_ops *ops)
33{
34 mutex_lock(&pm_mutex);
35 suspend_ops = ops;
36 mutex_unlock(&pm_mutex);
37}
38
39bool valid_state(suspend_state_t state)
40{
41 /*
42 * All states need lowlevel support and need to be valid to the lowlevel
43 * implementation, no valid callback implies that none are valid.
44 */
45 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
46}
47
48/**
49 * suspend_valid_only_mem - generic memory-only valid callback
50 *
51 * Platform drivers that implement mem suspend only and only need
52 * to check for that in their .valid callback can use this instead
53 * of rolling their own .valid callback.
54 */
55int suspend_valid_only_mem(suspend_state_t state)
56{
57 return state == PM_SUSPEND_MEM;
58}
59
60static int suspend_test(int level)
61{
62#ifdef CONFIG_PM_DEBUG
63 if (pm_test_level == level) {
64 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
65 mdelay(5000);
66 return 1;
67 }
68#endif /* !CONFIG_PM_DEBUG */
69 return 0;
70}
71
72/**
73 * suspend_prepare - Do prep work before entering low-power state.
74 *
75 * This is common code that is called for each state that we're entering.
76 * Run suspend notifiers, allocate a console and stop all processes.
77 */
78static int suspend_prepare(void)
79{
80 int error;
81
82 if (!suspend_ops || !suspend_ops->enter)
83 return -EPERM;
84
85 pm_prepare_console();
86
87 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
88 if (error)
89 goto Finish;
90
91 error = usermodehelper_disable();
92 if (error)
93 goto Finish;
94
95 error = suspend_freeze_processes();
96 if (!error)
97 return 0;
98
99 suspend_thaw_processes();
100 usermodehelper_enable();
101 Finish:
102 pm_notifier_call_chain(PM_POST_SUSPEND);
103 pm_restore_console();
104 return error;
105}
106
107/* default implementation */
108void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
109{
110 local_irq_disable();
111}
112
113/* default implementation */
114void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
115{
116 local_irq_enable();
117}
118
119/**
120 * suspend_enter - enter the desired system sleep state.
121 * @state: state to enter
122 *
123 * This function should be called after devices have been suspended.
124 */
125static int suspend_enter(suspend_state_t state)
126{
127 int error;
128
129 if (suspend_ops->prepare) {
130 error = suspend_ops->prepare();
131 if (error)
132 return error;
133 }
134
135 error = dpm_suspend_noirq(PMSG_SUSPEND);
136 if (error) {
137 printk(KERN_ERR "PM: Some devices failed to power down\n");
138 goto Platfrom_finish;
139 }
140
141 if (suspend_ops->prepare_late) {
142 error = suspend_ops->prepare_late();
143 if (error)
144 goto Power_up_devices;
145 }
146
147 if (suspend_test(TEST_PLATFORM))
148 goto Platform_wake;
149
150 error = disable_nonboot_cpus();
151 if (error || suspend_test(TEST_CPUS))
152 goto Enable_cpus;
153
154 arch_suspend_disable_irqs();
155 BUG_ON(!irqs_disabled());
156
157 error = sysdev_suspend(PMSG_SUSPEND);
158 if (!error) {
159 if (!suspend_test(TEST_CORE))
160 error = suspend_ops->enter(state);
161 sysdev_resume();
162 }
163
164 arch_suspend_enable_irqs();
165 BUG_ON(irqs_disabled());
166
167 Enable_cpus:
168 enable_nonboot_cpus();
169
170 Platform_wake:
171 if (suspend_ops->wake)
172 suspend_ops->wake();
173
174 Power_up_devices:
175 dpm_resume_noirq(PMSG_RESUME);
176
177 Platfrom_finish:
178 if (suspend_ops->finish)
179 suspend_ops->finish();
180
181 return error;
182}
183
184/**
185 * suspend_devices_and_enter - suspend devices and enter the desired system
186 * sleep state.
187 * @state: state to enter
188 */
189int suspend_devices_and_enter(suspend_state_t state)
190{
191 int error;
192
193 if (!suspend_ops)
194 return -ENOSYS;
195
196 if (suspend_ops->begin) {
197 error = suspend_ops->begin(state);
198 if (error)
199 goto Close;
200 }
201 suspend_console();
202 suspend_test_start();
203 error = dpm_suspend_start(PMSG_SUSPEND);
204 if (error) {
205 printk(KERN_ERR "PM: Some devices failed to suspend\n");
206 goto Recover_platform;
207 }
208 suspend_test_finish("suspend devices");
209 if (suspend_test(TEST_DEVICES))
210 goto Recover_platform;
211
212 suspend_enter(state);
213
214 Resume_devices:
215 suspend_test_start();
216 dpm_resume_end(PMSG_RESUME);
217 suspend_test_finish("resume devices");
218 resume_console();
219 Close:
220 if (suspend_ops->end)
221 suspend_ops->end();
222 return error;
223
224 Recover_platform:
225 if (suspend_ops->recover)
226 suspend_ops->recover();
227 goto Resume_devices;
228}
229
230/**
231 * suspend_finish - Do final work before exiting suspend sequence.
232 *
233 * Call platform code to clean up, restart processes, and free the
234 * console that we've allocated. This is not called for suspend-to-disk.
235 */
236static void suspend_finish(void)
237{
238 suspend_thaw_processes();
239 usermodehelper_enable();
240 pm_notifier_call_chain(PM_POST_SUSPEND);
241 pm_restore_console();
242}
243
244/**
245 * enter_state - Do common work of entering low-power state.
246 * @state: pm_state structure for state we're entering.
247 *
248 * Make sure we're the only ones trying to enter a sleep state. Fail
249 * if someone has beat us to it, since we don't want anything weird to
250 * happen when we wake up.
 251 * Then, do the setup for suspend, enter the state, and clean up (after
252 * we've woken up).
253 */
254int enter_state(suspend_state_t state)
255{
256 int error;
257
258 if (!valid_state(state))
259 return -ENODEV;
260
261 if (!mutex_trylock(&pm_mutex))
262 return -EBUSY;
263
264 printk(KERN_INFO "PM: Syncing filesystems ... ");
265 sys_sync();
266 printk("done.\n");
267
268 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
269 error = suspend_prepare();
270 if (error)
271 goto Unlock;
272
273 if (suspend_test(TEST_FREEZER))
274 goto Finish;
275
276 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
277 error = suspend_devices_and_enter(state);
278
279 Finish:
280 pr_debug("PM: Finishing wakeup.\n");
281 suspend_finish();
282 Unlock:
283 mutex_unlock(&pm_mutex);
284 return error;
285}
286
287/**
288 * pm_suspend - Externally visible function for suspending system.
289 * @state: Enumerated value of state to enter.
290 *
291 * Determine whether or not value is within range, get state
292 * structure, and enter (above).
293 */
294int pm_suspend(suspend_state_t state)
295{
296 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
297 return enter_state(state);
298 return -EINVAL;
299}
300EXPORT_SYMBOL(pm_suspend);
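
suspend_set_ops() and suspend_valid_only_mem(), moved here unchanged, remain the hooks for platform code. A hypothetical sketch of how a platform driver would register itself (the foo_* names are invented; .enter is required by suspend_prepare(), and .valid must accept at least one state for valid_state() to pass):

static int foo_suspend_enter(suspend_state_t state)
{
	/* Put the platform into suspend-to-RAM here; return 0 on wakeup. */
	return 0;
}

static struct platform_suspend_ops foo_suspend_ops = {
	.valid	= suspend_valid_only_mem,	/* only "mem" is supported */
	.enter	= foo_suspend_enter,
};

static int __init foo_pm_init(void)
{
	suspend_set_ops(&foo_suspend_ops);	/* takes pm_mutex internally */
	return 0;
}
late_initcall(foo_pm_init);

With that in place, writing "mem" to /sys/power/state ends up in pm_suspend() -> enter_state() -> suspend_devices_and_enter() exactly as laid out above.
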
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
new file mode 100644
index 000000000000..17d8bb1acf9c
--- /dev/null
+++ b/kernel/power/suspend_test.c
@@ -0,0 +1,187 @@
1/*
2 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
3 *
4 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/rtc.h>
11
12#include "power.h"
13
14/*
15 * We test the system suspend code by setting an RTC wakealarm a short
16 * time in the future, then suspending. Suspending the devices won't
17 * normally take long ... some systems only need a few milliseconds.
18 *
19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time.
21 */
22#define TEST_SUSPEND_SECONDS 5
23
24static unsigned long suspend_test_start_time;
25
26void suspend_test_start(void)
27{
28 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
29 * What we want is a hardware counter that will work correctly even
30 * during the irqs-are-off stages of the suspend/resume cycle...
31 */
32 suspend_test_start_time = jiffies;
33}
34
35void suspend_test_finish(const char *label)
36{
37 long nj = jiffies - suspend_test_start_time;
38 unsigned msec;
39
40 msec = jiffies_to_msecs(abs(nj));
41 pr_info("PM: %s took %d.%03d seconds\n", label,
42 msec / 1000, msec % 1000);
43
44 /* Warning on suspend means the RTC alarm period needs to be
45 * larger -- the system was sooo slooowwww to suspend that the
46 * alarm (should have) fired before the system went to sleep!
47 *
48 * Warning on either suspend or resume also means the system
49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk...
51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
53}
54
55/*
56 * To test system suspend, we need a hands-off mechanism to resume the
57 * system. RTCs wake alarms are a common self-contained mechanism.
58 */
59
60static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
61{
62 static char err_readtime[] __initdata =
63 KERN_ERR "PM: can't read %s time, err %d\n";
64 static char err_wakealarm [] __initdata =
65 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
66 static char err_suspend[] __initdata =
67 KERN_ERR "PM: suspend test failed, error %d\n";
68 static char info_test[] __initdata =
69 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
70
71 unsigned long now;
72 struct rtc_wkalrm alm;
73 int status;
74
75 /* this may fail if the RTC hasn't been initialized */
76 status = rtc_read_time(rtc, &alm.time);
77 if (status < 0) {
78 printk(err_readtime, dev_name(&rtc->dev), status);
79 return;
80 }
81 rtc_tm_to_time(&alm.time, &now);
82
83 memset(&alm, 0, sizeof alm);
84 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
85 alm.enabled = true;
86
87 status = rtc_set_alarm(rtc, &alm);
88 if (status < 0) {
89 printk(err_wakealarm, dev_name(&rtc->dev), status);
90 return;
91 }
92
93 if (state == PM_SUSPEND_MEM) {
94 printk(info_test, pm_states[state]);
95 status = pm_suspend(state);
96 if (status == -ENODEV)
97 state = PM_SUSPEND_STANDBY;
98 }
99 if (state == PM_SUSPEND_STANDBY) {
100 printk(info_test, pm_states[state]);
101 status = pm_suspend(state);
102 }
103 if (status < 0)
104 printk(err_suspend, status);
105
106 /* Some platforms can't detect that the alarm triggered the
 107 * wakeup, or (accordingly) disable it afterwards.
108 * It's supposed to give oneshot behavior; cope.
109 */
110 alm.enabled = false;
111 rtc_set_alarm(rtc, &alm);
112}
113
114static int __init has_wakealarm(struct device *dev, void *name_ptr)
115{
116 struct rtc_device *candidate = to_rtc_device(dev);
117
118 if (!candidate->ops->set_alarm)
119 return 0;
120 if (!device_may_wakeup(candidate->dev.parent))
121 return 0;
122
123 *(const char **)name_ptr = dev_name(dev);
124 return 1;
125}
126
127/*
128 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
129 * at startup time. They're normally disabled, for faster boot and because
130 * we can't know which states really work on this particular system.
131 */
132static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
133
134static char warn_bad_state[] __initdata =
135 KERN_WARNING "PM: can't test '%s' suspend state\n";
136
137static int __init setup_test_suspend(char *value)
138{
139 unsigned i;
140
141 /* "=mem" ==> "mem" */
142 value++;
143 for (i = 0; i < PM_SUSPEND_MAX; i++) {
144 if (!pm_states[i])
145 continue;
146 if (strcmp(pm_states[i], value) != 0)
147 continue;
148 test_state = (__force suspend_state_t) i;
149 return 0;
150 }
151 printk(warn_bad_state, value);
152 return 0;
153}
154__setup("test_suspend", setup_test_suspend);
155
156static int __init test_suspend(void)
157{
158 static char warn_no_rtc[] __initdata =
159 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
160
161 char *pony = NULL;
162 struct rtc_device *rtc = NULL;
163
164 /* PM is initialized by now; is that state testable? */
165 if (test_state == PM_SUSPEND_ON)
166 goto done;
167 if (!valid_state(test_state)) {
168 printk(warn_bad_state, pm_states[test_state]);
169 goto done;
170 }
171
172 /* RTCs have initialized by now too ... can we use one? */
173 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
174 if (pony)
175 rtc = rtc_class_open(pony);
176 if (!rtc) {
177 printk(warn_no_rtc);
178 goto done;
179 }
180
181 /* go for it */
182 test_wakealarm(rtc, test_state);
183 rtc_class_close(rtc);
184done:
185 return 0;
186}
187late_initcall(test_suspend);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 78c35047586d..6a07f4dbf2f8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -55,14 +55,6 @@
55 55
56#include "power.h" 56#include "power.h"
57 57
58/*
59 * Preferred image size in bytes (tunable via /sys/power/image_size).
60 * When it is set to N, swsusp will do its best to ensure the image
61 * size will not exceed N bytes, but if that is impossible, it will
62 * try to create the smallest image possible.
63 */
64unsigned long image_size = 500 * 1024 * 1024;
65
66int in_suspend __nosavedata = 0; 58int in_suspend __nosavedata = 0;
67 59
68/** 60/**
@@ -194,193 +186,3 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
194 centisecs / 100, centisecs % 100, 186 centisecs / 100, centisecs % 100,
195 kps / 1000, (kps % 1000) / 10); 187 kps / 1000, (kps % 1000) / 10);
196} 188}
197
198/**
199 * swsusp_shrink_memory - Try to free as much memory as needed
200 *
201 * ... but do not OOM-kill anyone
202 *
203 * Notice: all userland should be stopped before it is called, or
204 * livelock is possible.
205 */
206
207#define SHRINK_BITE 10000
208static inline unsigned long __shrink_memory(long tmp)
209{
210 if (tmp > SHRINK_BITE)
211 tmp = SHRINK_BITE;
212 return shrink_all_memory(tmp);
213}
214
215int swsusp_shrink_memory(void)
216{
217 long tmp;
218 struct zone *zone;
219 unsigned long pages = 0;
220 unsigned int i = 0;
221 char *p = "-\\|/";
222 struct timeval start, stop;
223
224 printk(KERN_INFO "PM: Shrinking memory... ");
225 do_gettimeofday(&start);
226 do {
227 long size, highmem_size;
228
229 highmem_size = count_highmem_pages();
230 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
231 tmp = size;
232 size += highmem_size;
233 for_each_populated_zone(zone) {
234 tmp += snapshot_additional_pages(zone);
235 if (is_highmem(zone)) {
236 highmem_size -=
237 zone_page_state(zone, NR_FREE_PAGES);
238 } else {
239 tmp -= zone_page_state(zone, NR_FREE_PAGES);
240 tmp += zone->lowmem_reserve[ZONE_NORMAL];
241 }
242 }
243
244 if (highmem_size < 0)
245 highmem_size = 0;
246
247 tmp += highmem_size;
248 if (tmp > 0) {
249 tmp = __shrink_memory(tmp);
250 if (!tmp)
251 return -ENOMEM;
252 pages += tmp;
253 } else if (size > image_size / PAGE_SIZE) {
254 tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
255 pages += tmp;
256 }
257 printk("\b%c", p[i++%4]);
258 } while (tmp > 0);
259 do_gettimeofday(&stop);
260 printk("\bdone (%lu pages freed)\n", pages);
261 swsusp_show_speed(&start, &stop, pages, "Freed");
262
263 return 0;
264}
265
266/*
267 * Platforms, like ACPI, may want us to save some memory used by them during
268 * hibernation and to restore the contents of this memory during the subsequent
269 * resume. The code below implements a mechanism allowing us to do that.
270 */
271
272struct nvs_page {
273 unsigned long phys_start;
274 unsigned int size;
275 void *kaddr;
276 void *data;
277 struct list_head node;
278};
279
280static LIST_HEAD(nvs_list);
281
282/**
283 * hibernate_nvs_register - register platform NVS memory region to save
284 * @start - physical address of the region
285 * @size - size of the region
286 *
287 * The NVS region need not be page-aligned (both ends) and we arrange
288 * things so that the data from page-aligned addresses in this region will
289 * be copied into separate RAM pages.
290 */
291int hibernate_nvs_register(unsigned long start, unsigned long size)
292{
293 struct nvs_page *entry, *next;
294
295 while (size > 0) {
296 unsigned int nr_bytes;
297
298 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
299 if (!entry)
300 goto Error;
301
302 list_add_tail(&entry->node, &nvs_list);
303 entry->phys_start = start;
304 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
305 entry->size = (size < nr_bytes) ? size : nr_bytes;
306
307 start += entry->size;
308 size -= entry->size;
309 }
310 return 0;
311
312 Error:
313 list_for_each_entry_safe(entry, next, &nvs_list, node) {
314 list_del(&entry->node);
315 kfree(entry);
316 }
317 return -ENOMEM;
318}
319
320/**
321 * hibernate_nvs_free - free data pages allocated for saving NVS regions
322 */
323void hibernate_nvs_free(void)
324{
325 struct nvs_page *entry;
326
327 list_for_each_entry(entry, &nvs_list, node)
328 if (entry->data) {
329 free_page((unsigned long)entry->data);
330 entry->data = NULL;
331 if (entry->kaddr) {
332 iounmap(entry->kaddr);
333 entry->kaddr = NULL;
334 }
335 }
336}
337
338/**
339 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
340 */
341int hibernate_nvs_alloc(void)
342{
343 struct nvs_page *entry;
344
345 list_for_each_entry(entry, &nvs_list, node) {
346 entry->data = (void *)__get_free_page(GFP_KERNEL);
347 if (!entry->data) {
348 hibernate_nvs_free();
349 return -ENOMEM;
350 }
351 }
352 return 0;
353}
354
355/**
356 * hibernate_nvs_save - save NVS memory regions
357 */
358void hibernate_nvs_save(void)
359{
360 struct nvs_page *entry;
361
362 printk(KERN_INFO "PM: Saving platform NVS memory\n");
363
364 list_for_each_entry(entry, &nvs_list, node)
365 if (entry->data) {
366 entry->kaddr = ioremap(entry->phys_start, entry->size);
367 memcpy(entry->data, entry->kaddr, entry->size);
368 }
369}
370
371/**
372 * hibernate_nvs_restore - restore NVS memory regions
373 *
374 * This function is going to be called with interrupts disabled, so it
375 * cannot iounmap the virtual addresses used to access the NVS region.
376 */
377void hibernate_nvs_restore(void)
378{
379 struct nvs_page *entry;
380
381 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
382
383 list_for_each_entry(entry, &nvs_list, node)
384 if (entry->data)
385 memcpy(entry->kaddr, entry->data, entry->size);
386}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d254306562cd..95c08a8cc2ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2056,7 +2056,7 @@ unsigned long global_lru_pages(void)
2056 + global_page_state(NR_INACTIVE_FILE); 2056 + global_page_state(NR_INACTIVE_FILE);
2057} 2057}
2058 2058
2059#ifdef CONFIG_PM 2059#ifdef CONFIG_HIBERNATION
2060/* 2060/*
2061 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages 2061 * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
2062 * from LRU lists system-wide, for given pass and priority. 2062 * from LRU lists system-wide, for given pass and priority.
@@ -2196,7 +2196,7 @@ out:
2196 2196
2197 return sc.nr_reclaimed; 2197 return sc.nr_reclaimed;
2198} 2198}
2199#endif 2199#endif /* CONFIG_HIBERNATION */
2200 2200
2201/* It's optimal to keep kswapds on the same CPUs as their memory, but 2201/* It's optimal to keep kswapds on the same CPUs as their memory, but
2202 not required for correctness. So if the last cpu in a node goes 2202 not required for correctness. So if the last cpu in a node goes