aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorVegard Nossum <vegard.nossum@gmail.com>2009-06-15 09:50:49 -0400
committerVegard Nossum <vegard.nossum@gmail.com>2009-06-15 09:50:49 -0400
commit722f2a6c87f34ee0fd0130a8cf45f81e0705594a (patch)
tree50b054df34d2731eb0ba0cf1a6c27e43e7eed428 /kernel
parent7a0aeb14e18ad59394bd9bbc6e57fb345819e748 (diff)
parent45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge commit 'linus/master' into HEAD
Conflicts: MAINTAINERS Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/irq/handle.c2
-rw-r--r--kernel/kallsyms.c134
-rw-r--r--kernel/kexec.c14
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/params.c46
-rw-r--r--kernel/perf_counter.c95
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/Makefile5
-rw-r--r--kernel/power/hibernate.c (renamed from kernel/power/disk.c)34
-rw-r--r--kernel/power/hibernate_nvs.c135
-rw-r--r--kernel/power/main.c521
-rw-r--r--kernel/power/power.h25
-rw-r--r--kernel/power/poweroff.c2
-rw-r--r--kernel/power/snapshot.c80
-rw-r--r--kernel/power/suspend.c300
-rw-r--r--kernel/power/suspend_test.c187
-rw-r--r--kernel/power/swsusp.c198
-rw-r--r--kernel/rtmutex.c2
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/timer.c1
20 files changed, 953 insertions, 834 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 104578541230..065205bdd920 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -45,7 +45,7 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
45#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 45#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
46static void __init init_irq_default_affinity(void) 46static void __init init_irq_default_affinity(void)
47{ 47{
48 alloc_bootmem_cpumask_var(&irq_default_affinity); 48 alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
49 cpumask_setall(irq_default_affinity); 49 cpumask_setall(irq_default_affinity);
50} 50}
51#else 51#else
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 374faf9bfdc7..3a29dbe7898e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,12 +30,16 @@
30#define all_var 0 30#define all_var 0
31#endif 31#endif
32 32
33/* These will be re-linked against their real values during the second link stage */ 33/*
34 * These will be re-linked against their real values
35 * during the second link stage.
36 */
34extern const unsigned long kallsyms_addresses[] __attribute__((weak)); 37extern const unsigned long kallsyms_addresses[] __attribute__((weak));
35extern const u8 kallsyms_names[] __attribute__((weak)); 38extern const u8 kallsyms_names[] __attribute__((weak));
36 39
37/* tell the compiler that the count isn't in the small data section if the arch 40/*
38 * has one (eg: FRV) 41 * Tell the compiler that the count isn't in the small data section if the arch
42 * has one (eg: FRV).
39 */ 43 */
40extern const unsigned long kallsyms_num_syms 44extern const unsigned long kallsyms_num_syms
41__attribute__((weak, section(".rodata"))); 45__attribute__((weak, section(".rodata")));
@@ -75,31 +79,37 @@ static int is_ksym_addr(unsigned long addr)
75 return is_kernel_text(addr) || is_kernel_inittext(addr); 79 return is_kernel_text(addr) || is_kernel_inittext(addr);
76} 80}
77 81
78/* expand a compressed symbol data into the resulting uncompressed string, 82/*
79 given the offset to where the symbol is in the compressed stream */ 83 * Expand a compressed symbol data into the resulting uncompressed string,
84 * given the offset to where the symbol is in the compressed stream.
85 */
80static unsigned int kallsyms_expand_symbol(unsigned int off, char *result) 86static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
81{ 87{
82 int len, skipped_first = 0; 88 int len, skipped_first = 0;
83 const u8 *tptr, *data; 89 const u8 *tptr, *data;
84 90
85 /* get the compressed symbol length from the first symbol byte */ 91 /* Get the compressed symbol length from the first symbol byte. */
86 data = &kallsyms_names[off]; 92 data = &kallsyms_names[off];
87 len = *data; 93 len = *data;
88 data++; 94 data++;
89 95
90 /* update the offset to return the offset for the next symbol on 96 /*
91 * the compressed stream */ 97 * Update the offset to return the offset for the next symbol on
98 * the compressed stream.
99 */
92 off += len + 1; 100 off += len + 1;
93 101
94 /* for every byte on the compressed symbol data, copy the table 102 /*
95 entry for that byte */ 103 * For every byte on the compressed symbol data, copy the table
96 while(len) { 104 * entry for that byte.
97 tptr = &kallsyms_token_table[ kallsyms_token_index[*data] ]; 105 */
106 while (len) {
107 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
98 data++; 108 data++;
99 len--; 109 len--;
100 110
101 while (*tptr) { 111 while (*tptr) {
102 if(skipped_first) { 112 if (skipped_first) {
103 *result = *tptr; 113 *result = *tptr;
104 result++; 114 result++;
105 } else 115 } else
@@ -110,36 +120,46 @@ static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
110 120
111 *result = '\0'; 121 *result = '\0';
112 122
113 /* return to offset to the next symbol */ 123 /* Return to offset to the next symbol. */
114 return off; 124 return off;
115} 125}
116 126
117/* get symbol type information. This is encoded as a single char at the 127/*
118 * begining of the symbol name */ 128 * Get symbol type information. This is encoded as a single char at the
129 * beginning of the symbol name.
130 */
119static char kallsyms_get_symbol_type(unsigned int off) 131static char kallsyms_get_symbol_type(unsigned int off)
120{ 132{
121 /* get just the first code, look it up in the token table, and return the 133 /*
122 * first char from this token */ 134 * Get just the first code, look it up in the token table,
123 return kallsyms_token_table[ kallsyms_token_index[ kallsyms_names[off+1] ] ]; 135 * and return the first char from this token.
136 */
137 return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
124} 138}
125 139
126 140
127/* find the offset on the compressed stream given and index in the 141/*
128 * kallsyms array */ 142 * Find the offset on the compressed stream given and index in the
143 * kallsyms array.
144 */
129static unsigned int get_symbol_offset(unsigned long pos) 145static unsigned int get_symbol_offset(unsigned long pos)
130{ 146{
131 const u8 *name; 147 const u8 *name;
132 int i; 148 int i;
133 149
134 /* use the closest marker we have. We have markers every 256 positions, 150 /*
135 * so that should be close enough */ 151 * Use the closest marker we have. We have markers every 256 positions,
136 name = &kallsyms_names[ kallsyms_markers[pos>>8] ]; 152 * so that should be close enough.
153 */
154 name = &kallsyms_names[kallsyms_markers[pos >> 8]];
137 155
138 /* sequentially scan all the symbols up to the point we're searching for. 156 /*
139 * Every symbol is stored in a [<len>][<len> bytes of data] format, so we 157 * Sequentially scan all the symbols up to the point we're searching
140 * just need to add the len to the current pointer for every symbol we 158 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
141 * wish to skip */ 159 * so we just need to add the len to the current pointer for every
142 for(i = 0; i < (pos&0xFF); i++) 160 * symbol we wish to skip.
161 */
162 for (i = 0; i < (pos & 0xFF); i++)
143 name = name + (*name) + 1; 163 name = name + (*name) + 1;
144 164
145 return name - kallsyms_names; 165 return name - kallsyms_names;
@@ -190,7 +210,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
190 /* This kernel should never had been booted. */ 210 /* This kernel should never had been booted. */
191 BUG_ON(!kallsyms_addresses); 211 BUG_ON(!kallsyms_addresses);
192 212
193 /* do a binary search on the sorted kallsyms_addresses array */ 213 /* Do a binary search on the sorted kallsyms_addresses array. */
194 low = 0; 214 low = 0;
195 high = kallsyms_num_syms; 215 high = kallsyms_num_syms;
196 216
@@ -203,15 +223,15 @@ static unsigned long get_symbol_pos(unsigned long addr,
203 } 223 }
204 224
205 /* 225 /*
206 * search for the first aliased symbol. Aliased 226 * Search for the first aliased symbol. Aliased
207 * symbols are symbols with the same address 227 * symbols are symbols with the same address.
208 */ 228 */
209 while (low && kallsyms_addresses[low-1] == kallsyms_addresses[low]) 229 while (low && kallsyms_addresses[low-1] == kallsyms_addresses[low])
210 --low; 230 --low;
211 231
212 symbol_start = kallsyms_addresses[low]; 232 symbol_start = kallsyms_addresses[low];
213 233
214 /* Search for next non-aliased symbol */ 234 /* Search for next non-aliased symbol. */
215 for (i = low + 1; i < kallsyms_num_syms; i++) { 235 for (i = low + 1; i < kallsyms_num_syms; i++) {
216 if (kallsyms_addresses[i] > symbol_start) { 236 if (kallsyms_addresses[i] > symbol_start) {
217 symbol_end = kallsyms_addresses[i]; 237 symbol_end = kallsyms_addresses[i];
@@ -219,7 +239,7 @@ static unsigned long get_symbol_pos(unsigned long addr,
219 } 239 }
220 } 240 }
221 241
222 /* if we found no next symbol, we use the end of the section */ 242 /* If we found no next symbol, we use the end of the section. */
223 if (!symbol_end) { 243 if (!symbol_end) {
224 if (is_kernel_inittext(addr)) 244 if (is_kernel_inittext(addr))
225 symbol_end = (unsigned long)_einittext; 245 symbol_end = (unsigned long)_einittext;
@@ -252,10 +272,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
252 272
253/* 273/*
254 * Lookup an address 274 * Lookup an address
255 * - modname is set to NULL if it's in the kernel 275 * - modname is set to NULL if it's in the kernel.
256 * - we guarantee that the returned name is valid until we reschedule even if 276 * - We guarantee that the returned name is valid until we reschedule even if.
257 * it resides in a module 277 * It resides in a module.
258 * - we also guarantee that modname will be valid until rescheduled 278 * - We also guarantee that modname will be valid until rescheduled.
259 */ 279 */
260const char *kallsyms_lookup(unsigned long addr, 280const char *kallsyms_lookup(unsigned long addr,
261 unsigned long *symbolsize, 281 unsigned long *symbolsize,
@@ -276,7 +296,7 @@ const char *kallsyms_lookup(unsigned long addr,
276 return namebuf; 296 return namebuf;
277 } 297 }
278 298
279 /* see if it's in a module */ 299 /* See if it's in a module. */
280 return module_address_lookup(addr, symbolsize, offset, modname, 300 return module_address_lookup(addr, symbolsize, offset, modname,
281 namebuf); 301 namebuf);
282} 302}
@@ -294,7 +314,7 @@ int lookup_symbol_name(unsigned long addr, char *symname)
294 kallsyms_expand_symbol(get_symbol_offset(pos), symname); 314 kallsyms_expand_symbol(get_symbol_offset(pos), symname);
295 return 0; 315 return 0;
296 } 316 }
297 /* see if it's in a module */ 317 /* See if it's in a module. */
298 return lookup_module_symbol_name(addr, symname); 318 return lookup_module_symbol_name(addr, symname);
299} 319}
300 320
@@ -313,7 +333,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
313 modname[0] = '\0'; 333 modname[0] = '\0';
314 return 0; 334 return 0;
315 } 335 }
316 /* see if it's in a module */ 336 /* See if it's in a module. */
317 return lookup_module_symbol_attrs(addr, size, offset, modname, name); 337 return lookup_module_symbol_attrs(addr, size, offset, modname, name);
318} 338}
319 339
@@ -342,6 +362,7 @@ int sprint_symbol(char *buffer, unsigned long address)
342 362
343 return len; 363 return len;
344} 364}
365EXPORT_SYMBOL_GPL(sprint_symbol);
345 366
346/* Look up a kernel symbol and print it to the kernel messages. */ 367/* Look up a kernel symbol and print it to the kernel messages. */
347void __print_symbol(const char *fmt, unsigned long address) 368void __print_symbol(const char *fmt, unsigned long address)
@@ -352,13 +373,13 @@ void __print_symbol(const char *fmt, unsigned long address)
352 373
353 printk(fmt, buffer); 374 printk(fmt, buffer);
354} 375}
376EXPORT_SYMBOL(__print_symbol);
355 377
356/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */ 378/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
357struct kallsym_iter 379struct kallsym_iter {
358{
359 loff_t pos; 380 loff_t pos;
360 unsigned long value; 381 unsigned long value;
361 unsigned int nameoff; /* If iterating in core kernel symbols */ 382 unsigned int nameoff; /* If iterating in core kernel symbols. */
362 char type; 383 char type;
363 char name[KSYM_NAME_LEN]; 384 char name[KSYM_NAME_LEN];
364 char module_name[MODULE_NAME_LEN]; 385 char module_name[MODULE_NAME_LEN];
@@ -404,7 +425,7 @@ static int update_iter(struct kallsym_iter *iter, loff_t pos)
404 iter->pos = pos; 425 iter->pos = pos;
405 return get_ksymbol_mod(iter); 426 return get_ksymbol_mod(iter);
406 } 427 }
407 428
408 /* If we're not on the desired position, reset to new position. */ 429 /* If we're not on the desired position, reset to new position. */
409 if (pos != iter->pos) 430 if (pos != iter->pos)
410 reset_iter(iter, pos); 431 reset_iter(iter, pos);
@@ -439,23 +460,25 @@ static int s_show(struct seq_file *m, void *p)
439{ 460{
440 struct kallsym_iter *iter = m->private; 461 struct kallsym_iter *iter = m->private;
441 462
442 /* Some debugging symbols have no name. Ignore them. */ 463 /* Some debugging symbols have no name. Ignore them. */
443 if (!iter->name[0]) 464 if (!iter->name[0])
444 return 0; 465 return 0;
445 466
446 if (iter->module_name[0]) { 467 if (iter->module_name[0]) {
447 char type; 468 char type;
448 469
449 /* Label it "global" if it is exported, 470 /*
450 * "local" if not exported. */ 471 * Label it "global" if it is exported,
472 * "local" if not exported.
473 */
451 type = iter->exported ? toupper(iter->type) : 474 type = iter->exported ? toupper(iter->type) :
452 tolower(iter->type); 475 tolower(iter->type);
453 seq_printf(m, "%0*lx %c %s\t[%s]\n", 476 seq_printf(m, "%0*lx %c %s\t[%s]\n",
454 (int)(2*sizeof(void*)), 477 (int)(2 * sizeof(void *)),
455 iter->value, type, iter->name, iter->module_name); 478 iter->value, type, iter->name, iter->module_name);
456 } else 479 } else
457 seq_printf(m, "%0*lx %c %s\n", 480 seq_printf(m, "%0*lx %c %s\n",
458 (int)(2*sizeof(void*)), 481 (int)(2 * sizeof(void *)),
459 iter->value, iter->type, iter->name); 482 iter->value, iter->type, iter->name);
460 return 0; 483 return 0;
461} 484}
@@ -469,9 +492,11 @@ static const struct seq_operations kallsyms_op = {
469 492
470static int kallsyms_open(struct inode *inode, struct file *file) 493static int kallsyms_open(struct inode *inode, struct file *file)
471{ 494{
472 /* We keep iterator in m->private, since normal case is to 495 /*
496 * We keep iterator in m->private, since normal case is to
473 * s_start from where we left off, so we avoid doing 497 * s_start from where we left off, so we avoid doing
474 * using get_symbol_offset for every symbol */ 498 * using get_symbol_offset for every symbol.
499 */
475 struct kallsym_iter *iter; 500 struct kallsym_iter *iter;
476 int ret; 501 int ret;
477 502
@@ -500,7 +525,4 @@ static int __init kallsyms_init(void)
500 proc_create("kallsyms", 0444, NULL, &kallsyms_operations); 525 proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
501 return 0; 526 return 0;
502} 527}
503__initcall(kallsyms_init); 528device_initcall(kallsyms_init);
504
505EXPORT_SYMBOL(__print_symbol);
506EXPORT_SYMBOL_GPL(sprint_symbol);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index e4983770913b..ae1c35201cc8 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1448,17 +1448,17 @@ int kernel_kexec(void)
1448 goto Restore_console; 1448 goto Restore_console;
1449 } 1449 }
1450 suspend_console(); 1450 suspend_console();
1451 error = device_suspend(PMSG_FREEZE); 1451 error = dpm_suspend_start(PMSG_FREEZE);
1452 if (error) 1452 if (error)
1453 goto Resume_console; 1453 goto Resume_console;
1454 /* At this point, device_suspend() has been called, 1454 /* At this point, dpm_suspend_start() has been called,
1455 * but *not* device_power_down(). We *must* 1455 * but *not* dpm_suspend_noirq(). We *must* call
1456 * device_power_down() now. Otherwise, drivers for 1456 * dpm_suspend_noirq() now. Otherwise, drivers for
1457 * some devices (e.g. interrupt controllers) become 1457 * some devices (e.g. interrupt controllers) become
1458 * desynchronized with the actual state of the 1458 * desynchronized with the actual state of the
1459 * hardware at resume time, and evil weirdness ensues. 1459 * hardware at resume time, and evil weirdness ensues.
1460 */ 1460 */
1461 error = device_power_down(PMSG_FREEZE); 1461 error = dpm_suspend_noirq(PMSG_FREEZE);
1462 if (error) 1462 if (error)
1463 goto Resume_devices; 1463 goto Resume_devices;
1464 error = disable_nonboot_cpus(); 1464 error = disable_nonboot_cpus();
@@ -1486,9 +1486,9 @@ int kernel_kexec(void)
1486 local_irq_enable(); 1486 local_irq_enable();
1487 Enable_cpus: 1487 Enable_cpus:
1488 enable_nonboot_cpus(); 1488 enable_nonboot_cpus();
1489 device_power_up(PMSG_RESTORE); 1489 dpm_resume_noirq(PMSG_RESTORE);
1490 Resume_devices: 1490 Resume_devices:
1491 device_resume(PMSG_RESTORE); 1491 dpm_resume_end(PMSG_RESTORE);
1492 Resume_console: 1492 Resume_console:
1493 resume_console(); 1493 resume_console();
1494 thaw_processes(); 1494 thaw_processes();
diff --git a/kernel/module.c b/kernel/module.c
index 35f7de00bf0d..e4ab36ce7672 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2455,6 +2455,7 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
2455 mutex_lock(&module_mutex); 2455 mutex_lock(&module_mutex);
2456 /* Drop initial reference. */ 2456 /* Drop initial reference. */
2457 module_put(mod); 2457 module_put(mod);
2458 trim_init_extable(mod);
2458 module_free(mod, mod->module_init); 2459 module_free(mod, mod->module_init);
2459 mod->module_init = NULL; 2460 mod->module_init = NULL;
2460 mod->init_size = 0; 2461 mod->init_size = 0;
diff --git a/kernel/params.c b/kernel/params.c
index de273ec85bd2..7f6912ced2ba 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -24,9 +24,6 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26 26
27/* We abuse the high bits of "perm" to record whether we kmalloc'ed. */
28#define KPARAM_KMALLOCED 0x80000000
29
30#if 0 27#if 0
31#define DEBUGP printk 28#define DEBUGP printk
32#else 29#else
@@ -220,13 +217,13 @@ int param_set_charp(const char *val, struct kernel_param *kp)
220 return -ENOSPC; 217 return -ENOSPC;
221 } 218 }
222 219
223 if (kp->perm & KPARAM_KMALLOCED) 220 if (kp->flags & KPARAM_KMALLOCED)
224 kfree(*(char **)kp->arg); 221 kfree(*(char **)kp->arg);
225 222
226 /* This is a hack. We can't need to strdup in early boot, and we 223 /* This is a hack. We can't need to strdup in early boot, and we
227 * don't need to; this mangled commandline is preserved. */ 224 * don't need to; this mangled commandline is preserved. */
228 if (slab_is_available()) { 225 if (slab_is_available()) {
229 kp->perm |= KPARAM_KMALLOCED; 226 kp->flags |= KPARAM_KMALLOCED;
230 *(char **)kp->arg = kstrdup(val, GFP_KERNEL); 227 *(char **)kp->arg = kstrdup(val, GFP_KERNEL);
231 if (!kp->arg) 228 if (!kp->arg)
232 return -ENOMEM; 229 return -ENOMEM;
@@ -241,44 +238,63 @@ int param_get_charp(char *buffer, struct kernel_param *kp)
241 return sprintf(buffer, "%s", *((char **)kp->arg)); 238 return sprintf(buffer, "%s", *((char **)kp->arg));
242} 239}
243 240
241/* Actually could be a bool or an int, for historical reasons. */
244int param_set_bool(const char *val, struct kernel_param *kp) 242int param_set_bool(const char *val, struct kernel_param *kp)
245{ 243{
244 bool v;
245
246 /* No equals means "set"... */ 246 /* No equals means "set"... */
247 if (!val) val = "1"; 247 if (!val) val = "1";
248 248
249 /* One of =[yYnN01] */ 249 /* One of =[yYnN01] */
250 switch (val[0]) { 250 switch (val[0]) {
251 case 'y': case 'Y': case '1': 251 case 'y': case 'Y': case '1':
252 *(int *)kp->arg = 1; 252 v = true;
253 return 0; 253 break;
254 case 'n': case 'N': case '0': 254 case 'n': case 'N': case '0':
255 *(int *)kp->arg = 0; 255 v = false;
256 return 0; 256 break;
257 default:
258 return -EINVAL;
257 } 259 }
258 return -EINVAL; 260
261 if (kp->flags & KPARAM_ISBOOL)
262 *(bool *)kp->arg = v;
263 else
264 *(int *)kp->arg = v;
265 return 0;
259} 266}
260 267
261int param_get_bool(char *buffer, struct kernel_param *kp) 268int param_get_bool(char *buffer, struct kernel_param *kp)
262{ 269{
270 bool val;
271 if (kp->flags & KPARAM_ISBOOL)
272 val = *(bool *)kp->arg;
273 else
274 val = *(int *)kp->arg;
275
263 /* Y and N chosen as being relatively non-coder friendly */ 276 /* Y and N chosen as being relatively non-coder friendly */
264 return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'Y' : 'N'); 277 return sprintf(buffer, "%c", val ? 'Y' : 'N');
265} 278}
266 279
280/* This one must be bool. */
267int param_set_invbool(const char *val, struct kernel_param *kp) 281int param_set_invbool(const char *val, struct kernel_param *kp)
268{ 282{
269 int boolval, ret; 283 int ret;
284 bool boolval;
270 struct kernel_param dummy; 285 struct kernel_param dummy;
271 286
272 dummy.arg = &boolval; 287 dummy.arg = &boolval;
288 dummy.flags = KPARAM_ISBOOL;
273 ret = param_set_bool(val, &dummy); 289 ret = param_set_bool(val, &dummy);
274 if (ret == 0) 290 if (ret == 0)
275 *(int *)kp->arg = !boolval; 291 *(bool *)kp->arg = !boolval;
276 return ret; 292 return ret;
277} 293}
278 294
279int param_get_invbool(char *buffer, struct kernel_param *kp) 295int param_get_invbool(char *buffer, struct kernel_param *kp)
280{ 296{
281 return sprintf(buffer, "%c", (*(int *)kp->arg) ? 'N' : 'Y'); 297 return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y');
282} 298}
283 299
284/* We break the rule and mangle the string. */ 300/* We break the rule and mangle the string. */
@@ -591,7 +607,7 @@ void destroy_params(const struct kernel_param *params, unsigned num)
591 unsigned int i; 607 unsigned int i;
592 608
593 for (i = 0; i < num; i++) 609 for (i = 0; i < num; i++)
594 if (params[i].perm & KPARAM_KMALLOCED) 610 if (params[i].flags & KPARAM_KMALLOCED)
595 kfree(*(char **)params[i].arg); 611 kfree(*(char **)params[i].arg);
596} 612}
597 613
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ef5d8a5b2453..29b685f551aa 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3570,12 +3570,8 @@ perf_counter_alloc(struct perf_counter_attr *attr,
3570 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP)) 3570 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3571 goto done; 3571 goto done;
3572 3572
3573 if (attr->type == PERF_TYPE_RAW) {
3574 pmu = hw_perf_counter_init(counter);
3575 goto done;
3576 }
3577
3578 switch (attr->type) { 3573 switch (attr->type) {
3574 case PERF_TYPE_RAW:
3579 case PERF_TYPE_HARDWARE: 3575 case PERF_TYPE_HARDWARE:
3580 case PERF_TYPE_HW_CACHE: 3576 case PERF_TYPE_HW_CACHE:
3581 pmu = hw_perf_counter_init(counter); 3577 pmu = hw_perf_counter_init(counter);
@@ -3588,6 +3584,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
3588 case PERF_TYPE_TRACEPOINT: 3584 case PERF_TYPE_TRACEPOINT:
3589 pmu = tp_perf_counter_init(counter); 3585 pmu = tp_perf_counter_init(counter);
3590 break; 3586 break;
3587
3588 default:
3589 break;
3591 } 3590 }
3592done: 3591done:
3593 err = 0; 3592 err = 0;
@@ -3614,6 +3613,85 @@ done:
3614 return counter; 3613 return counter;
3615} 3614}
3616 3615
3616static int perf_copy_attr(struct perf_counter_attr __user *uattr,
3617 struct perf_counter_attr *attr)
3618{
3619 int ret;
3620 u32 size;
3621
3622 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
3623 return -EFAULT;
3624
3625 /*
3626 * zero the full structure, so that a short copy will be nice.
3627 */
3628 memset(attr, 0, sizeof(*attr));
3629
3630 ret = get_user(size, &uattr->size);
3631 if (ret)
3632 return ret;
3633
3634 if (size > PAGE_SIZE) /* silly large */
3635 goto err_size;
3636
3637 if (!size) /* abi compat */
3638 size = PERF_ATTR_SIZE_VER0;
3639
3640 if (size < PERF_ATTR_SIZE_VER0)
3641 goto err_size;
3642
3643 /*
3644 * If we're handed a bigger struct than we know of,
3645 * ensure all the unknown bits are 0.
3646 */
3647 if (size > sizeof(*attr)) {
3648 unsigned long val;
3649 unsigned long __user *addr;
3650 unsigned long __user *end;
3651
3652 addr = PTR_ALIGN((void __user *)uattr + sizeof(*attr),
3653 sizeof(unsigned long));
3654 end = PTR_ALIGN((void __user *)uattr + size,
3655 sizeof(unsigned long));
3656
3657 for (; addr < end; addr += sizeof(unsigned long)) {
3658 ret = get_user(val, addr);
3659 if (ret)
3660 return ret;
3661 if (val)
3662 goto err_size;
3663 }
3664 }
3665
3666 ret = copy_from_user(attr, uattr, size);
3667 if (ret)
3668 return -EFAULT;
3669
3670 /*
3671 * If the type exists, the corresponding creation will verify
3672 * the attr->config.
3673 */
3674 if (attr->type >= PERF_TYPE_MAX)
3675 return -EINVAL;
3676
3677 if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3)
3678 return -EINVAL;
3679
3680 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
3681 return -EINVAL;
3682
3683 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
3684 return -EINVAL;
3685
3686out:
3687 return ret;
3688
3689err_size:
3690 put_user(sizeof(*attr), &uattr->size);
3691 ret = -E2BIG;
3692 goto out;
3693}
3694
3617/** 3695/**
3618 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu 3696 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3619 * 3697 *
@@ -3623,7 +3701,7 @@ done:
3623 * @group_fd: group leader counter fd 3701 * @group_fd: group leader counter fd
3624 */ 3702 */
3625SYSCALL_DEFINE5(perf_counter_open, 3703SYSCALL_DEFINE5(perf_counter_open,
3626 const struct perf_counter_attr __user *, attr_uptr, 3704 struct perf_counter_attr __user *, attr_uptr,
3627 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) 3705 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3628{ 3706{
3629 struct perf_counter *counter, *group_leader; 3707 struct perf_counter *counter, *group_leader;
@@ -3639,8 +3717,9 @@ SYSCALL_DEFINE5(perf_counter_open,
3639 if (flags) 3717 if (flags)
3640 return -EINVAL; 3718 return -EINVAL;
3641 3719
3642 if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0) 3720 ret = perf_copy_attr(attr_uptr, &attr);
3643 return -EFAULT; 3721 if (ret)
3722 return ret;
3644 3723
3645 if (!attr.exclude_kernel) { 3724 if (!attr.exclude_kernel) {
3646 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN)) 3725 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 23bd4daeb96b..72067cbdb37f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -116,9 +116,13 @@ config SUSPEND_FREEZER
116 116
117 Turning OFF this setting is NOT recommended! If in doubt, say Y. 117 Turning OFF this setting is NOT recommended! If in doubt, say Y.
118 118
119config HIBERNATION_NVS
120 bool
121
119config HIBERNATION 122config HIBERNATION
120 bool "Hibernation (aka 'suspend to disk')" 123 bool "Hibernation (aka 'suspend to disk')"
121 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE 124 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE
125 select HIBERNATION_NVS if HAS_IOMEM
122 ---help--- 126 ---help---
123 Enable the suspend to disk (STD) functionality, which is usually 127 Enable the suspend to disk (STD) functionality, which is usually
124 called "hibernation" in user interfaces. STD checkpoints the 128 called "hibernation" in user interfaces. STD checkpoints the
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 720ea4f781bd..c3b81c30e5d5 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -6,6 +6,9 @@ endif
6obj-$(CONFIG_PM) += main.o 6obj-$(CONFIG_PM) += main.o
7obj-$(CONFIG_PM_SLEEP) += console.o 7obj-$(CONFIG_PM_SLEEP) += console.o
8obj-$(CONFIG_FREEZER) += process.o 8obj-$(CONFIG_FREEZER) += process.o
9obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o 9obj-$(CONFIG_SUSPEND) += suspend.o
10obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
11obj-$(CONFIG_HIBERNATION) += swsusp.o hibernate.o snapshot.o swap.o user.o
12obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
10 13
11obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o 14obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/hibernate.c
index 5cb080e7eebd..81d2e7464893 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/hibernate.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * kernel/power/disk.c - Suspend-to-disk support. 2 * kernel/power/hibernate.c - Hibernation (a.k.a suspend-to-disk) support.
3 * 3 *
4 * Copyright (c) 2003 Patrick Mochel 4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab 5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> 6 * Copyright (c) 2004 Pavel Machek <pavel@suse.cz>
7 * Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
7 * 8 *
8 * This file is released under the GPLv2. 9 * This file is released under the GPLv2.
9 *
10 */ 10 */
11 11
12#include <linux/suspend.h> 12#include <linux/suspend.h>
@@ -215,13 +215,13 @@ static int create_image(int platform_mode)
215 if (error) 215 if (error)
216 return error; 216 return error;
217 217
218 /* At this point, device_suspend() has been called, but *not* 218 /* At this point, dpm_suspend_start() has been called, but *not*
219 * device_power_down(). We *must* call device_power_down() now. 219 * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
220 * Otherwise, drivers for some devices (e.g. interrupt controllers) 220 * Otherwise, drivers for some devices (e.g. interrupt controllers)
221 * become desynchronized with the actual state of the hardware 221 * become desynchronized with the actual state of the hardware
222 * at resume time, and evil weirdness ensues. 222 * at resume time, and evil weirdness ensues.
223 */ 223 */
224 error = device_power_down(PMSG_FREEZE); 224 error = dpm_suspend_noirq(PMSG_FREEZE);
225 if (error) { 225 if (error) {
226 printk(KERN_ERR "PM: Some devices failed to power down, " 226 printk(KERN_ERR "PM: Some devices failed to power down, "
227 "aborting hibernation\n"); 227 "aborting hibernation\n");
@@ -262,7 +262,7 @@ static int create_image(int platform_mode)
262 262
263 Power_up: 263 Power_up:
264 sysdev_resume(); 264 sysdev_resume();
265 /* NOTE: device_power_up() is just a resume() for devices 265 /* NOTE: dpm_resume_noirq() is just a resume() for devices
266 * that suspended with irqs off ... no overall powerup. 266 * that suspended with irqs off ... no overall powerup.
267 */ 267 */
268 268
@@ -275,7 +275,7 @@ static int create_image(int platform_mode)
275 Platform_finish: 275 Platform_finish:
276 platform_finish(platform_mode); 276 platform_finish(platform_mode);
277 277
278 device_power_up(in_suspend ? 278 dpm_resume_noirq(in_suspend ?
279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 279 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
280 280
281 return error; 281 return error;
@@ -304,7 +304,7 @@ int hibernation_snapshot(int platform_mode)
304 goto Close; 304 goto Close;
305 305
306 suspend_console(); 306 suspend_console();
307 error = device_suspend(PMSG_FREEZE); 307 error = dpm_suspend_start(PMSG_FREEZE);
308 if (error) 308 if (error)
309 goto Recover_platform; 309 goto Recover_platform;
310 310
@@ -315,7 +315,7 @@ int hibernation_snapshot(int platform_mode)
315 /* Control returns here after successful restore */ 315 /* Control returns here after successful restore */
316 316
317 Resume_devices: 317 Resume_devices:
318 device_resume(in_suspend ? 318 dpm_resume_end(in_suspend ?
319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 319 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
320 resume_console(); 320 resume_console();
321 Close: 321 Close:
@@ -339,7 +339,7 @@ static int resume_target_kernel(bool platform_mode)
339{ 339{
340 int error; 340 int error;
341 341
342 error = device_power_down(PMSG_QUIESCE); 342 error = dpm_suspend_noirq(PMSG_QUIESCE);
343 if (error) { 343 if (error) {
344 printk(KERN_ERR "PM: Some devices failed to power down, " 344 printk(KERN_ERR "PM: Some devices failed to power down, "
345 "aborting resume\n"); 345 "aborting resume\n");
@@ -394,7 +394,7 @@ static int resume_target_kernel(bool platform_mode)
394 Cleanup: 394 Cleanup:
395 platform_restore_cleanup(platform_mode); 395 platform_restore_cleanup(platform_mode);
396 396
397 device_power_up(PMSG_RECOVER); 397 dpm_resume_noirq(PMSG_RECOVER);
398 398
399 return error; 399 return error;
400} 400}
@@ -414,10 +414,10 @@ int hibernation_restore(int platform_mode)
414 414
415 pm_prepare_console(); 415 pm_prepare_console();
416 suspend_console(); 416 suspend_console();
417 error = device_suspend(PMSG_QUIESCE); 417 error = dpm_suspend_start(PMSG_QUIESCE);
418 if (!error) { 418 if (!error) {
419 error = resume_target_kernel(platform_mode); 419 error = resume_target_kernel(platform_mode);
420 device_resume(PMSG_RECOVER); 420 dpm_resume_end(PMSG_RECOVER);
421 } 421 }
422 resume_console(); 422 resume_console();
423 pm_restore_console(); 423 pm_restore_console();
@@ -447,14 +447,14 @@ int hibernation_platform_enter(void)
447 447
448 entering_platform_hibernation = true; 448 entering_platform_hibernation = true;
449 suspend_console(); 449 suspend_console();
450 error = device_suspend(PMSG_HIBERNATE); 450 error = dpm_suspend_start(PMSG_HIBERNATE);
451 if (error) { 451 if (error) {
452 if (hibernation_ops->recover) 452 if (hibernation_ops->recover)
453 hibernation_ops->recover(); 453 hibernation_ops->recover();
454 goto Resume_devices; 454 goto Resume_devices;
455 } 455 }
456 456
457 error = device_power_down(PMSG_HIBERNATE); 457 error = dpm_suspend_noirq(PMSG_HIBERNATE);
458 if (error) 458 if (error)
459 goto Resume_devices; 459 goto Resume_devices;
460 460
@@ -479,11 +479,11 @@ int hibernation_platform_enter(void)
479 Platofrm_finish: 479 Platofrm_finish:
480 hibernation_ops->finish(); 480 hibernation_ops->finish();
481 481
 482 device_power_up(PMSG_RESTORE); 482 dpm_resume_noirq(PMSG_RESTORE);
483 483
484 Resume_devices: 484 Resume_devices:
485 entering_platform_hibernation = false; 485 entering_platform_hibernation = false;
486 device_resume(PMSG_RESTORE); 486 dpm_resume_end(PMSG_RESTORE);
487 resume_console(); 487 resume_console();
488 488
489 Close: 489 Close:
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
new file mode 100644
index 000000000000..39ac698ef836
--- /dev/null
+++ b/kernel/power/hibernate_nvs.c
@@ -0,0 +1,135 @@
1/*
2 * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory
3 *
4 * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/io.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/mm.h>
13#include <linux/suspend.h>
14
15/*
16 * Platforms, like ACPI, may want us to save some memory used by them during
17 * hibernation and to restore the contents of this memory during the subsequent
18 * resume. The code below implements a mechanism allowing us to do that.
19 */
20
21struct nvs_page {
22 unsigned long phys_start;
23 unsigned int size;
24 void *kaddr;
25 void *data;
26 struct list_head node;
27};
28
29static LIST_HEAD(nvs_list);
30
31/**
32 * hibernate_nvs_register - register platform NVS memory region to save
33 * @start - physical address of the region
34 * @size - size of the region
35 *
36 * The NVS region need not be page-aligned (both ends) and we arrange
37 * things so that the data from page-aligned addresses in this region will
38 * be copied into separate RAM pages.
39 */
40int hibernate_nvs_register(unsigned long start, unsigned long size)
41{
42 struct nvs_page *entry, *next;
43
44 while (size > 0) {
45 unsigned int nr_bytes;
46
47 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
48 if (!entry)
49 goto Error;
50
51 list_add_tail(&entry->node, &nvs_list);
52 entry->phys_start = start;
53 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
54 entry->size = (size < nr_bytes) ? size : nr_bytes;
55
56 start += entry->size;
57 size -= entry->size;
58 }
59 return 0;
60
61 Error:
62 list_for_each_entry_safe(entry, next, &nvs_list, node) {
63 list_del(&entry->node);
64 kfree(entry);
65 }
66 return -ENOMEM;
67}
68
69/**
70 * hibernate_nvs_free - free data pages allocated for saving NVS regions
71 */
72void hibernate_nvs_free(void)
73{
74 struct nvs_page *entry;
75
76 list_for_each_entry(entry, &nvs_list, node)
77 if (entry->data) {
78 free_page((unsigned long)entry->data);
79 entry->data = NULL;
80 if (entry->kaddr) {
81 iounmap(entry->kaddr);
82 entry->kaddr = NULL;
83 }
84 }
85}
86
87/**
88 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
89 */
90int hibernate_nvs_alloc(void)
91{
92 struct nvs_page *entry;
93
94 list_for_each_entry(entry, &nvs_list, node) {
95 entry->data = (void *)__get_free_page(GFP_KERNEL);
96 if (!entry->data) {
97 hibernate_nvs_free();
98 return -ENOMEM;
99 }
100 }
101 return 0;
102}
103
104/**
105 * hibernate_nvs_save - save NVS memory regions
106 */
107void hibernate_nvs_save(void)
108{
109 struct nvs_page *entry;
110
111 printk(KERN_INFO "PM: Saving platform NVS memory\n");
112
113 list_for_each_entry(entry, &nvs_list, node)
114 if (entry->data) {
115 entry->kaddr = ioremap(entry->phys_start, entry->size);
116 memcpy(entry->data, entry->kaddr, entry->size);
117 }
118}
119
120/**
121 * hibernate_nvs_restore - restore NVS memory regions
122 *
123 * This function is going to be called with interrupts disabled, so it
124 * cannot iounmap the virtual addresses used to access the NVS region.
125 */
126void hibernate_nvs_restore(void)
127{
128 struct nvs_page *entry;
129
130 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
131
132 list_for_each_entry(entry, &nvs_list, node)
133 if (entry->data)
134 memcpy(entry->kaddr, entry->data, entry->size);
135}
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 868028280d13..f710e36930cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -8,20 +8,9 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/module.h>
12#include <linux/suspend.h>
13#include <linux/kobject.h> 11#include <linux/kobject.h>
14#include <linux/string.h> 12#include <linux/string.h>
15#include <linux/delay.h>
16#include <linux/errno.h>
17#include <linux/kmod.h>
18#include <linux/init.h>
19#include <linux/console.h>
20#include <linux/cpu.h>
21#include <linux/resume-trace.h> 13#include <linux/resume-trace.h>
22#include <linux/freezer.h>
23#include <linux/vmstat.h>
24#include <linux/syscalls.h>
25 14
26#include "power.h" 15#include "power.h"
27 16
@@ -119,373 +108,6 @@ power_attr(pm_test);
119 108
120#endif /* CONFIG_PM_SLEEP */ 109#endif /* CONFIG_PM_SLEEP */
121 110
122#ifdef CONFIG_SUSPEND
123
124static int suspend_test(int level)
125{
126#ifdef CONFIG_PM_DEBUG
127 if (pm_test_level == level) {
128 printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
129 mdelay(5000);
130 return 1;
131 }
132#endif /* !CONFIG_PM_DEBUG */
133 return 0;
134}
135
136#ifdef CONFIG_PM_TEST_SUSPEND
137
138/*
139 * We test the system suspend code by setting an RTC wakealarm a short
140 * time in the future, then suspending. Suspending the devices won't
141 * normally take long ... some systems only need a few milliseconds.
142 *
143 * The time it takes is system-specific though, so when we test this
144 * during system bootup we allow a LOT of time.
145 */
146#define TEST_SUSPEND_SECONDS 5
147
148static unsigned long suspend_test_start_time;
149
150static void suspend_test_start(void)
151{
152 /* FIXME Use better timebase than "jiffies", ideally a clocksource.
153 * What we want is a hardware counter that will work correctly even
154 * during the irqs-are-off stages of the suspend/resume cycle...
155 */
156 suspend_test_start_time = jiffies;
157}
158
159static void suspend_test_finish(const char *label)
160{
161 long nj = jiffies - suspend_test_start_time;
162 unsigned msec;
163
164 msec = jiffies_to_msecs(abs(nj));
165 pr_info("PM: %s took %d.%03d seconds\n", label,
166 msec / 1000, msec % 1000);
167
168 /* Warning on suspend means the RTC alarm period needs to be
169 * larger -- the system was sooo slooowwww to suspend that the
170 * alarm (should have) fired before the system went to sleep!
171 *
172 * Warning on either suspend or resume also means the system
173 * has some performance issues. The stack dump of a WARN_ON
174 * is more likely to get the right attention than a printk...
175 */
176 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
177}
178
179#else
180
181static void suspend_test_start(void)
182{
183}
184
185static void suspend_test_finish(const char *label)
186{
187}
188
189#endif
190
191/* This is just an arbitrary number */
192#define FREE_PAGE_NUMBER (100)
193
194static struct platform_suspend_ops *suspend_ops;
195
196/**
197 * suspend_set_ops - Set the global suspend method table.
198 * @ops: Pointer to ops structure.
199 */
200
201void suspend_set_ops(struct platform_suspend_ops *ops)
202{
203 mutex_lock(&pm_mutex);
204 suspend_ops = ops;
205 mutex_unlock(&pm_mutex);
206}
207
208/**
209 * suspend_valid_only_mem - generic memory-only valid callback
210 *
211 * Platform drivers that implement mem suspend only and only need
212 * to check for that in their .valid callback can use this instead
213 * of rolling their own .valid callback.
214 */
215int suspend_valid_only_mem(suspend_state_t state)
216{
217 return state == PM_SUSPEND_MEM;
218}
219
220/**
221 * suspend_prepare - Do prep work before entering low-power state.
222 *
223 * This is common code that is called for each state that we're entering.
224 * Run suspend notifiers, allocate a console and stop all processes.
225 */
226static int suspend_prepare(void)
227{
228 int error;
229 unsigned int free_pages;
230
231 if (!suspend_ops || !suspend_ops->enter)
232 return -EPERM;
233
234 pm_prepare_console();
235
236 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
237 if (error)
238 goto Finish;
239
240 error = usermodehelper_disable();
241 if (error)
242 goto Finish;
243
244 if (suspend_freeze_processes()) {
245 error = -EAGAIN;
246 goto Thaw;
247 }
248
249 free_pages = global_page_state(NR_FREE_PAGES);
250 if (free_pages < FREE_PAGE_NUMBER) {
251 pr_debug("PM: free some memory\n");
252 shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
253 if (nr_free_pages() < FREE_PAGE_NUMBER) {
254 error = -ENOMEM;
255 printk(KERN_ERR "PM: No enough memory\n");
256 }
257 }
258 if (!error)
259 return 0;
260
261 Thaw:
262 suspend_thaw_processes();
263 usermodehelper_enable();
264 Finish:
265 pm_notifier_call_chain(PM_POST_SUSPEND);
266 pm_restore_console();
267 return error;
268}
269
270/* default implementation */
271void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
272{
273 local_irq_disable();
274}
275
276/* default implementation */
277void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
278{
279 local_irq_enable();
280}
281
282/**
283 * suspend_enter - enter the desired system sleep state.
284 * @state: state to enter
285 *
286 * This function should be called after devices have been suspended.
287 */
288static int suspend_enter(suspend_state_t state)
289{
290 int error;
291
292 if (suspend_ops->prepare) {
293 error = suspend_ops->prepare();
294 if (error)
295 return error;
296 }
297
298 error = device_power_down(PMSG_SUSPEND);
299 if (error) {
300 printk(KERN_ERR "PM: Some devices failed to power down\n");
301 goto Platfrom_finish;
302 }
303
304 if (suspend_ops->prepare_late) {
305 error = suspend_ops->prepare_late();
306 if (error)
307 goto Power_up_devices;
308 }
309
310 if (suspend_test(TEST_PLATFORM))
311 goto Platform_wake;
312
313 error = disable_nonboot_cpus();
314 if (error || suspend_test(TEST_CPUS))
315 goto Enable_cpus;
316
317 arch_suspend_disable_irqs();
318 BUG_ON(!irqs_disabled());
319
320 error = sysdev_suspend(PMSG_SUSPEND);
321 if (!error) {
322 if (!suspend_test(TEST_CORE))
323 error = suspend_ops->enter(state);
324 sysdev_resume();
325 }
326
327 arch_suspend_enable_irqs();
328 BUG_ON(irqs_disabled());
329
330 Enable_cpus:
331 enable_nonboot_cpus();
332
333 Platform_wake:
334 if (suspend_ops->wake)
335 suspend_ops->wake();
336
337 Power_up_devices:
338 device_power_up(PMSG_RESUME);
339
340 Platfrom_finish:
341 if (suspend_ops->finish)
342 suspend_ops->finish();
343
344 return error;
345}
346
347/**
348 * suspend_devices_and_enter - suspend devices and enter the desired system
349 * sleep state.
350 * @state: state to enter
351 */
352int suspend_devices_and_enter(suspend_state_t state)
353{
354 int error;
355
356 if (!suspend_ops)
357 return -ENOSYS;
358
359 if (suspend_ops->begin) {
360 error = suspend_ops->begin(state);
361 if (error)
362 goto Close;
363 }
364 suspend_console();
365 suspend_test_start();
366 error = device_suspend(PMSG_SUSPEND);
367 if (error) {
368 printk(KERN_ERR "PM: Some devices failed to suspend\n");
369 goto Recover_platform;
370 }
371 suspend_test_finish("suspend devices");
372 if (suspend_test(TEST_DEVICES))
373 goto Recover_platform;
374
375 suspend_enter(state);
376
377 Resume_devices:
378 suspend_test_start();
379 device_resume(PMSG_RESUME);
380 suspend_test_finish("resume devices");
381 resume_console();
382 Close:
383 if (suspend_ops->end)
384 suspend_ops->end();
385 return error;
386
387 Recover_platform:
388 if (suspend_ops->recover)
389 suspend_ops->recover();
390 goto Resume_devices;
391}
392
393/**
394 * suspend_finish - Do final work before exiting suspend sequence.
395 *
396 * Call platform code to clean up, restart processes, and free the
397 * console that we've allocated. This is not called for suspend-to-disk.
398 */
399static void suspend_finish(void)
400{
401 suspend_thaw_processes();
402 usermodehelper_enable();
403 pm_notifier_call_chain(PM_POST_SUSPEND);
404 pm_restore_console();
405}
406
407
408
409
410static const char * const pm_states[PM_SUSPEND_MAX] = {
411 [PM_SUSPEND_STANDBY] = "standby",
412 [PM_SUSPEND_MEM] = "mem",
413};
414
415static inline int valid_state(suspend_state_t state)
416{
417 /* All states need lowlevel support and need to be valid
418 * to the lowlevel implementation, no valid callback
419 * implies that none are valid. */
420 if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
421 return 0;
422 return 1;
423}
424
425
426/**
427 * enter_state - Do common work of entering low-power state.
428 * @state: pm_state structure for state we're entering.
429 *
430 * Make sure we're the only ones trying to enter a sleep state. Fail
431 * if someone has beat us to it, since we don't want anything weird to
432 * happen when we wake up.
433 * Then, do the setup for suspend, enter the state, and cleaup (after
434 * we've woken up).
435 */
436static int enter_state(suspend_state_t state)
437{
438 int error;
439
440 if (!valid_state(state))
441 return -ENODEV;
442
443 if (!mutex_trylock(&pm_mutex))
444 return -EBUSY;
445
446 printk(KERN_INFO "PM: Syncing filesystems ... ");
447 sys_sync();
448 printk("done.\n");
449
450 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
451 error = suspend_prepare();
452 if (error)
453 goto Unlock;
454
455 if (suspend_test(TEST_FREEZER))
456 goto Finish;
457
458 pr_debug("PM: Entering %s sleep\n", pm_states[state]);
459 error = suspend_devices_and_enter(state);
460
461 Finish:
462 pr_debug("PM: Finishing wakeup.\n");
463 suspend_finish();
464 Unlock:
465 mutex_unlock(&pm_mutex);
466 return error;
467}
468
469
470/**
471 * pm_suspend - Externally visible function for suspending system.
472 * @state: Enumerated value of state to enter.
473 *
474 * Determine whether or not value is within range, get state
475 * structure, and enter (above).
476 */
477
478int pm_suspend(suspend_state_t state)
479{
480 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
481 return enter_state(state);
482 return -EINVAL;
483}
484
485EXPORT_SYMBOL(pm_suspend);
486
487#endif /* CONFIG_SUSPEND */
488
489struct kobject *power_kobj; 111struct kobject *power_kobj;
490 112
491/** 113/**
@@ -498,7 +120,6 @@ struct kobject *power_kobj;
498 * store() accepts one of those strings, translates it into the 120 * store() accepts one of those strings, translates it into the
499 * proper enumerated value, and initiates a suspend transition. 121 * proper enumerated value, and initiates a suspend transition.
500 */ 122 */
501
502static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr, 123static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
503 char *buf) 124 char *buf)
504{ 125{
@@ -596,7 +217,6 @@ static struct attribute_group attr_group = {
596 .attrs = g, 217 .attrs = g,
597}; 218};
598 219
599
600static int __init pm_init(void) 220static int __init pm_init(void)
601{ 221{
602 power_kobj = kobject_create_and_add("power", NULL); 222 power_kobj = kobject_create_and_add("power", NULL);
@@ -606,144 +226,3 @@ static int __init pm_init(void)
606} 226}
607 227
608core_initcall(pm_init); 228core_initcall(pm_init);
609
610
611#ifdef CONFIG_PM_TEST_SUSPEND
612
613#include <linux/rtc.h>
614
615/*
616 * To test system suspend, we need a hands-off mechanism to resume the
617 * system. RTCs wake alarms are a common self-contained mechanism.
618 */
619
620static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
621{
622 static char err_readtime[] __initdata =
623 KERN_ERR "PM: can't read %s time, err %d\n";
624 static char err_wakealarm [] __initdata =
625 KERN_ERR "PM: can't set %s wakealarm, err %d\n";
626 static char err_suspend[] __initdata =
627 KERN_ERR "PM: suspend test failed, error %d\n";
628 static char info_test[] __initdata =
629 KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
630
631 unsigned long now;
632 struct rtc_wkalrm alm;
633 int status;
634
635 /* this may fail if the RTC hasn't been initialized */
636 status = rtc_read_time(rtc, &alm.time);
637 if (status < 0) {
638 printk(err_readtime, dev_name(&rtc->dev), status);
639 return;
640 }
641 rtc_tm_to_time(&alm.time, &now);
642
643 memset(&alm, 0, sizeof alm);
644 rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
645 alm.enabled = true;
646
647 status = rtc_set_alarm(rtc, &alm);
648 if (status < 0) {
649 printk(err_wakealarm, dev_name(&rtc->dev), status);
650 return;
651 }
652
653 if (state == PM_SUSPEND_MEM) {
654 printk(info_test, pm_states[state]);
655 status = pm_suspend(state);
656 if (status == -ENODEV)
657 state = PM_SUSPEND_STANDBY;
658 }
659 if (state == PM_SUSPEND_STANDBY) {
660 printk(info_test, pm_states[state]);
661 status = pm_suspend(state);
662 }
663 if (status < 0)
664 printk(err_suspend, status);
665
666 /* Some platforms can't detect that the alarm triggered the
 667 * wakeup, or (accordingly) disable it afterwards.
668 * It's supposed to give oneshot behavior; cope.
669 */
670 alm.enabled = false;
671 rtc_set_alarm(rtc, &alm);
672}
673
674static int __init has_wakealarm(struct device *dev, void *name_ptr)
675{
676 struct rtc_device *candidate = to_rtc_device(dev);
677
678 if (!candidate->ops->set_alarm)
679 return 0;
680 if (!device_may_wakeup(candidate->dev.parent))
681 return 0;
682
683 *(const char **)name_ptr = dev_name(dev);
684 return 1;
685}
686
687/*
688 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
689 * at startup time. They're normally disabled, for faster boot and because
690 * we can't know which states really work on this particular system.
691 */
692static suspend_state_t test_state __initdata = PM_SUSPEND_ON;
693
694static char warn_bad_state[] __initdata =
695 KERN_WARNING "PM: can't test '%s' suspend state\n";
696
697static int __init setup_test_suspend(char *value)
698{
699 unsigned i;
700
701 /* "=mem" ==> "mem" */
702 value++;
703 for (i = 0; i < PM_SUSPEND_MAX; i++) {
704 if (!pm_states[i])
705 continue;
706 if (strcmp(pm_states[i], value) != 0)
707 continue;
708 test_state = (__force suspend_state_t) i;
709 return 0;
710 }
711 printk(warn_bad_state, value);
712 return 0;
713}
714__setup("test_suspend", setup_test_suspend);
715
716static int __init test_suspend(void)
717{
718 static char warn_no_rtc[] __initdata =
719 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
720
721 char *pony = NULL;
722 struct rtc_device *rtc = NULL;
723
724 /* PM is initialized by now; is that state testable? */
725 if (test_state == PM_SUSPEND_ON)
726 goto done;
727 if (!valid_state(test_state)) {
728 printk(warn_bad_state, pm_states[test_state]);
729 goto done;
730 }
731
732 /* RTCs have initialized by now too ... can we use one? */
733 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
734 if (pony)
735 rtc = rtc_class_open(pony);
736 if (!rtc) {
737 printk(warn_no_rtc);
738 goto done;
739 }
740
741 /* go for it */
742 test_wakealarm(rtc, test_state);
743 rtc_class_close(rtc);
744done:
745 return 0;
746}
747late_initcall(test_suspend);
748
749#endif /* CONFIG_PM_TEST_SUSPEND */
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46b5ec7a3afb..26d5a26f82e3 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -45,7 +45,7 @@ static inline char *check_image_kernel(struct swsusp_info *info)
45 */ 45 */
46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT) 46#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
47 47
48/* kernel/power/disk.c */ 48/* kernel/power/hibernate.c */
49extern int hibernation_snapshot(int platform_mode); 49extern int hibernation_snapshot(int platform_mode);
50extern int hibernation_restore(int platform_mode); 50extern int hibernation_restore(int platform_mode);
51extern int hibernation_platform_enter(void); 51extern int hibernation_platform_enter(void);
@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
74 74
75extern int create_basic_memory_bitmaps(void); 75extern int create_basic_memory_bitmaps(void);
76extern void free_basic_memory_bitmaps(void); 76extern void free_basic_memory_bitmaps(void);
77extern unsigned int count_data_pages(void); 77extern int swsusp_shrink_memory(void);
78 78
79/** 79/**
80 * Auxiliary structure used for reading the snapshot image data and 80 * Auxiliary structure used for reading the snapshot image data and
@@ -147,9 +147,8 @@ extern int swsusp_swap_in_use(void);
147 */ 147 */
148#define SF_PLATFORM_MODE 1 148#define SF_PLATFORM_MODE 1
149 149
150/* kernel/power/disk.c */ 150/* kernel/power/hibernate.c */
151extern int swsusp_check(void); 151extern int swsusp_check(void);
152extern int swsusp_shrink_memory(void);
153extern void swsusp_free(void); 152extern void swsusp_free(void);
154extern int swsusp_read(unsigned int *flags_p); 153extern int swsusp_read(unsigned int *flags_p);
155extern int swsusp_write(unsigned int flags); 154extern int swsusp_write(unsigned int flags);
@@ -161,22 +160,36 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
161 unsigned int, char *); 160 unsigned int, char *);
162 161
163#ifdef CONFIG_SUSPEND 162#ifdef CONFIG_SUSPEND
164/* kernel/power/main.c */ 163/* kernel/power/suspend.c */
164extern const char *const pm_states[];
165
166extern bool valid_state(suspend_state_t state);
165extern int suspend_devices_and_enter(suspend_state_t state); 167extern int suspend_devices_and_enter(suspend_state_t state);
168extern int enter_state(suspend_state_t state);
166#else /* !CONFIG_SUSPEND */ 169#else /* !CONFIG_SUSPEND */
167static inline int suspend_devices_and_enter(suspend_state_t state) 170static inline int suspend_devices_and_enter(suspend_state_t state)
168{ 171{
169 return -ENOSYS; 172 return -ENOSYS;
170} 173}
174static inline int enter_state(suspend_state_t state) { return -ENOSYS; }
175static inline bool valid_state(suspend_state_t state) { return false; }
171#endif /* !CONFIG_SUSPEND */ 176#endif /* !CONFIG_SUSPEND */
172 177
178#ifdef CONFIG_PM_TEST_SUSPEND
179/* kernel/power/suspend_test.c */
180extern void suspend_test_start(void);
181extern void suspend_test_finish(const char *label);
182#else /* !CONFIG_PM_TEST_SUSPEND */
183static inline void suspend_test_start(void) {}
184static inline void suspend_test_finish(const char *label) {}
185#endif /* !CONFIG_PM_TEST_SUSPEND */
186
173#ifdef CONFIG_PM_SLEEP 187#ifdef CONFIG_PM_SLEEP
174/* kernel/power/main.c */ 188/* kernel/power/main.c */
175extern int pm_notifier_call_chain(unsigned long val); 189extern int pm_notifier_call_chain(unsigned long val);
176#endif 190#endif
177 191
178#ifdef CONFIG_HIGHMEM 192#ifdef CONFIG_HIGHMEM
179unsigned int count_highmem_pages(void);
180int restore_highmem(void); 193int restore_highmem(void);
181#else 194#else
182static inline unsigned int count_highmem_pages(void) { return 0; } 195static inline unsigned int count_highmem_pages(void) { return 0; }
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 97890831e1b5..e8b337006276 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -34,7 +34,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
34 .handler = handle_poweroff, 34 .handler = handle_poweroff,
35 .help_msg = "powerOff", 35 .help_msg = "powerOff",
36 .action_msg = "Power Off", 36 .action_msg = "Power Off",
37 .enable_mask = SYSRQ_ENABLE_BOOT, 37 .enable_mask = SYSRQ_ENABLE_BOOT,
38}; 38};
39 39
40static int pm_sysrq_init(void) 40static int pm_sysrq_init(void)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 33e2e4a819f9..523a451b45d3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -39,6 +39,14 @@ static int swsusp_page_is_free(struct page *);
39static void swsusp_set_page_forbidden(struct page *); 39static void swsusp_set_page_forbidden(struct page *);
40static void swsusp_unset_page_forbidden(struct page *); 40static void swsusp_unset_page_forbidden(struct page *);
41 41
42/*
43 * Preferred image size in bytes (tunable via /sys/power/image_size).
44 * When it is set to N, swsusp will do its best to ensure the image
45 * size will not exceed N bytes, but if that is impossible, it will
46 * try to create the smallest image possible.
47 */
48unsigned long image_size = 500 * 1024 * 1024;
49
42/* List of PBEs needed for restoring the pages that were allocated before 50/* List of PBEs needed for restoring the pages that were allocated before
43 * the suspend and included in the suspend image, but have also been 51 * the suspend and included in the suspend image, but have also been
44 * allocated by the "resume" kernel, so their contents cannot be written 52 * allocated by the "resume" kernel, so their contents cannot be written
@@ -840,7 +848,7 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
840 * pages. 848 * pages.
841 */ 849 */
842 850
843unsigned int count_highmem_pages(void) 851static unsigned int count_highmem_pages(void)
844{ 852{
845 struct zone *zone; 853 struct zone *zone;
846 unsigned int n = 0; 854 unsigned int n = 0;
@@ -902,7 +910,7 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn)
902 * pages. 910 * pages.
903 */ 911 */
904 912
905unsigned int count_data_pages(void) 913static unsigned int count_data_pages(void)
906{ 914{
907 struct zone *zone; 915 struct zone *zone;
908 unsigned long pfn, max_zone_pfn; 916 unsigned long pfn, max_zone_pfn;
@@ -1058,6 +1066,74 @@ void swsusp_free(void)
1058 buffer = NULL; 1066 buffer = NULL;
1059} 1067}
1060 1068
1069/**
1070 * swsusp_shrink_memory - Try to free as much memory as needed
1071 *
1072 * ... but do not OOM-kill anyone
1073 *
1074 * Notice: all userland should be stopped before it is called, or
1075 * livelock is possible.
1076 */
1077
1078#define SHRINK_BITE 10000
1079static inline unsigned long __shrink_memory(long tmp)
1080{
1081 if (tmp > SHRINK_BITE)
1082 tmp = SHRINK_BITE;
1083 return shrink_all_memory(tmp);
1084}
1085
1086int swsusp_shrink_memory(void)
1087{
1088 long tmp;
1089 struct zone *zone;
1090 unsigned long pages = 0;
1091 unsigned int i = 0;
1092 char *p = "-\\|/";
1093 struct timeval start, stop;
1094
1095 printk(KERN_INFO "PM: Shrinking memory... ");
1096 do_gettimeofday(&start);
1097 do {
1098 long size, highmem_size;
1099
1100 highmem_size = count_highmem_pages();
1101 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
1102 tmp = size;
1103 size += highmem_size;
1104 for_each_populated_zone(zone) {
1105 tmp += snapshot_additional_pages(zone);
1106 if (is_highmem(zone)) {
1107 highmem_size -=
1108 zone_page_state(zone, NR_FREE_PAGES);
1109 } else {
1110 tmp -= zone_page_state(zone, NR_FREE_PAGES);
1111 tmp += zone->lowmem_reserve[ZONE_NORMAL];
1112 }
1113 }
1114
1115 if (highmem_size < 0)
1116 highmem_size = 0;
1117
1118 tmp += highmem_size;
1119 if (tmp > 0) {
1120 tmp = __shrink_memory(tmp);
1121 if (!tmp)
1122 return -ENOMEM;
1123 pages += tmp;
1124 } else if (size > image_size / PAGE_SIZE) {
1125 tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
1126 pages += tmp;
1127 }
1128 printk("\b%c", p[i++%4]);
1129 } while (tmp > 0);
1130 do_gettimeofday(&stop);
1131 printk("\bdone (%lu pages freed)\n", pages);
1132 swsusp_show_speed(&start, &stop, pages, "Freed");
1133
1134 return 0;
1135}
1136
1061#ifdef CONFIG_HIGHMEM 1137#ifdef CONFIG_HIGHMEM
1062/** 1138/**
1063 * count_pages_for_highmem - compute the number of non-highmem pages 1139 * count_pages_for_highmem - compute the number of non-highmem pages
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
new file mode 100644
index 000000000000..6f10dfc2d3e9
--- /dev/null
+++ b/kernel/power/suspend.c
@@ -0,0 +1,300 @@
1/*
2 * kernel/power/suspend.c - Suspend to RAM and standby functionality.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/string.h>
12#include <linux/delay.h>
13#include <linux/errno.h>
14#include <linux/init.h>
15#include <linux/console.h>
16#include <linux/cpu.h>
17#include <linux/syscalls.h>
18
19#include "power.h"
20
/* Sysfs names of the supported suspend states, indexed by suspend_state_t. */
const char *const pm_states[PM_SUSPEND_MAX] = {
        [PM_SUSPEND_STANDBY]    = "standby",
        [PM_SUSPEND_MEM]        = "mem",
};

/* Platform suspend method table; set under pm_mutex by suspend_set_ops(). */
static struct platform_suspend_ops *suspend_ops;
27
28/**
29 * suspend_set_ops - Set the global suspend method table.
30 * @ops: Pointer to ops structure.
31 */
32void suspend_set_ops(struct platform_suspend_ops *ops)
33{
34 mutex_lock(&pm_mutex);
35 suspend_ops = ops;
36 mutex_unlock(&pm_mutex);
37}
38
39bool valid_state(suspend_state_t state)
40{
41 /*
42 * All states need lowlevel support and need to be valid to the lowlevel
43 * implementation, no valid callback implies that none are valid.
44 */
45 return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
46}
47
48/**
49 * suspend_valid_only_mem - generic memory-only valid callback
50 *
51 * Platform drivers that implement mem suspend only and only need
52 * to check for that in their .valid callback can use this instead
53 * of rolling their own .valid callback.
54 */
55int suspend_valid_only_mem(suspend_state_t state)
56{
57 return state == PM_SUSPEND_MEM;
58}
59
/*
 * suspend_test - debug hook: pause for 5 s at a given test level.
 *
 * Returns 1 when the configured pm_test_level matched (the caller should
 * then abort further descent into the suspend sequence), 0 otherwise.
 * Compiles to a constant 0 without CONFIG_PM_DEBUG.
 */
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
        if (level == pm_test_level) {
                printk(KERN_INFO "suspend debug: Waiting for 5 seconds.\n");
                mdelay(5000);
                return 1;
        }
#endif /* CONFIG_PM_DEBUG */
        return 0;
}
71
/**
 * suspend_prepare - Do prep work before entering low-power state.
 *
 * This is common code that is called for each state that we're entering.
 * Run suspend notifiers, allocate a console and stop all processes.
 *
 * Returns 0 on success or a negative error code; on failure everything
 * set up so far has been undone.
 */
static int suspend_prepare(void)
{
        int error;

        /* Without a platform ->enter method there is nothing to do. */
        if (!suspend_ops || !suspend_ops->enter)
                return -EPERM;

        pm_prepare_console();

        error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
        if (error)
                goto Finish;

        error = usermodehelper_disable();
        if (error)
                goto Finish;

        error = suspend_freeze_processes();
        if (!error)
                return 0;

        /* Freezing failed: unwind in reverse order of the setup above. */
        suspend_thaw_processes();
        usermodehelper_enable();
 Finish:
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
        return error;
}
106
/* Default implementation; architectures may override this weak symbol. */
void __attribute__ ((weak)) arch_suspend_disable_irqs(void)
{
        local_irq_disable();
}
112
/* Default implementation; architectures may override this weak symbol. */
void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
{
        local_irq_enable();
}
118
119/**
120 * suspend_enter - enter the desired system sleep state.
121 * @state: state to enter
122 *
123 * This function should be called after devices have been suspended.
124 */
125static int suspend_enter(suspend_state_t state)
126{
127 int error;
128
129 if (suspend_ops->prepare) {
130 error = suspend_ops->prepare();
131 if (error)
132 return error;
133 }
134
135 error = dpm_suspend_noirq(PMSG_SUSPEND);
136 if (error) {
137 printk(KERN_ERR "PM: Some devices failed to power down\n");
138 goto Platfrom_finish;
139 }
140
141 if (suspend_ops->prepare_late) {
142 error = suspend_ops->prepare_late();
143 if (error)
144 goto Power_up_devices;
145 }
146
147 if (suspend_test(TEST_PLATFORM))
148 goto Platform_wake;
149
150 error = disable_nonboot_cpus();
151 if (error || suspend_test(TEST_CPUS))
152 goto Enable_cpus;
153
154 arch_suspend_disable_irqs();
155 BUG_ON(!irqs_disabled());
156
157 error = sysdev_suspend(PMSG_SUSPEND);
158 if (!error) {
159 if (!suspend_test(TEST_CORE))
160 error = suspend_ops->enter(state);
161 sysdev_resume();
162 }
163
164 arch_suspend_enable_irqs();
165 BUG_ON(irqs_disabled());
166
167 Enable_cpus:
168 enable_nonboot_cpus();
169
170 Platform_wake:
171 if (suspend_ops->wake)
172 suspend_ops->wake();
173
174 Power_up_devices:
175 dpm_resume_noirq(PMSG_RESUME);
176
177 Platfrom_finish:
178 if (suspend_ops->finish)
179 suspend_ops->finish();
180
181 return error;
182}
183
184/**
185 * suspend_devices_and_enter - suspend devices and enter the desired system
186 * sleep state.
187 * @state: state to enter
188 */
189int suspend_devices_and_enter(suspend_state_t state)
190{
191 int error;
192
193 if (!suspend_ops)
194 return -ENOSYS;
195
196 if (suspend_ops->begin) {
197 error = suspend_ops->begin(state);
198 if (error)
199 goto Close;
200 }
201 suspend_console();
202 suspend_test_start();
203 error = dpm_suspend_start(PMSG_SUSPEND);
204 if (error) {
205 printk(KERN_ERR "PM: Some devices failed to suspend\n");
206 goto Recover_platform;
207 }
208 suspend_test_finish("suspend devices");
209 if (suspend_test(TEST_DEVICES))
210 goto Recover_platform;
211
212 suspend_enter(state);
213
214 Resume_devices:
215 suspend_test_start();
216 dpm_resume_end(PMSG_RESUME);
217 suspend_test_finish("resume devices");
218 resume_console();
219 Close:
220 if (suspend_ops->end)
221 suspend_ops->end();
222 return error;
223
224 Recover_platform:
225 if (suspend_ops->recover)
226 suspend_ops->recover();
227 goto Resume_devices;
228}
229
/**
 * suspend_finish - Do final work before exiting suspend sequence.
 *
 * Call platform code to clean up, restart processes, and free the
 * console that we've allocated. This is not called for suspend-to-disk.
 * Undoes suspend_prepare() in reverse order.
 */
static void suspend_finish(void)
{
        suspend_thaw_processes();
        usermodehelper_enable();
        pm_notifier_call_chain(PM_POST_SUSPEND);
        pm_restore_console();
}
243
/**
 * enter_state - Do common work of entering low-power state.
 * @state: pm_state structure for state we're entering.
 *
 * Make sure we're the only ones trying to enter a sleep state. Fail
 * if someone has beat us to it, since we don't want anything weird to
 * happen when we wake up.
 * Then, do the setup for suspend, enter the state, and cleanup (after
 * we've woken up).
 */
int enter_state(suspend_state_t state)
{
        int error;

        if (!valid_state(state))
                return -ENODEV;

        /* trylock: fail fast with -EBUSY instead of queueing behind
         * another sleep attempt already holding pm_mutex. */
        if (!mutex_trylock(&pm_mutex))
                return -EBUSY;

        printk(KERN_INFO "PM: Syncing filesystems ... ");
        sys_sync();
        printk("done.\n");

        pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
        error = suspend_prepare();
        if (error)
                goto Unlock;

        if (suspend_test(TEST_FREEZER))
                goto Finish;

        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
        error = suspend_devices_and_enter(state);

 Finish:
        pr_debug("PM: Finishing wakeup.\n");
        suspend_finish();
 Unlock:
        mutex_unlock(&pm_mutex);
        return error;
}
286
287/**
288 * pm_suspend - Externally visible function for suspending system.
289 * @state: Enumerated value of state to enter.
290 *
291 * Determine whether or not value is within range, get state
292 * structure, and enter (above).
293 */
294int pm_suspend(suspend_state_t state)
295{
296 if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
297 return enter_state(state);
298 return -EINVAL;
299}
300EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
new file mode 100644
index 000000000000..17d8bb1acf9c
--- /dev/null
+++ b/kernel/power/suspend_test.c
@@ -0,0 +1,187 @@
1/*
2 * kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
3 *
4 * Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/rtc.h>
11
12#include "power.h"
13
14/*
15 * We test the system suspend code by setting an RTC wakealarm a short
16 * time in the future, then suspending. Suspending the devices won't
17 * normally take long ... some systems only need a few milliseconds.
18 *
19 * The time it takes is system-specific though, so when we test this
20 * during system bootup we allow a LOT of time.
21 */
/* RTC alarm delay for the boot-time suspend test; generous on purpose. */
#define TEST_SUSPEND_SECONDS    5

/* Jiffies timestamp recorded by suspend_test_start(). */
static unsigned long suspend_test_start_time;

/* Record the start of a timed suspend/resume phase. */
void suspend_test_start(void)
{
        /* FIXME Use better timebase than "jiffies", ideally a clocksource.
         * What we want is a hardware counter that will work correctly even
         * during the irqs-are-off stages of the suspend/resume cycle...
         */
        suspend_test_start_time = jiffies;
}
34
35void suspend_test_finish(const char *label)
36{
37 long nj = jiffies - suspend_test_start_time;
38 unsigned msec;
39
40 msec = jiffies_to_msecs(abs(nj));
41 pr_info("PM: %s took %d.%03d seconds\n", label,
42 msec / 1000, msec % 1000);
43
44 /* Warning on suspend means the RTC alarm period needs to be
45 * larger -- the system was sooo slooowwww to suspend that the
46 * alarm (should have) fired before the system went to sleep!
47 *
48 * Warning on either suspend or resume also means the system
49 * has some performance issues. The stack dump of a WARN_ON
50 * is more likely to get the right attention than a printk...
51 */
52 WARN(msec > (TEST_SUSPEND_SECONDS * 1000), "Component: %s\n", label);
53}
54
55/*
56 * To test system suspend, we need a hands-off mechanism to resume the
57 * system. RTCs wake alarms are a common self-contained mechanism.
58 */
59
/*
 * test_wakealarm - suspend the system, relying on an RTC alarm to resume it.
 * @rtc: wakealarm-capable RTC device to use as the wakeup source.
 * @state: state to test; mem falls back to standby on -ENODEV.
 *
 * Boot-time only (__init); format strings are kept in __initdata so they
 * are discarded with the rest of the init section.
 */
static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
{
        static char err_readtime[] __initdata =
                KERN_ERR "PM: can't read %s time, err %d\n";
        static char err_wakealarm [] __initdata =
                KERN_ERR "PM: can't set %s wakealarm, err %d\n";
        static char err_suspend[] __initdata =
                KERN_ERR "PM: suspend test failed, error %d\n";
        static char info_test[] __initdata =
                KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";

        unsigned long now;
        struct rtc_wkalrm alm;
        int status;

        /* this may fail if the RTC hasn't been initialized */
        status = rtc_read_time(rtc, &alm.time);
        if (status < 0) {
                printk(err_readtime, dev_name(&rtc->dev), status);
                return;
        }
        rtc_tm_to_time(&alm.time, &now);

        /* alm.time above was just a scratch buffer; build the real alarm
         * TEST_SUSPEND_SECONDS from now. */
        memset(&alm, 0, sizeof alm);
        rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
        alm.enabled = true;

        status = rtc_set_alarm(rtc, &alm);
        if (status < 0) {
                printk(err_wakealarm, dev_name(&rtc->dev), status);
                return;
        }

        if (state == PM_SUSPEND_MEM) {
                printk(info_test, pm_states[state]);
                status = pm_suspend(state);
                if (status == -ENODEV)
                        state = PM_SUSPEND_STANDBY;     /* retry below */
        }
        if (state == PM_SUSPEND_STANDBY) {
                printk(info_test, pm_states[state]);
                status = pm_suspend(state);
        }
        if (status < 0)
                printk(err_suspend, status);

        /* Some platforms can't detect that the alarm triggered the
         * wakeup, or (accordingly) disable it afterwards.
         * It's supposed to give oneshot behavior; cope.
         */
        alm.enabled = false;
        rtc_set_alarm(rtc, &alm);
}
113
114static int __init has_wakealarm(struct device *dev, void *name_ptr)
115{
116 struct rtc_device *candidate = to_rtc_device(dev);
117
118 if (!candidate->ops->set_alarm)
119 return 0;
120 if (!device_may_wakeup(candidate->dev.parent))
121 return 0;
122
123 *(const char **)name_ptr = dev_name(dev);
124 return 1;
125}
126
127/*
128 * Kernel options like "test_suspend=mem" force suspend/resume sanity tests
129 * at startup time. They're normally disabled, for faster boot and because
130 * we can't know which states really work on this particular system.
131 */
/* State selected by "test_suspend=..."; PM_SUSPEND_ON means no test. */
static suspend_state_t test_state __initdata = PM_SUSPEND_ON;

/* Shared by setup_test_suspend() and test_suspend() for bad-state reports. */
static char warn_bad_state[] __initdata =
        KERN_WARNING "PM: can't test '%s' suspend state\n";
136
137static int __init setup_test_suspend(char *value)
138{
139 unsigned i;
140
141 /* "=mem" ==> "mem" */
142 value++;
143 for (i = 0; i < PM_SUSPEND_MAX; i++) {
144 if (!pm_states[i])
145 continue;
146 if (strcmp(pm_states[i], value) != 0)
147 continue;
148 test_state = (__force suspend_state_t) i;
149 return 0;
150 }
151 printk(warn_bad_state, value);
152 return 0;
153}
154__setup("test_suspend", setup_test_suspend);
155
156static int __init test_suspend(void)
157{
158 static char warn_no_rtc[] __initdata =
159 KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
160
161 char *pony = NULL;
162 struct rtc_device *rtc = NULL;
163
164 /* PM is initialized by now; is that state testable? */
165 if (test_state == PM_SUSPEND_ON)
166 goto done;
167 if (!valid_state(test_state)) {
168 printk(warn_bad_state, pm_states[test_state]);
169 goto done;
170 }
171
172 /* RTCs have initialized by now too ... can we use one? */
173 class_find_device(rtc_class, NULL, &pony, has_wakealarm);
174 if (pony)
175 rtc = rtc_class_open(pony);
176 if (!rtc) {
177 printk(warn_no_rtc);
178 goto done;
179 }
180
181 /* go for it */
182 test_wakealarm(rtc, test_state);
183 rtc_class_close(rtc);
184done:
185 return 0;
186}
187late_initcall(test_suspend);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 78c35047586d..6a07f4dbf2f8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -55,14 +55,6 @@
55 55
56#include "power.h" 56#include "power.h"
57 57
58/*
59 * Preferred image size in bytes (tunable via /sys/power/image_size).
60 * When it is set to N, swsusp will do its best to ensure the image
61 * size will not exceed N bytes, but if that is impossible, it will
62 * try to create the smallest image possible.
63 */
64unsigned long image_size = 500 * 1024 * 1024;
65
66int in_suspend __nosavedata = 0; 58int in_suspend __nosavedata = 0;
67 59
68/** 60/**
@@ -194,193 +186,3 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
194 centisecs / 100, centisecs % 100, 186 centisecs / 100, centisecs % 100,
195 kps / 1000, (kps % 1000) / 10); 187 kps / 1000, (kps % 1000) / 10);
196} 188}
197
198/**
199 * swsusp_shrink_memory - Try to free as much memory as needed
200 *
201 * ... but do not OOM-kill anyone
202 *
203 * Notice: all userland should be stopped before it is called, or
204 * livelock is possible.
205 */
206
/* Largest reclaim request handed to shrink_all_memory() at once. */
#define SHRINK_BITE 10000

/* Clamp the request to SHRINK_BITE pages and reclaim; returns pages freed. */
static inline unsigned long __shrink_memory(long tmp)
{
        if (tmp > SHRINK_BITE)
                return shrink_all_memory(SHRINK_BITE);
        return shrink_all_memory(tmp);
}
214
/**
 * swsusp_shrink_memory - Free memory until the hibernation image will fit.
 *
 * Loops: estimate the snapshot's page demand beyond free memory, reclaim
 * that many pages, and repeat until enough room exists or reclaim stalls.
 *
 * Returns 0 on success, -ENOMEM if no progress can be made while memory
 * is still insufficient.  All userland must be frozen first, or a
 * livelock is possible.
 */
int swsusp_shrink_memory(void)
{
        long tmp;
        struct zone *zone;
        unsigned long pages = 0;
        unsigned int i = 0;
        char *p = "-\\|/";      /* console progress-spinner glyphs */
        struct timeval start, stop;

        printk(KERN_INFO "PM: Shrinking memory... ");
        do_gettimeofday(&start);
        do {
                long size, highmem_size;

                /* Estimate this iteration's page demand. */
                highmem_size = count_highmem_pages();
                size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
                tmp = size;
                size += highmem_size;
                for_each_populated_zone(zone) {
                        tmp += snapshot_additional_pages(zone);
                        if (is_highmem(zone)) {
                                /* credit free highmem against demand */
                                highmem_size -=
                                        zone_page_state(zone, NR_FREE_PAGES);
                        } else {
                                tmp -= zone_page_state(zone, NR_FREE_PAGES);
                                /* lowmem reserve can't hold image pages */
                                tmp += zone->lowmem_reserve[ZONE_NORMAL];
                        }
                }

                if (highmem_size < 0)
                        highmem_size = 0;

                tmp += highmem_size;
                if (tmp > 0) {
                        /* Short by tmp pages; no progress => give up. */
                        tmp = __shrink_memory(tmp);
                        if (!tmp)
                                return -ENOMEM;
                        pages += tmp;
                } else if (size > image_size / PAGE_SIZE) {
                        /* Fits, but exceeds the preferred image size. */
                        tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
                        pages += tmp;
                }
                printk("\b%c", p[i++%4]);
        } while (tmp > 0);
        do_gettimeofday(&stop);
        printk("\bdone (%lu pages freed)\n", pages);
        swsusp_show_speed(&start, &stop, pages, "Freed");

        return 0;
}
265
266/*
267 * Platforms, like ACPI, may want us to save some memory used by them during
268 * hibernation and to restore the contents of this memory during the subsequent
269 * resume. The code below implements a mechanism allowing us to do that.
270 */
271
/* Bookkeeping for one page-sized chunk of a platform NVS region. */
struct nvs_page {
        unsigned long phys_start;       /* physical start of this chunk */
        unsigned int size;              /* bytes to save from this chunk */
        void *kaddr;                    /* ioremap()ed address, when mapped */
        void *data;                     /* RAM copy of the chunk's contents */
        struct list_head node;          /* link in nvs_list */
};

/* All registered NVS chunks, in registration order. */
static LIST_HEAD(nvs_list);
281
282/**
283 * hibernate_nvs_register - register platform NVS memory region to save
284 * @start - physical address of the region
285 * @size - size of the region
286 *
287 * The NVS region need not be page-aligned (both ends) and we arrange
288 * things so that the data from page-aligned addresses in this region will
289 * be copied into separate RAM pages.
290 */
291int hibernate_nvs_register(unsigned long start, unsigned long size)
292{
293 struct nvs_page *entry, *next;
294
295 while (size > 0) {
296 unsigned int nr_bytes;
297
298 entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
299 if (!entry)
300 goto Error;
301
302 list_add_tail(&entry->node, &nvs_list);
303 entry->phys_start = start;
304 nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
305 entry->size = (size < nr_bytes) ? size : nr_bytes;
306
307 start += entry->size;
308 size -= entry->size;
309 }
310 return 0;
311
312 Error:
313 list_for_each_entry_safe(entry, next, &nvs_list, node) {
314 list_del(&entry->node);
315 kfree(entry);
316 }
317 return -ENOMEM;
318}
319
320/**
321 * hibernate_nvs_free - free data pages allocated for saving NVS regions
322 */
323void hibernate_nvs_free(void)
324{
325 struct nvs_page *entry;
326
327 list_for_each_entry(entry, &nvs_list, node)
328 if (entry->data) {
329 free_page((unsigned long)entry->data);
330 entry->data = NULL;
331 if (entry->kaddr) {
332 iounmap(entry->kaddr);
333 entry->kaddr = NULL;
334 }
335 }
336}
337
338/**
339 * hibernate_nvs_alloc - allocate memory necessary for saving NVS regions
340 */
341int hibernate_nvs_alloc(void)
342{
343 struct nvs_page *entry;
344
345 list_for_each_entry(entry, &nvs_list, node) {
346 entry->data = (void *)__get_free_page(GFP_KERNEL);
347 if (!entry->data) {
348 hibernate_nvs_free();
349 return -ENOMEM;
350 }
351 }
352 return 0;
353}
354
355/**
356 * hibernate_nvs_save - save NVS memory regions
357 */
358void hibernate_nvs_save(void)
359{
360 struct nvs_page *entry;
361
362 printk(KERN_INFO "PM: Saving platform NVS memory\n");
363
364 list_for_each_entry(entry, &nvs_list, node)
365 if (entry->data) {
366 entry->kaddr = ioremap(entry->phys_start, entry->size);
367 memcpy(entry->data, entry->kaddr, entry->size);
368 }
369}
370
371/**
372 * hibernate_nvs_restore - restore NVS memory regions
373 *
374 * This function is going to be called with interrupts disabled, so it
375 * cannot iounmap the virtual addresses used to access the NVS region.
376 */
377void hibernate_nvs_restore(void)
378{
379 struct nvs_page *entry;
380
381 printk(KERN_INFO "PM: Restoring platform NVS memory\n");
382
383 list_for_each_entry(entry, &nvs_list, node)
384 if (entry->data)
385 memcpy(entry->kaddr, entry->data, entry->size);
386}
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 820c5af44f3e..fcd107a78c5a 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -902,7 +902,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
902 * Returns: 902 * Returns:
903 * 0 on success 903 * 0 on success
904 * -EINTR when interrupted by a signal 904 * -EINTR when interrupted by a signal
905 * -ETIMEOUT when the timeout expired 905 * -ETIMEDOUT when the timeout expired
906 * -EDEADLK when the lock would deadlock (when deadlock detection is on) 906 * -EDEADLK when the lock would deadlock (when deadlock detection is on)
907 */ 907 */
908int 908int
diff --git a/kernel/sched.c b/kernel/sched.c
index f04aa9664504..8ec9d13140be 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2192,6 +2192,7 @@ void kick_process(struct task_struct *p)
2192 smp_send_reschedule(cpu); 2192 smp_send_reschedule(cpu);
2193 preempt_enable(); 2193 preempt_enable();
2194} 2194}
2195EXPORT_SYMBOL_GPL(kick_process);
2195 2196
2196/* 2197/*
2197 * Return a low guess at the load of a migration-source cpu weighted 2198 * Return a low guess at the load of a migration-source cpu weighted
diff --git a/kernel/timer.c b/kernel/timer.c
index c01e568935ea..faf2db897de4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -757,6 +757,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
757 wake_up_idle_cpu(cpu); 757 wake_up_idle_cpu(cpu);
758 spin_unlock_irqrestore(&base->lock, flags); 758 spin_unlock_irqrestore(&base->lock, flags);
759} 759}
760EXPORT_SYMBOL_GPL(add_timer_on);
760 761
761/** 762/**
762 * del_timer - deactive a timer. 763 * del_timer - deactive a timer.