author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 23:34:19 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 23:34:19 -0400
commit	7725131982477bffff8ffdea143434dcc69f5d90 (patch)
tree	099440f6a8b11c8221ea99effec4c22818b3b577 /kernel/power
parent	6b22df74f7af62137772c280791c932855f7635b (diff)
parent	7ef97e0e3a0f4b02601dde384127cc85d27e46e2 (diff)
Merge tag 'pm+acpi-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
 "Again, ACPICA leads the pack (47 commits), followed by cpufreq (18
  commits) and system suspend/hibernation (9 commits).

  From the new code perspective, the ACPICA update brings ACPI 5.1 to
  the table, including a new device configuration object called _DSD
  (Device Specific Data) that will hopefully help us to operate device
  properties like Device Trees do (at least to some extent) and changes
  related to supporting ACPI on ARM.

  Apart from that we have hibernation changes making it use radix trees
  to store memory bitmaps, which should speed up some operations
  carried out by it quite significantly.  We also have some power
  management changes related to suspend-to-idle (the "freeze" sleep
  state) support and more preliminary changes needed to support ACPI on
  ARM (outside of ACPICA).

  The rest is fixes and cleanups pretty much everywhere.

  Specifics:

   - ACPICA update to upstream version 20140724.  That includes ACPI
     5.1 material (support for the _CCA and _DSD predefined names,
     changes related to the DMAR and PCCT tables and ARM support among
     other things) and cleanups related to using ACPICA's header files.
     A major part of it is related to acpidump and the core code used
     by that utility.  Changes from Bob Moore, David E Box, Lv Zheng,
     Sascha Wildner, Tomasz Nowicki, Hanjun Guo.

   - Radix trees for memory bitmaps used by the hibernation core from
     Joerg Roedel.

   - Support for waking up the system from suspend-to-idle (also known
     as the "freeze" sleep state) using ACPI-based PCI wakeup signaling
     (Rafael J Wysocki).

   - Fixes for issues related to ACPI button events (Rafael J Wysocki).

   - New device ID for an ACPI-enumerated device included into the
     Wildcat Point PCH from Jie Yang.

   - ACPI video updates related to backlight handling from Hans de
     Goede and Linus Torvalds.

   - Preliminary changes needed to support ACPI on ARM from Hanjun Guo
     and Graeme Gregory.

   - ACPI PNP core cleanups from Arjun Sreedharan and Zhang Rui.

   - Cleanups related to ACPI_COMPANION() and ACPI_HANDLE() macros
     (Rafael J Wysocki).

   - ACPI-based device hotplug cleanups from Wei Yongjun and Rafael J
     Wysocki.

   - Cleanups and improvements related to system suspend from Lan
     Tianyu, Randy Dunlap and Rafael J Wysocki.

   - ACPI battery cleanup from Wei Yongjun.

   - cpufreq core fixes from Viresh Kumar.

   - Elimination of a deadband effect from the cpufreq ondemand
     governor and intel_pstate driver cleanups from Stratos Karafotis.

   - 350MHz CPU support for the powernow-k6 cpufreq driver from Mikulas
     Patocka.

   - Fix for the imx6 cpufreq driver from Anson Huang.

   - cpuidle core and governor cleanups from Daniel Lezcano, Sandeep
     Tripathy and Mohammad Merajul Islam Molla.

   - Build fix for the big_little cpuidle driver from Sachin Kamat.

   - Configuration fix for the Operating Performance Points (OPP)
     framework from Mark Brown.

   - APM cleanup from Jean Delvare.

   - cpupower utility fixes and cleanups from Peter Senna Tschudin,
     Andrey Utkin, Himangi Saraogi, Rickard Strandqvist, Thomas
     Renninger"

* tag 'pm+acpi-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (118 commits)
  ACPI / LPSS: add LPSS device for Wildcat Point PCH
  ACPI / PNP: Replace faulty is_hex_digit() by isxdigit()
  ACPICA: Update version to 20140724.
  ACPICA: ACPI 5.1: Update for PCCT table changes.
  ACPICA/ARM: ACPI 5.1: Update for GTDT table changes.
  ACPICA/ARM: ACPI 5.1: Update for MADT changes.
  ACPICA/ARM: ACPI 5.1: Update for FADT changes.
  ACPICA: ACPI 5.1: Support for the _CCA predefined name.
  ACPICA: ACPI 5.1: New notify value for System Affinity Update.
  ACPICA: ACPI 5.1: Support for the _DSD predefined name.
  ACPICA: Debug object: Add current value of Timer() to debug line prefix.
  ACPICA: acpihelp: Add UUID support, restructure some existing files.
  ACPICA: Utilities: Fix local printf issue.
  ACPICA: Tables: Update for DMAR table changes.
  ACPICA: Remove some extraneous printf arguments.
  ACPICA: Update for comments/formatting. No functional changes.
  ACPICA: Disassembler: Add support for the ToUUID operator (macro).
  ACPICA: Remove a redundant cast to acpi_size for ACPI_OFFSET() macro.
  ACPICA: Work around an ancient GCC bug.
  ACPI / processor: Make it possible to get local x2apic id via _MAT
  ...
Diffstat (limited to 'kernel/power')
-rw-r--r--  kernel/power/Kconfig          3
-rw-r--r--  kernel/power/main.c          25
-rw-r--r--  kernel/power/power.h          7
-rw-r--r--  kernel/power/snapshot.c     494
-rw-r--r--  kernel/power/suspend.c      152
-rw-r--r--  kernel/power/suspend_test.c  12
6 files changed, 475 insertions(+), 218 deletions(-)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9a83d780facd..e4e4121fa327 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -253,9 +253,6 @@ config APM_EMULATION
 	  anything, try disabling/enabling this option (or disabling/enabling
 	  APM in your BIOS).
 
-config ARCH_HAS_OPP
-	bool
-
 config PM_OPP
 	bool
 	---help---
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8e90f330f139..9a59d042ea84 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -296,8 +296,8 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 	suspend_state_t i;
 
 	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
-		if (pm_states[i].state)
-			s += sprintf(s,"%s ", pm_states[i].label);
+		if (pm_states[i])
+			s += sprintf(s,"%s ", pm_states[i]);
 
 #endif
 	if (hibernation_available())
@@ -311,8 +311,7 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
 static suspend_state_t decode_state(const char *buf, size_t n)
 {
 #ifdef CONFIG_SUSPEND
-	suspend_state_t state = PM_SUSPEND_MIN;
-	struct pm_sleep_state *s;
+	suspend_state_t state;
 #endif
 	char *p;
 	int len;
@@ -325,10 +324,12 @@ static suspend_state_t decode_state(const char *buf, size_t n)
 		return PM_SUSPEND_MAX;
 
 #ifdef CONFIG_SUSPEND
-	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
-		if (s->state && len == strlen(s->label)
-		    && !strncmp(buf, s->label, len))
-			return s->state;
+	for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
+		const char *label = pm_states[state];
+
+		if (label && len == strlen(label) && !strncmp(buf, label, len))
+			return state;
+	}
 #endif
 
 	return PM_SUSPEND_ON;
@@ -446,8 +447,8 @@ static ssize_t autosleep_show(struct kobject *kobj,
 
 #ifdef CONFIG_SUSPEND
 	if (state < PM_SUSPEND_MAX)
-		return sprintf(buf, "%s\n", pm_states[state].state ?
-			pm_states[state].label : "error");
+		return sprintf(buf, "%s\n", pm_states[state] ?
+			pm_states[state] : "error");
 #endif
 #ifdef CONFIG_HIBERNATION
 	return sprintf(buf, "disk\n");
@@ -615,7 +616,6 @@ static struct attribute_group attr_group = {
 	.attrs = g,
 };
 
-#ifdef CONFIG_PM_RUNTIME
 struct workqueue_struct *pm_wq;
 EXPORT_SYMBOL_GPL(pm_wq);
 
@@ -625,9 +625,6 @@ static int __init pm_start_workqueue(void)
 
 	return pm_wq ? 0 : -ENOMEM;
 }
-#else
-static inline int pm_start_workqueue(void) { return 0; }
-#endif
 
 static int __init pm_init(void)
 {
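
With struct pm_sleep_state gone, pm_states[] is just an array of label strings indexed by suspend_state_t, and a NULL entry marks an unsupported state. A standalone sketch of the new decode_state() lookup (the enum values and table contents below are illustrative stand-ins, not taken from this patch):

	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the kernel's suspend_state_t values. */
	enum { PM_SUSPEND_ON, PM_SUSPEND_FREEZE, PM_SUSPEND_STANDBY,
	       PM_SUSPEND_MEM, PM_SUSPEND_MAX };
	#define PM_SUSPEND_MIN PM_SUSPEND_FREEZE

	/* Example table as suspend_set_ops() might leave it on a machine
	 * that supports all three states. */
	static const char *pm_states[PM_SUSPEND_MAX] = {
		[PM_SUSPEND_FREEZE]  = "freeze",
		[PM_SUSPEND_STANDBY] = "standby",
		[PM_SUSPEND_MEM]     = "mem",
	};

	/* Same shape as the new decode_state() loop: skip NULL
	 * (unsupported) slots; on a match the array index is the state. */
	static int decode_state(const char *buf, size_t len)
	{
		int state;

		for (state = PM_SUSPEND_MIN; state < PM_SUSPEND_MAX; state++) {
			const char *label = pm_states[state];

			if (label && len == strlen(label) &&
			    !strncmp(buf, label, len))
				return state;
		}
		return PM_SUSPEND_ON;	/* no match */
	}

	int main(void)
	{
		printf("\"mem\"  -> %d (PM_SUSPEND_MEM)\n", decode_state("mem", 3));
		printf("\"disk\" -> %d (PM_SUSPEND_ON)\n", decode_state("disk", 4));
		return 0;
	}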
diff --git a/kernel/power/power.h b/kernel/power/power.h
index c60f13b5270a..5d49dcac2537 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -178,13 +178,8 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *,
 			unsigned int, char *);
 
 #ifdef CONFIG_SUSPEND
-struct pm_sleep_state {
-	const char *label;
-	suspend_state_t state;
-};
-
 /* kernel/power/suspend.c */
-extern struct pm_sleep_state pm_states[];
+extern const char *pm_states[];
 
 extern int suspend_devices_and_enter(suspend_state_t state);
 #else /* !CONFIG_SUSPEND */
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 1ea328aafdc9..4fc5c32422b3 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -248,33 +248,61 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
  * information is stored (in the form of a block of bitmap)
  * It also contains the pfns that correspond to the start and end of
  * the represented memory area.
+ *
+ * The memory bitmap is organized as a radix tree to guarantee fast random
+ * access to the bits. There is one radix tree for each zone (as returned
+ * from create_mem_extents).
+ *
+ * One radix tree is represented by one struct mem_zone_bm_rtree. There are
+ * two linked lists for the nodes of the tree, one for the inner nodes and
+ * one for the leaf nodes. The linked leaf nodes are used for fast linear
+ * access of the memory bitmap.
+ *
+ * The struct rtree_node represents one node of the radix tree.
  */
 
 #define BM_END_OF_MAP	(~0UL)
 
 #define BM_BITS_PER_BLOCK	(PAGE_SIZE * BITS_PER_BYTE)
+#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)
+#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
 
-struct bm_block {
-	struct list_head hook;		/* hook into a list of bitmap blocks */
-	unsigned long start_pfn;	/* pfn represented by the first bit */
-	unsigned long end_pfn;		/* pfn represented by the last bit plus 1 */
-	unsigned long *data;		/* bitmap representing pages */
+/*
+ * struct rtree_node is a wrapper struct to link the nodes
+ * of the rtree together for easy linear iteration over
+ * bits and easy freeing
+ */
+struct rtree_node {
+	struct list_head list;
+	unsigned long *data;
 };
 
-static inline unsigned long bm_block_bits(struct bm_block *bb)
-{
-	return bb->end_pfn - bb->start_pfn;
-}
+/*
+ * struct mem_zone_bm_rtree represents a bitmap used for one
+ * populated memory zone.
+ */
+struct mem_zone_bm_rtree {
+	struct list_head list;		/* Link Zones together         */
+	struct list_head nodes;		/* Radix Tree inner nodes      */
+	struct list_head leaves;	/* Radix Tree leaves           */
+	unsigned long start_pfn;	/* Zone start page frame       */
+	unsigned long end_pfn;		/* Zone end page frame + 1     */
+	struct rtree_node *rtree;	/* Radix Tree Root             */
+	int levels;			/* Number of Radix Tree Levels */
+	unsigned int blocks;		/* Number of Bitmap Blocks     */
+};
 
 /* struct bm_position is used for browsing memory bitmaps */
 
 struct bm_position {
-	struct bm_block *block;
-	int bit;
+	struct mem_zone_bm_rtree *zone;
+	struct rtree_node *node;
+	unsigned long node_pfn;
+	int node_bit;
 };
 
 struct memory_bitmap {
-	struct list_head blocks;	/* list of bitmap blocks */
+	struct list_head zones;
 	struct linked_page *p_list;	/* list of pages used to store zone
 					 * bitmap objects and bitmap block
 					 * objects
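
The new constants carve a zone-relative pfn offset into fields: the low BM_BLOCK_SHIFT bits select a bit inside one bitmap page, and each group of BM_RTREE_LEVEL_SHIFT bits above that selects a slot in one radix-tree level. A standalone sketch of the arithmetic, assuming 4 KiB pages and 64-bit longs (PAGE_SHIFT = 12, so 32768 bits per leaf and 512 pointers per node); the example pfns are arbitrary:

	#include <stdio.h>

	/* Assumed values for a 4 KiB-page, 64-bit configuration. */
	#define PAGE_SHIFT		12
	#define BM_BLOCK_SHIFT		(PAGE_SHIFT + 3)	/* 32768 bits per leaf page */
	#define BM_BLOCK_MASK		((1UL << BM_BLOCK_SHIFT) - 1)
	#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)	/* 512 pointers per node page */
	#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)

	int main(void)
	{
		unsigned long zone_start = 0x100000;	/* example zone start pfn */
		unsigned long pfn = 0x123456;		/* example pfn in the zone */
		unsigned long off = pfn - zone_start;

		unsigned long block_nr = off >> BM_BLOCK_SHIFT;	/* which leaf page */
		unsigned long bit_nr = off & BM_BLOCK_MASK;	/* which bit inside it */

		/* Per-level indices for a two-level tree, as
		 * memory_bm_find_bit() would compute them. */
		unsigned long idx1 = (block_nr >> BM_RTREE_LEVEL_SHIFT) &
				     BM_RTREE_LEVEL_MASK;
		unsigned long idx0 = block_nr & BM_RTREE_LEVEL_MASK;

		printf("block %lu, bit %lu, level1 idx %lu, level0 idx %lu\n",
		       block_nr, bit_nr, idx1, idx0);
		return 0;
	}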
@@ -284,38 +312,178 @@ struct memory_bitmap {
 
 /* Functions that operate on memory bitmaps */
 
-static void memory_bm_position_reset(struct memory_bitmap *bm)
+#define BM_ENTRIES_PER_LEVEL	(PAGE_SIZE / sizeof(unsigned long))
+#if BITS_PER_LONG == 32
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 2)
+#else
+#define BM_RTREE_LEVEL_SHIFT	(PAGE_SHIFT - 3)
+#endif
+#define BM_RTREE_LEVEL_MASK	((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
+
+/*
+ * alloc_rtree_node - Allocate a new node and add it to the radix tree.
+ *
+ * This function is used to allocate inner nodes as well as the
+ * leaf nodes of the radix tree. It also adds the node to the
+ * corresponding linked list passed in by the *list parameter.
+ */
+static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
+					   struct chain_allocator *ca,
+					   struct list_head *list)
 {
-	bm->cur.block = list_entry(bm->blocks.next, struct bm_block, hook);
-	bm->cur.bit = 0;
-}
+	struct rtree_node *node;
 
-static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+	node = chain_alloc(ca, sizeof(struct rtree_node));
+	if (!node)
+		return NULL;
 
-/**
- *	create_bm_block_list - create a list of block bitmap objects
- *	@pages - number of pages to track
- *	@list - list to put the allocated blocks into
- *	@ca - chain allocator to be used for allocating memory
+	node->data = get_image_page(gfp_mask, safe_needed);
+	if (!node->data)
+		return NULL;
+
+	list_add_tail(&node->list, list);
+
+	return node;
+}
+
+/*
+ * add_rtree_block - Add a new leaf node to the radix tree
+ *
+ * The leaf nodes need to be allocated in order to keep the leaves
+ * linked list in order. This is guaranteed by the zone->blocks
+ * counter.
  */
-static int create_bm_block_list(unsigned long pages,
-				struct list_head *list,
-				struct chain_allocator *ca)
+static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
+			   int safe_needed, struct chain_allocator *ca)
 {
-	unsigned int nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+	struct rtree_node *node, *block, **dst;
+	unsigned int levels_needed, block_nr;
+	int i;
 
-	while (nr_blocks-- > 0) {
-		struct bm_block *bb;
+	block_nr = zone->blocks;
+	levels_needed = 0;
 
-		bb = chain_alloc(ca, sizeof(struct bm_block));
-		if (!bb)
+	/* How many levels do we need for this block nr? */
+	while (block_nr) {
+		levels_needed += 1;
+		block_nr >>= BM_RTREE_LEVEL_SHIFT;
+	}
+
+	/* Make sure the rtree has enough levels */
+	for (i = zone->levels; i < levels_needed; i++) {
+		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+					&zone->nodes);
+		if (!node)
 			return -ENOMEM;
-		list_add(&bb->hook, list);
+
+		node->data[0] = (unsigned long)zone->rtree;
+		zone->rtree = node;
+		zone->levels += 1;
+	}
+
+	/* Allocate new block */
+	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
+	if (!block)
+		return -ENOMEM;
+
+	/* Now walk the rtree to insert the block */
+	node = zone->rtree;
+	dst = &zone->rtree;
+	block_nr = zone->blocks;
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		if (!node) {
+			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
+						&zone->nodes);
+			if (!node)
+				return -ENOMEM;
+			*dst = node;
+		}
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		dst = (struct rtree_node **)&((*dst)->data[index]);
+		node = *dst;
 	}
 
+	zone->blocks += 1;
+	*dst = block;
+
 	return 0;
 }
 
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free);
+
+/*
+ * create_zone_bm_rtree - create a radix tree for one zone
+ *
+ * Allocates the mem_zone_bm_rtree structure and initializes it.
+ * This function also allocates and builds the radix tree for the
+ * zone.
+ */
+static struct mem_zone_bm_rtree *
+create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
+		     struct chain_allocator *ca,
+		     unsigned long start, unsigned long end)
+{
+	struct mem_zone_bm_rtree *zone;
+	unsigned int i, nr_blocks;
+	unsigned long pages;
+
+	pages = end - start;
+	zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
+	if (!zone)
+		return NULL;
+
+	INIT_LIST_HEAD(&zone->nodes);
+	INIT_LIST_HEAD(&zone->leaves);
+	zone->start_pfn = start;
+	zone->end_pfn = end;
+	nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
+
+	for (i = 0; i < nr_blocks; i++) {
+		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
+			free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
+			return NULL;
+		}
+	}
+
+	return zone;
+}
+
+/*
+ * free_zone_bm_rtree - Free the memory of the radix tree
+ *
+ * Free all node pages of the radix tree. The mem_zone_bm_rtree
+ * structure itself is not freed here nor are the rtree_node
+ * structs.
+ */
+static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
+			       int clear_nosave_free)
+{
+	struct rtree_node *node;
+
+	list_for_each_entry(node, &zone->nodes, list)
+		free_image_page(node->data, clear_nosave_free);
+
+	list_for_each_entry(node, &zone->leaves, list)
+		free_image_page(node->data, clear_nosave_free);
+}
+
+static void memory_bm_position_reset(struct memory_bitmap *bm)
+{
+	bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
+				  list);
+	bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+				  struct rtree_node, list);
+	bm->cur.node_pfn = 0;
+	bm->cur.node_bit = 0;
+}
+
+static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
+
 struct mem_extent {
 	struct list_head hook;
 	unsigned long start;
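
Because each tree level multiplies the reach by BM_ENTRIES_PER_LEVEL, even large zones stay shallow. A standalone re-run of the level-counting loop from add_rtree_block() for a hypothetical 16 GiB zone (constants again assume 4 KiB pages and 64-bit longs):

	#include <stdio.h>

	/* Assumed 4 KiB pages, 64-bit longs (see constants above). */
	#define BM_BITS_PER_BLOCK	32768UL	/* PAGE_SIZE * BITS_PER_BYTE */
	#define BM_RTREE_LEVEL_SHIFT	9	/* PAGE_SHIFT - 3 */

	int main(void)
	{
		/* A 16 GiB zone spans 4M pages -> 128 leaf blocks. */
		unsigned long spanned_pages = 4UL << 20;
		unsigned long blocks = (spanned_pages + BM_BITS_PER_BLOCK - 1) /
				       BM_BITS_PER_BLOCK;

		/* Same loop add_rtree_block() runs for the next block number. */
		unsigned int levels_needed = 0;
		unsigned long block_nr = blocks;

		while (block_nr) {
			levels_needed += 1;
			block_nr >>= BM_RTREE_LEVEL_SHIFT;
		}
		/* Prints: 128 blocks, 1 tree level(s) -- one node page of 512
		 * pointers covers up to 512 leaf blocks. */
		printf("%lu blocks, %u tree level(s)\n", blocks, levels_needed);
		return 0;
	}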
@@ -407,40 +575,22 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
 	int error;
 
 	chain_init(&ca, gfp_mask, safe_needed);
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 
 	error = create_mem_extents(&mem_extents, gfp_mask);
 	if (error)
 		return error;
 
 	list_for_each_entry(ext, &mem_extents, hook) {
-		struct bm_block *bb;
-		unsigned long pfn = ext->start;
-		unsigned long pages = ext->end - ext->start;
+		struct mem_zone_bm_rtree *zone;
 
-		bb = list_entry(bm->blocks.prev, struct bm_block, hook);
-
-		error = create_bm_block_list(pages, bm->blocks.prev, &ca);
-		if (error)
+		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
+					    ext->start, ext->end);
+		if (!zone) {
+			error = -ENOMEM;
 			goto Error;
-
-		list_for_each_entry_continue(bb, &bm->blocks, hook) {
-			bb->data = get_image_page(gfp_mask, safe_needed);
-			if (!bb->data) {
-				error = -ENOMEM;
-				goto Error;
-			}
-
-			bb->start_pfn = pfn;
-			if (pages >= BM_BITS_PER_BLOCK) {
-				pfn += BM_BITS_PER_BLOCK;
-				pages -= BM_BITS_PER_BLOCK;
-			} else {
-				/* This is executed only once in the loop */
-				pfn += pages;
-			}
-			bb->end_pfn = pfn;
 		}
+		list_add_tail(&zone->list, &bm->zones);
 	}
 
 	bm->p_list = ca.chain;
@@ -460,51 +610,83 @@ memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
  */
 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *zone;
 
-	list_for_each_entry(bb, &bm->blocks, hook)
-		if (bb->data)
-			free_image_page(bb->data, clear_nosave_free);
+	list_for_each_entry(zone, &bm->zones, list)
+		free_zone_bm_rtree(zone, clear_nosave_free);
 
 	free_list_of_pages(bm->p_list, clear_nosave_free);
 
-	INIT_LIST_HEAD(&bm->blocks);
+	INIT_LIST_HEAD(&bm->zones);
 }
 
 /**
- *	memory_bm_find_bit - find the bit in the bitmap @bm that corresponds
- *	to given pfn. The cur_zone_bm member of @bm and the cur_block member
- *	of @bm->cur_zone_bm are updated.
+ *	memory_bm_find_bit - Find the bit for pfn in the memory bitmap
+ *
+ *	Find the bit in the bitmap @bm that corresponds to the given pfn.
+ *	The cur.zone, cur.node and cur.node_pfn members of @bm are updated.
+ *	It walks the radix tree to find the page which contains the bit for
+ *	pfn and returns the bit position in **addr and *bit_nr.
  */
 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
 			      void **addr, unsigned int *bit_nr)
 {
-	struct bm_block *bb;
+	struct mem_zone_bm_rtree *curr, *zone;
+	struct rtree_node *node;
+	int i, block_nr;
+
+	zone = bm->cur.zone;
+
+	if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
+		goto zone_found;
 
+	zone = NULL;
+
+	/* Find the right zone */
+	list_for_each_entry(curr, &bm->zones, list) {
+		if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
+			zone = curr;
+			break;
+		}
+	}
+
+	if (!zone)
+		return -EFAULT;
+
+zone_found:
 	/*
-	 * Check if the pfn corresponds to the current bitmap block and find
-	 * the block where it fits if this is not the case.
+	 * We have found the zone. Now walk the radix tree to find the leaf
+	 * node for our pfn.
 	 */
-	bb = bm->cur.block;
-	if (pfn < bb->start_pfn)
-		list_for_each_entry_continue_reverse(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn)
-				break;
 
-	if (pfn >= bb->end_pfn)
-		list_for_each_entry_continue(bb, &bm->blocks, hook)
-			if (pfn >= bb->start_pfn && pfn < bb->end_pfn)
-				break;
+	node = bm->cur.node;
+	if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
+		goto node_found;
 
-	if (&bb->hook == &bm->blocks)
-		return -EFAULT;
+	node = zone->rtree;
+	block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
+
+	for (i = zone->levels; i > 0; i--) {
+		int index;
+
+		index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
+		index &= BM_RTREE_LEVEL_MASK;
+		BUG_ON(node->data[index] == 0);
+		node = (struct rtree_node *)node->data[index];
+	}
+
+node_found:
+	/* Update last position */
+	bm->cur.zone = zone;
+	bm->cur.node = node;
+	bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
+
+	/* Set return values */
+	*addr = node->data;
+	*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
 
-	/* The block has been found */
-	bm->cur.block = bb;
-	pfn -= bb->start_pfn;
-	bm->cur.bit = pfn + 1;
-	*bit_nr = pfn;
-	*addr = bb->data;
 	return 0;
 }
 
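
The fast path at the top of memory_bm_find_bit() works because cur.node_pfn stores the zone-relative offset of the cached leaf's first pfn, i.e. the offset with its low BM_BLOCK_SHIFT bits cleared. A minimal sketch of that check (BM_BLOCK_SHIFT = 15 assumed, as above):

	#include <stdio.h>

	#define BM_BLOCK_SHIFT	15
	#define BM_BLOCK_MASK	((1UL << BM_BLOCK_SHIFT) - 1)

	/* Nonzero if two zone-relative pfn offsets fall into the same
	 * 32768-bit leaf bitmap page. */
	static int same_leaf(unsigned long off1, unsigned long off2)
	{
		return (off1 & ~BM_BLOCK_MASK) == (off2 & ~BM_BLOCK_MASK);
	}

	int main(void)
	{
		printf("%d\n", same_leaf(13398, 13399));	/* 1: same leaf */
		printf("%d\n", same_leaf(13398, 40000));	/* 0: next leaf */
		return 0;
	}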
@@ -528,6 +710,7 @@ static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
 	error = memory_bm_find_bit(bm, pfn, &addr, &bit);
 	if (!error)
 		set_bit(bit, addr);
+
 	return error;
 }
 
@@ -542,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+	int bit;
+
+	bit = max(bm->cur.node_bit - 1, 0);
+	clear_bit(bit, bm->cur.node->data);
+}
+
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
@@ -561,38 +752,70 @@ static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
 	return !memory_bm_find_bit(bm, pfn, &addr, &bit);
 }
 
-/**
- *	memory_bm_next_pfn - find the pfn that corresponds to the next set bit
- *	in the bitmap @bm.  If the pfn cannot be found, BM_END_OF_MAP is
- *	returned.
+/*
+ * rtree_next_node - Jump to the next leaf node
+ *
+ * Set the position to the beginning of the next node in the
+ * memory bitmap. This is either the next node in the current
+ * zone's radix tree or the first node in the radix tree of the
+ * next zone.
  *
- *	It is required to run memory_bm_position_reset() before the first call to
- *	this function.
+ * Returns true if there is a next node, false otherwise.
  */
+static bool rtree_next_node(struct memory_bitmap *bm)
+{
+	bm->cur.node = list_entry(bm->cur.node->list.next,
+				  struct rtree_node, list);
+	if (&bm->cur.node->list != &bm->cur.zone->leaves) {
+		bm->cur.node_pfn += BM_BITS_PER_BLOCK;
+		bm->cur.node_bit = 0;
+		touch_softlockup_watchdog();
+		return true;
+	}
+
+	/* No more nodes, goto next zone */
+	bm->cur.zone = list_entry(bm->cur.zone->list.next,
+				  struct mem_zone_bm_rtree, list);
+	if (&bm->cur.zone->list != &bm->zones) {
+		bm->cur.node = list_entry(bm->cur.zone->leaves.next,
+					  struct rtree_node, list);
+		bm->cur.node_pfn = 0;
+		bm->cur.node_bit = 0;
+		return true;
+	}
+
+	/* No more zones */
+	return false;
+}
+
+/**
+ * memory_bm_next_pfn - Find the next set bit in the bitmap @bm
+ *
+ * Starting from the last returned position this function searches
+ * for the next set bit in the memory bitmap and returns its
+ * number. If no more bits are set, BM_END_OF_MAP is returned.
+ *
+ * It is required to run memory_bm_position_reset() before the
+ * first call to this function.
+ */
 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
 {
-	struct bm_block *bb;
+	unsigned long bits, pfn, pages;
 	int bit;
 
-	bb = bm->cur.block;
 	do {
-		bit = bm->cur.bit;
-		bit = find_next_bit(bb->data, bm_block_bits(bb), bit);
-		if (bit < bm_block_bits(bb))
-			goto Return_pfn;
-
-		bb = list_entry(bb->hook.next, struct bm_block, hook);
-		bm->cur.block = bb;
-		bm->cur.bit = 0;
-	} while (&bb->hook != &bm->blocks);
+		pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
+		bits  = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
+		bit   = find_next_bit(bm->cur.node->data, bits,
+				      bm->cur.node_bit);
+		if (bit < bits) {
+			pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
+			bm->cur.node_bit = bit + 1;
+			return pfn;
+		}
+	} while (rtree_next_node(bm));
 
-	memory_bm_position_reset(bm);
 	return BM_END_OF_MAP;
-
- Return_pfn:
-	bm->cur.bit = bit + 1;
-	return bb->start_pfn + bit;
 }
 
 /**
@@ -816,12 +1039,17 @@ void free_basic_memory_bitmaps(void)
 
 unsigned int snapshot_additional_pages(struct zone *zone)
 {
-	unsigned int res;
+	unsigned int rtree, nodes;
+
+	rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
+	rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
+			      LINKED_PAGE_DATA_SIZE);
+	while (nodes > 1) {
+		nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
+		rtree += nodes;
+	}
 
-	res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-	res += DIV_ROUND_UP(res * sizeof(struct bm_block),
-			    LINKED_PAGE_DATA_SIZE);
-	return 2 * res;
+	return 2 * rtree;
 }
 
 #ifdef CONFIG_HIGHMEM
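
To put numbers on the new estimate, here is a standalone re-run of the snapshot_additional_pages() arithmetic for a hypothetical 4 GiB zone. The constants are assumptions for a 4 KiB-page, 64-bit build; LINKED_PAGE_DATA_SIZE and sizeof(struct rtree_node) are approximated, so the result is only indicative:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Assumed configuration values. */
	#define BM_BITS_PER_BLOCK	32768UL	/* bits per leaf bitmap page */
	#define BM_ENTRIES_PER_LEVEL	512UL	/* pointers per rtree node page */
	#define LINKED_PAGE_DATA_SIZE	4088UL	/* roughly PAGE_SIZE minus a header */
	#define RTREE_NODE_SIZE		32UL	/* approx sizeof(struct rtree_node) */

	int main(void)
	{
		unsigned long spanned_pages = 1UL << 20;	/* 4 GiB zone */
		unsigned long rtree, nodes;

		rtree = nodes = DIV_ROUND_UP(spanned_pages, BM_BITS_PER_BLOCK);
		rtree += DIV_ROUND_UP(rtree * RTREE_NODE_SIZE,
				      LINKED_PAGE_DATA_SIZE);
		while (nodes > 1) {
			nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
			rtree += nodes;
		}
		/* 32 leaf pages + 1 page of rtree_node structs + 1 inner node,
		 * doubled because two bitmaps are allocated: prints 68. */
		printf("extra pages for both bitmaps: %lu\n", 2 * rtree);
		return 0;
	}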
@@ -1094,23 +1322,35 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long fb_pfn, fr_pfn;
 
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
+	memory_bm_position_reset(forbidden_pages_map);
+	memory_bm_position_reset(free_pages_map);
 
-				if (swsusp_page_is_forbidden(page) &&
-				    swsusp_page_is_free(page)) {
-					swsusp_unset_page_forbidden(page);
-					swsusp_unset_page_free(page);
-					__free_page(page);
-				}
-			}
+loop:
+	fr_pfn = memory_bm_next_pfn(free_pages_map);
+	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+
+	/*
+	 * Find the next bit set in both bitmaps. This is guaranteed to
+	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+	 */
+	do {
+		if (fb_pfn < fr_pfn)
+			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+		if (fr_pfn < fb_pfn)
+			fr_pfn = memory_bm_next_pfn(free_pages_map);
+	} while (fb_pfn != fr_pfn);
+
+	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+		struct page *page = pfn_to_page(fr_pfn);
+
+		memory_bm_clear_current(forbidden_pages_map);
+		memory_bm_clear_current(free_pages_map);
+		__free_page(page);
+		goto loop;
 	}
+
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
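
The new swsusp_free() never touches pfns that are set in neither bitmap: it merges the two sorted pfn streams and releases a page only where both have a bit set. The same two-cursor walk over two end-terminated sorted sequences, as a standalone sketch (arrays stand in for the bitmaps):

	#include <stdio.h>

	#define END_OF_MAP	(~0UL)

	/* Two sorted "bitmaps" emulated as arrays of set pfns, END-terminated. */
	static unsigned long forbidden[] = { 3, 7, 10, 15, END_OF_MAP };
	static unsigned long freep[]     = { 2, 7, 11, 15, END_OF_MAP };

	/* Stands in for memory_bm_next_pfn(): returns the next set pfn. */
	static unsigned long next_pfn(const unsigned long *map, unsigned int *pos)
	{
		return map[(*pos)++];
	}

	int main(void)
	{
		unsigned int fb_pos = 0, fr_pos = 0;
		unsigned long fb_pfn, fr_pfn;

	loop:
		fr_pfn = next_pfn(freep, &fr_pos);
		fb_pfn = next_pfn(forbidden, &fb_pos);

		/* Advance whichever cursor is behind until both agree;
		 * terminates because both streams end in END_OF_MAP. */
		do {
			if (fb_pfn < fr_pfn)
				fb_pfn = next_pfn(forbidden, &fb_pos);
			if (fr_pfn < fb_pfn)
				fr_pfn = next_pfn(freep, &fr_pos);
		} while (fb_pfn != fr_pfn);

		if (fr_pfn != END_OF_MAP) {
			/* Prints pfn 7, then pfn 15. */
			printf("pfn %lu is free and forbidden -> release\n", fr_pfn);
			goto loop;
		}
		return 0;
	}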
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4b736b4dfa96..6dadb25cb0d8 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,20 +31,11 @@
 
 #include "power.h"
 
-struct pm_sleep_state pm_states[PM_SUSPEND_MAX] = {
-	[PM_SUSPEND_FREEZE] = { .label = "freeze", .state = PM_SUSPEND_FREEZE },
-	[PM_SUSPEND_STANDBY] = { .label = "standby", },
-	[PM_SUSPEND_MEM] = { .label = "mem", },
-};
+static const char *pm_labels[] = { "mem", "standby", "freeze", };
+const char *pm_states[PM_SUSPEND_MAX];
 
 static const struct platform_suspend_ops *suspend_ops;
 static const struct platform_freeze_ops *freeze_ops;
-
-static bool need_suspend_ops(suspend_state_t state)
-{
-	return state > PM_SUSPEND_FREEZE;
-}
-
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
 static bool suspend_freeze_wake;
 
@@ -97,10 +88,7 @@ static bool relative_states;
 static int __init sleep_states_setup(char *str)
 {
 	relative_states = !strncmp(str, "1", 1);
-	if (relative_states) {
-		pm_states[PM_SUSPEND_MEM].state = PM_SUSPEND_FREEZE;
-		pm_states[PM_SUSPEND_FREEZE].state = 0;
-	}
+	pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
 	return 1;
 }
 
@@ -113,20 +101,20 @@ __setup("relative_sleep_states=", sleep_states_setup);
 void suspend_set_ops(const struct platform_suspend_ops *ops)
 {
 	suspend_state_t i;
-	int j = PM_SUSPEND_MAX - 1;
+	int j = 0;
 
 	lock_system_sleep();
 
 	suspend_ops = ops;
 	for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
-		if (valid_state(i))
-			pm_states[j--].state = i;
-		else if (!relative_states)
-			pm_states[j--].state = 0;
+		if (valid_state(i)) {
+			pm_states[i] = pm_labels[j++];
+		} else if (!relative_states) {
+			pm_states[i] = NULL;
+			j++;
+		}
 
-	pm_states[j--].state = PM_SUSPEND_FREEZE;
-	while (j >= PM_SUSPEND_MIN)
-		pm_states[j--].state = 0;
+	pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
 
 	unlock_system_sleep();
 }
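
The combined effect of sleep_states_setup() and suspend_set_ops() is easiest to see by re-running the assignment for a concrete case. A standalone sketch that models only the suspend_set_ops() half of the logic, with valid_state() replaced by a lookup table (all names here are stand-ins):

	#include <stdio.h>

	enum { PM_SUSPEND_ON, PM_SUSPEND_FREEZE, PM_SUSPEND_STANDBY,
	       PM_SUSPEND_MEM, PM_SUSPEND_MAX };

	static const char *pm_labels[] = { "mem", "standby", "freeze", };
	static const char *pm_states[PM_SUSPEND_MAX];

	/* Same assignment logic as the new suspend_set_ops(). */
	static void assign(int relative_states, const int *valid)
	{
		int i, j = 0;

		for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--) {
			if (valid[i]) {
				pm_states[i] = pm_labels[j++];
			} else if (!relative_states) {
				pm_states[i] = NULL;
				j++;
			}
		}
		pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
	}

	static const char *show(int state)
	{
		return pm_states[state] ? pm_states[state] : "(none)";
	}

	int main(void)
	{
		/* Platform supports "mem" but not "standby". */
		int valid[PM_SUSPEND_MAX] = { [PM_SUSPEND_MEM] = 1 };

		assign(0, valid);	/* absolute: standby slot stays empty */
		printf("absolute: freeze=%s standby=%s mem=%s\n",
		       show(PM_SUSPEND_FREEZE), show(PM_SUSPEND_STANDBY),
		       show(PM_SUSPEND_MEM));

		assign(1, valid);	/* relative: labels slide to deeper states,
					 * so freeze is reachable as "standby" */
		printf("relative: freeze=%s standby=%s mem=%s\n",
		       show(PM_SUSPEND_FREEZE), show(PM_SUSPEND_STANDBY),
		       show(PM_SUSPEND_MEM));
		return 0;
	}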
@@ -145,6 +133,65 @@ int suspend_valid_only_mem(suspend_state_t state)
 }
 EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
 
+static bool sleep_state_supported(suspend_state_t state)
+{
+	return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
+}
+
+static int platform_suspend_prepare(suspend_state_t state)
+{
+	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
+		suspend_ops->prepare() : 0;
+}
+
+static int platform_suspend_prepare_late(suspend_state_t state)
+{
+	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
+		suspend_ops->prepare_late() : 0;
+}
+
+static void platform_suspend_wake(suspend_state_t state)
+{
+	if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
+		suspend_ops->wake();
+}
+
+static void platform_suspend_finish(suspend_state_t state)
+{
+	if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
+		suspend_ops->finish();
+}
+
+static int platform_suspend_begin(suspend_state_t state)
+{
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
+		return freeze_ops->begin();
+	else if (suspend_ops->begin)
+		return suspend_ops->begin(state);
+	else
+		return 0;
+}
+
+static void platform_suspend_end(suspend_state_t state)
+{
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
+		freeze_ops->end();
+	else if (suspend_ops->end)
+		suspend_ops->end();
+}
+
+static void platform_suspend_recover(suspend_state_t state)
+{
+	if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
+		suspend_ops->recover();
+}
+
+static bool platform_suspend_again(suspend_state_t state)
+{
+	return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
+		suspend_ops->suspend_again() : false;
+}
+
 static int suspend_test(int level)
 {
 #ifdef CONFIG_PM_DEBUG
@@ -168,7 +215,7 @@ static int suspend_prepare(suspend_state_t state)
 {
 	int error;
 
-	if (need_suspend_ops(state) && (!suspend_ops || !suspend_ops->enter))
+	if (!sleep_state_supported(state))
 		return -EPERM;
 
 	pm_prepare_console();
@@ -214,23 +261,18 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
 	int error;
 
-	if (need_suspend_ops(state) && suspend_ops->prepare) {
-		error = suspend_ops->prepare();
-		if (error)
-			goto Platform_finish;
-	}
+	error = platform_suspend_prepare(state);
+	if (error)
+		goto Platform_finish;
 
 	error = dpm_suspend_end(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
 		goto Platform_finish;
 	}
-
-	if (need_suspend_ops(state) && suspend_ops->prepare_late) {
-		error = suspend_ops->prepare_late();
-		if (error)
-			goto Platform_wake;
-	}
+	error = platform_suspend_prepare_late(state);
+	if (error)
+		goto Platform_wake;
 
 	if (suspend_test(TEST_PLATFORM))
 		goto Platform_wake;
@@ -276,15 +318,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	enable_nonboot_cpus();
 
  Platform_wake:
-	if (need_suspend_ops(state) && suspend_ops->wake)
-		suspend_ops->wake();
-
+	platform_suspend_wake(state);
 	dpm_resume_start(PMSG_RESUME);
 
  Platform_finish:
-	if (need_suspend_ops(state) && suspend_ops->finish)
-		suspend_ops->finish();
-
+	platform_suspend_finish(state);
 	return error;
 }
 
@@ -297,18 +335,13 @@ int suspend_devices_and_enter(suspend_state_t state)
 	int error;
 	bool wakeup = false;
 
-	if (need_suspend_ops(state) && !suspend_ops)
+	if (!sleep_state_supported(state))
 		return -ENOSYS;
 
-	if (need_suspend_ops(state) && suspend_ops->begin) {
-		error = suspend_ops->begin(state);
-		if (error)
-			goto Close;
-	} else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
-		error = freeze_ops->begin();
-		if (error)
-			goto Close;
-	}
+	error = platform_suspend_begin(state);
+	if (error)
+		goto Close;
+
 	suspend_console();
 	suspend_test_start();
 	error = dpm_suspend_start(PMSG_SUSPEND);
@@ -322,25 +355,20 @@ int suspend_devices_and_enter(suspend_state_t state)
 
 	do {
 		error = suspend_enter(state, &wakeup);
-	} while (!error && !wakeup && need_suspend_ops(state)
-		&& suspend_ops->suspend_again && suspend_ops->suspend_again());
+	} while (!error && !wakeup && platform_suspend_again(state));
 
  Resume_devices:
 	suspend_test_start();
 	dpm_resume_end(PMSG_RESUME);
 	suspend_test_finish("resume devices");
 	resume_console();
- Close:
-	if (need_suspend_ops(state) && suspend_ops->end)
-		suspend_ops->end();
-	else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
-		freeze_ops->end();
 
+ Close:
+	platform_suspend_end(state);
 	return error;
 
  Recover_platform:
-	if (need_suspend_ops(state) && suspend_ops->recover)
-		suspend_ops->recover();
+	platform_suspend_recover(state);
 	goto Resume_devices;
 }
 
@@ -393,7 +421,7 @@ static int enter_state(suspend_state_t state)
 	printk("done.\n");
 	trace_suspend_resume(TPS("sync_filesystems"), 0, false);
 
-	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state].label);
+	pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
 	error = suspend_prepare(state);
 	if (error)
 		goto Unlock;
@@ -402,7 +430,7 @@ static int enter_state(suspend_state_t state)
 		goto Finish;
 
 	trace_suspend_resume(TPS("suspend_enter"), state, false);
-	pr_debug("PM: Entering %s sleep\n", pm_states[state].label);
+	pr_debug("PM: Entering %s sleep\n", pm_states[state]);
 	pm_restrict_gfp_mask();
 	error = suspend_devices_and_enter(state);
 	pm_restore_gfp_mask();
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index 269b097e78ea..2f524928b6aa 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -92,13 +92,13 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
 	}
 
 	if (state == PM_SUSPEND_MEM) {
-		printk(info_test, pm_states[state].label);
+		printk(info_test, pm_states[state]);
 		status = pm_suspend(state);
 		if (status == -ENODEV)
 			state = PM_SUSPEND_STANDBY;
 	}
 	if (state == PM_SUSPEND_STANDBY) {
-		printk(info_test, pm_states[state].label);
+		printk(info_test, pm_states[state]);
 		status = pm_suspend(state);
 	}
 	if (status < 0)
@@ -141,8 +141,8 @@ static int __init setup_test_suspend(char *value)
 	/* "=mem" ==> "mem" */
 	value++;
 	for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++)
-		if (!strcmp(pm_states[i].label, value)) {
-			test_state = pm_states[i].state;
+		if (!strcmp(pm_states[i], value)) {
+			test_state = i;
 			return 0;
 		}
 
@@ -162,8 +162,8 @@ static int __init test_suspend(void)
 	/* PM is initialized by now; is that state testable? */
 	if (test_state == PM_SUSPEND_ON)
 		goto done;
-	if (!pm_states[test_state].state) {
-		printk(warn_bad_state, pm_states[test_state].label);
+	if (!pm_states[test_state]) {
+		printk(warn_bad_state, pm_states[test_state]);
 		goto done;
 	}
 