author    Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-26 04:36:36 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>    2011-09-26 04:36:36 -0400
commit    c825dda905bac330c2da7fabdf5c0ac28758b3cd
tree      597fe4f90c49f784a0dbf1596c027880747210ec
parent    b0a37dca72a05b7b579f288d8a67afeed96bffa5
parent    8fb54284ba6aa1f0d832ec015fde64ecf4bb0f4f
Merge branch 'for_3_2/for-rmk/arm_cpu_pm' of git://gitorious.org/omap-sw-develoment/linux-omap-dev into devel-stable
-rw-r--r--  arch/arm/Kconfig                      |   1
-rw-r--r--  arch/arm/common/gic.c                 | 188
-rw-r--r--  arch/arm/include/asm/hardware/gic.h   |   8
-rw-r--r--  arch/arm/include/asm/mach/map.h       |   1
-rw-r--r--  arch/arm/include/asm/pgtable.h        |   3
-rw-r--r--  arch/arm/mm/mmu.c                     |   8
-rw-r--r--  arch/arm/vfp/vfpmodule.c              |  31
-rw-r--r--  include/linux/cpu_pm.h                | 109
-rw-r--r--  kernel/Makefile                       |   1
-rw-r--r--  kernel/cpu_pm.c                       | 233
-rw-r--r--  kernel/power/Kconfig                  |   4

11 files changed, 578 insertions(+), 9 deletions(-)
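For orientation: this merge introduces the cpu_pm notifier framework and converts the ARM GIC and VFP code to use it. A minimal sketch of how a platform low power path might drive the new API; every platform_* helper and the last-CPU test are hypothetical, only the cpu_pm_* calls come from this merge:

/* Hypothetical platform idle hook. Interrupts are already disabled here. */
static void example_enter_deep_idle(bool last_cpu_in_cluster)
{
        if (cpu_pm_enter())
                return;                         /* a notifier vetoed entry */

        if (last_cpu_in_cluster && !cpu_cluster_pm_enter()) {
                platform_cluster_off();         /* assumed helper; GIC distributor saved */
                cpu_cluster_pm_exit();          /* ...and restored on the way out */
        } else {
                platform_cpu_off();             /* assumed helper; per-CPU loss only */
        }

        cpu_pm_exit();                          /* restores per-CPU state (GIC CPU i/f, VFP) */
}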
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5a3a78633177..9c4caa633d04 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -29,6 +29,7 @@ config ARM
        select HAVE_GENERIC_HARDIRQS
        select HAVE_SPARSE_IRQ
        select GENERIC_IRQ_SHOW
+       select CPU_PM if (SUSPEND || CPU_IDLE)
        help
          The ARM series is a line of low-power-consumption RISC chip designs
          licensed by ARM Ltd and targeted at embedded applications and
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 3227ca952a12..734db99eaee7 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -26,6 +26,7 @@
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/smp.h>
+#include <linux/cpu_pm.h>
 #include <linux/cpumask.h>
 #include <linux/io.h>
 
@@ -276,6 +277,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
        if (gic_irqs > 1020)
                gic_irqs = 1020;
 
+       gic->gic_irqs = gic_irqs;
+
        /*
         * Set all global interrupts to be level triggered, active low.
         */
@@ -343,6 +346,189 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
        writel_relaxed(1, base + GIC_CPU_CTRL);
 }
 
+#ifdef CONFIG_CPU_PM
+/*
+ * Saves the GIC distributor registers during suspend or idle. Must be called
+ * with interrupts disabled but before powering down the GIC. After calling
+ * this function, no interrupts will be delivered by the GIC, and another
+ * platform-specific wakeup source must be enabled.
+ */
+static void gic_dist_save(unsigned int gic_nr)
+{
+       unsigned int gic_irqs;
+       void __iomem *dist_base;
+       int i;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       gic_irqs = gic_data[gic_nr].gic_irqs;
+       dist_base = gic_data[gic_nr].dist_base;
+
+       if (!dist_base)
+               return;
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+               gic_data[gic_nr].saved_spi_conf[i] =
+                       readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               gic_data[gic_nr].saved_spi_target[i] =
+                       readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+               gic_data[gic_nr].saved_spi_enable[i] =
+                       readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+}
+
+/*
+ * Restores the GIC distributor registers during resume or when coming out of
+ * idle. Must be called before enabling interrupts. If a level interrupt
+ * that occurred while the GIC was suspended is still present, it will be
+ * handled normally, but any edge interrupts that occurred will not be seen by
+ * the GIC and need to be handled by the platform-specific wakeup source.
+ */
+static void gic_dist_restore(unsigned int gic_nr)
+{
+       unsigned int gic_irqs;
+       unsigned int i;
+       void __iomem *dist_base;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       gic_irqs = gic_data[gic_nr].gic_irqs;
+       dist_base = gic_data[gic_nr].dist_base;
+
+       if (!dist_base)
+               return;
+
+       writel_relaxed(0, dist_base + GIC_DIST_CTRL);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
+               writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
+                       dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               writel_relaxed(0xa0a0a0a0,
+                       dist_base + GIC_DIST_PRI + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
+               writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
+                       dist_base + GIC_DIST_TARGET + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
+               writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
+                       dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       writel_relaxed(1, dist_base + GIC_DIST_CTRL);
+}
+
+static void gic_cpu_save(unsigned int gic_nr)
+{
+       int i;
+       u32 *ptr;
+       void __iomem *dist_base;
+       void __iomem *cpu_base;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       dist_base = gic_data[gic_nr].dist_base;
+       cpu_base = gic_data[gic_nr].cpu_base;
+
+       if (!dist_base || !cpu_base)
+               return;
+
+       ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+               ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+               ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+
+}
+
+static void gic_cpu_restore(unsigned int gic_nr)
+{
+       int i;
+       u32 *ptr;
+       void __iomem *dist_base;
+       void __iomem *cpu_base;
+
+       if (gic_nr >= MAX_GIC_NR)
+               BUG();
+
+       dist_base = gic_data[gic_nr].dist_base;
+       cpu_base = gic_data[gic_nr].cpu_base;
+
+       if (!dist_base || !cpu_base)
+               return;
+
+       ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+       for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
+               writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
+
+       ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+       for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
+               writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
+
+       for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
+               writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4);
+
+       writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK);
+       writel_relaxed(1, cpu_base + GIC_CPU_CTRL);
+}
+
+static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+       int i;
+
+       for (i = 0; i < MAX_GIC_NR; i++) {
+               switch (cmd) {
+               case CPU_PM_ENTER:
+                       gic_cpu_save(i);
+                       break;
+               case CPU_PM_ENTER_FAILED:
+               case CPU_PM_EXIT:
+                       gic_cpu_restore(i);
+                       break;
+               case CPU_CLUSTER_PM_ENTER:
+                       gic_dist_save(i);
+                       break;
+               case CPU_CLUSTER_PM_ENTER_FAILED:
+               case CPU_CLUSTER_PM_EXIT:
+                       gic_dist_restore(i);
+                       break;
+               }
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block gic_notifier_block = {
+       .notifier_call = gic_notifier,
+};
+
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+       gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
+               sizeof(u32));
+       BUG_ON(!gic->saved_ppi_enable);
+
+       gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
+               sizeof(u32));
+       BUG_ON(!gic->saved_ppi_conf);
+
+       cpu_pm_register_notifier(&gic_notifier_block);
+}
+#else
+static void __init gic_pm_init(struct gic_chip_data *gic)
+{
+}
+#endif
+
 void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
        void __iomem *dist_base, void __iomem *cpu_base)
 {
@@ -358,8 +544,10 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
        if (gic_nr == 0)
                gic_cpu_base_addr = cpu_base;
 
+       gic_chip.flags |= gic_arch_extn.flags;
        gic_dist_init(gic, irq_start);
        gic_cpu_init(gic);
+       gic_pm_init(gic);
 }
 
 void __cpuinit gic_secondary_init(unsigned int gic_nr)
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 435d3f86c708..c5627057b1c7 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -46,6 +46,14 @@ struct gic_chip_data {
        unsigned int irq_offset;
        void __iomem *dist_base;
        void __iomem *cpu_base;
+#ifdef CONFIG_CPU_PM
+       u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
+       u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
+       u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
+       u32 __percpu *saved_ppi_enable;
+       u32 __percpu *saved_ppi_conf;
+#endif
+       unsigned int gic_irqs;
 };
 #endif
 
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index d2fedb5aeb1f..b36f3654bf54 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -29,6 +29,7 @@ struct map_desc {
 #define MT_MEMORY_NONCACHED    11
 #define MT_MEMORY_DTCM         12
 #define MT_MEMORY_ITCM         13
+#define MT_MEMORY_SO           14
 
 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5750704e0271..f1956b27ae5a 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -232,6 +232,9 @@ extern pgprot_t pgprot_kernel;
 #define pgprot_writecombine(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
 
+#define pgprot_stronglyordered(prot) \
+       __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+
 #ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
 #define pgprot_dmacoherent(prot) \
        __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c8..ea9c9f3e48bf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -273,6 +273,14 @@ static struct mem_type mem_types[] = {
                .prot_l1 = PMD_TYPE_TABLE,
                .domain = DOMAIN_KERNEL,
        },
+       [MT_MEMORY_SO] = {
+               .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+                               L_PTE_MT_UNCACHED,
+               .prot_l1 = PMD_TYPE_TABLE,
+               .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
+                               PMD_SECT_UNCACHED | PMD_SECT_XN,
+               .domain = DOMAIN_KERNEL,
+       },
 };
 
 const struct mem_type *get_mem_type(unsigned int type)
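The new MT_MEMORY_SO type gives static kernel mappings strongly-ordered, non-cacheable, execute-never attributes. A sketch of how a board file might use it from its map_io hook; the addresses and the example_* names are illustrative only:

#include <asm/mach/map.h>

static struct map_desc example_so_desc[] __initdata = {
        {
                .virtual = 0xfe000000,                  /* assumed VA */
                .pfn     = __phys_to_pfn(0x48000000),   /* assumed PA */
                .length  = SZ_1M,
                .type    = MT_MEMORY_SO,        /* strongly-ordered, XN */
        },
};

static void __init example_map_io(void)
{
        iotable_init(example_so_desc, ARRAY_SIZE(example_so_desc));
}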
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 79bcb4316930..0cbd5a0a9332 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/cpu.h>
+#include <linux/cpu_pm.h>
 #include <linux/kernel.h>
 #include <linux/notifier.h>
 #include <linux/signal.h>
@@ -68,7 +69,7 @@ static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
 /*
  * Force a reload of the VFP context from the thread structure. We do
  * this by ensuring that access to the VFP hardware is disabled, and
- * clear last_VFP_context. Must be called from non-preemptible context.
+ * clear vfp_current_hw_state. Must be called from non-preemptible context.
  */
 static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
 {
@@ -436,9 +437,7 @@ static void vfp_enable(void *unused)
        set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
 }
 
-#ifdef CONFIG_PM
-#include <linux/syscore_ops.h>
-
+#ifdef CONFIG_CPU_PM
 static int vfp_pm_suspend(void)
 {
        struct thread_info *ti = current_thread_info();
@@ -468,19 +467,33 @@ static void vfp_pm_resume(void)
        fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
 }
 
-static struct syscore_ops vfp_pm_syscore_ops = {
-       .suspend = vfp_pm_suspend,
-       .resume = vfp_pm_resume,
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+       void *v)
+{
+       switch (cmd) {
+       case CPU_PM_ENTER:
+               vfp_pm_suspend();
+               break;
+       case CPU_PM_ENTER_FAILED:
+       case CPU_PM_EXIT:
+               vfp_pm_resume();
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block vfp_cpu_pm_notifier_block = {
+       .notifier_call = vfp_cpu_pm_notifier,
 };
 
 static void vfp_pm_init(void)
 {
-       register_syscore_ops(&vfp_pm_syscore_ops);
+       cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
 }
 
 #else
 static inline void vfp_pm_init(void) { }
-#endif /* CONFIG_PM */
+#endif /* CONFIG_CPU_PM */
 
 /*
  * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
diff --git a/include/linux/cpu_pm.h b/include/linux/cpu_pm.h
new file mode 100644
index 000000000000..455b233dd3b1
--- /dev/null
+++ b/include/linux/cpu_pm.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *      Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPU_PM_H
+#define _LINUX_CPU_PM_H
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+
+/*
+ * When a CPU goes to a low power state that turns off power to the CPU's
+ * power domain, the contents of some blocks (floating point coprocessors,
+ * interrupt controllers, caches, timers) in the same power domain can
+ * be lost. The cpu_pm notifiers provide a method for platform idle, suspend,
+ * and hotplug implementations to notify the drivers for these blocks that
+ * they may be reset.
+ *
+ * All cpu_pm notifications must be called with interrupts disabled.
+ *
+ * The notifications are split into two classes: CPU notifications and CPU
+ * cluster notifications.
+ *
+ * CPU notifications apply to a single CPU and must be called on the affected
+ * CPU. They are used to save per-cpu context for affected blocks.
+ *
+ * CPU cluster notifications apply to all CPUs in a single power domain. They
+ * are used to save any global context for affected blocks, and must be called
+ * after all the CPUs in the power domain have been notified of the low power
+ * state.
+ */
+
+/*
+ * Event codes passed as unsigned long val to notifier calls
+ */
+enum cpu_pm_event {
+       /* A single cpu is entering a low power state */
+       CPU_PM_ENTER,
+
+       /* A single cpu failed to enter a low power state */
+       CPU_PM_ENTER_FAILED,
+
+       /* A single cpu is exiting a low power state */
+       CPU_PM_EXIT,
+
+       /* A cpu power domain is entering a low power state */
+       CPU_CLUSTER_PM_ENTER,
+
+       /* A cpu power domain failed to enter a low power state */
+       CPU_CLUSTER_PM_ENTER_FAILED,
+
+       /* A cpu power domain is exiting a low power state */
+       CPU_CLUSTER_PM_EXIT,
+};
+
+#ifdef CONFIG_CPU_PM
+int cpu_pm_register_notifier(struct notifier_block *nb);
+int cpu_pm_unregister_notifier(struct notifier_block *nb);
+int cpu_pm_enter(void);
+int cpu_pm_exit(void);
+int cpu_cluster_pm_enter(void);
+int cpu_cluster_pm_exit(void);
+
+#else
+
+static inline int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+
+static inline int cpu_pm_enter(void)
+{
+       return 0;
+}
+
+static inline int cpu_pm_exit(void)
+{
+       return 0;
+}
+
+static inline int cpu_cluster_pm_enter(void)
+{
+       return 0;
+}
+
+static inline int cpu_cluster_pm_exit(void)
+{
+       return 0;
+}
+#endif
+#endif
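A driver-side sketch of the API declared above, registering for the per-CPU events; my_save_context/my_restore_context are hypothetical stand-ins for whatever state the driver owns:

#include <linux/cpu_pm.h>
#include <linux/init.h>

static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
                            void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER:              /* context may be lost: save it */
                my_save_context();
                break;
        case CPU_PM_ENTER_FAILED:       /* entry aborted: undo the save */
        case CPU_PM_EXIT:               /* back from low power: restore */
                my_restore_context();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
        .notifier_call = my_cpu_pm_notify,
};

static int __init my_driver_init(void)
{
        return cpu_pm_register_notifier(&my_cpu_pm_nb);
}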
diff --git a/kernel/Makefile b/kernel/Makefile
index eca595e2fd52..988cb3da7031 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -101,6 +101,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_CPU_PM) += cpu_pm.o
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
new file mode 100644
index 000000000000..249152e15308
--- /dev/null
+++ b/kernel/cpu_pm.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * Author:
+ *      Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpu_pm.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
+
+static DEFINE_RWLOCK(cpu_pm_notifier_lock);
+static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
+
+static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
+{
+       int ret;
+
+       ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
+               nr_to_call, nr_calls);
+
+       return notifier_to_errno(ret);
+}
+
+/**
+ * cpu_pm_register_notifier - register a driver with cpu_pm
+ * @nb: notifier block to register
+ *
+ * Add a driver to a list of drivers that are notified about
+ * CPU and CPU cluster low power entry and exit.
+ *
+ * This function has the same return conditions as
+ * raw_notifier_chain_register.
+ */
+int cpu_pm_register_notifier(struct notifier_block *nb)
+{
+       unsigned long flags;
+       int ret;
+
+       write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+       ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
+       write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
+
+/**
+ * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
+ * @nb: notifier block to be unregistered
+ *
+ * Remove a driver from the CPU PM notifier list.
+ *
+ * This function has the same return conditions as
+ * raw_notifier_chain_unregister.
+ */
+int cpu_pm_unregister_notifier(struct notifier_block *nb)
+{
+       unsigned long flags;
+       int ret;
+
+       write_lock_irqsave(&cpu_pm_notifier_lock, flags);
+       ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
+       write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
+
+/**
+ * cpu_pm_enter - CPU low power entry notifier
+ *
+ * Notifies listeners that a single CPU is entering a low power state that may
+ * cause some blocks in the same power domain as the cpu to reset.
+ *
+ * Must be called on the affected CPU with interrupts disabled. Platform is
+ * responsible for ensuring that cpu_pm_enter is not called twice on the same
+ * CPU before cpu_pm_exit is called. Notified drivers can include the VFP
+ * co-processor, the interrupt controller and its PM extensions, and local
+ * CPU timers; their context save/restore must not be interrupted, hence
+ * the requirement for interrupts to be disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_enter(void)
+{
+       int nr_calls;
+       int ret = 0;
+
+       read_lock(&cpu_pm_notifier_lock);
+       ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
+       if (ret)
+               /*
+                * Inform the listeners that were already notified
+                * (nr_calls - 1 of them) that CPU PM entry failed, so
+                * they can undo their preparations.
+                */
+               cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
+       read_unlock(&cpu_pm_notifier_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_enter);
+
+/**
+ * cpu_pm_exit - CPU low power exit notifier
+ *
+ * Notifies listeners that a single CPU is exiting a low power state that may
+ * have caused some blocks in the same power domain as the cpu to reset.
+ *
+ * Notified drivers can include the VFP co-processor, the interrupt controller
+ * and its PM extensions, and local CPU timers; their context save/restore
+ * must not be interrupted, hence it must be called with interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_pm_exit(void)
+{
+       int ret;
+
+       read_lock(&cpu_pm_notifier_lock);
+       ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
+       read_unlock(&cpu_pm_notifier_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_pm_exit);
+
+/**
+ * cpu_cluster_pm_enter - CPU cluster low power entry notifier
+ *
+ * Notifies listeners that all cpus in a power domain are entering a low power
+ * state that may cause some blocks in the same power domain to reset.
+ *
+ * Must be called after cpu_pm_enter has been called on all cpus in the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers; their context
+ * save/restore must not be interrupted, hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_enter(void)
+{
+       int nr_calls;
+       int ret = 0;
+
+       read_lock(&cpu_pm_notifier_lock);
+       ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
+       if (ret)
+               /*
+                * Inform the listeners that were already notified
+                * (nr_calls - 1 of them) that cluster PM entry failed, so
+                * they can undo their preparations.
+                */
+               cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
+       read_unlock(&cpu_pm_notifier_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
+
+/**
+ * cpu_cluster_pm_exit - CPU cluster low power exit notifier
+ *
+ * Notifies listeners that all cpus in a power domain are exiting from a
+ * low power state that may have caused some blocks in the same power domain
+ * to reset.
+ *
+ * Must be called after cpu_cluster_pm_enter has been called for the power
+ * domain, and before cpu_pm_exit has been called on any cpu in the power
+ * domain. Notified drivers can include the VFP co-processor, the interrupt
+ * controller and its PM extensions, and local CPU timers; their context
+ * save/restore must not be interrupted, hence it must be called with
+ * interrupts disabled.
+ *
+ * Return conditions are same as __raw_notifier_call_chain.
+ */
+int cpu_cluster_pm_exit(void)
+{
+       int ret;
+
+       read_lock(&cpu_pm_notifier_lock);
+       ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
+       read_unlock(&cpu_pm_notifier_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
+
+#ifdef CONFIG_PM
+static int cpu_pm_suspend(void)
+{
+       int ret;
+
+       ret = cpu_pm_enter();
+       if (ret)
+               return ret;
+
+       ret = cpu_cluster_pm_enter();
+       return ret;
+}
+
+static void cpu_pm_resume(void)
+{
+       cpu_cluster_pm_exit();
+       cpu_pm_exit();
+}
+
+static struct syscore_ops cpu_pm_syscore_ops = {
+       .suspend = cpu_pm_suspend,
+       .resume = cpu_pm_resume,
+};
+
+static int cpu_pm_init(void)
+{
+       register_syscore_ops(&cpu_pm_syscore_ops);
+       return 0;
+}
+core_initcall(cpu_pm_init);
+#endif
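One property of the implementation above worth illustrating: because cpu_pm_notify passes nr_to_call/nr_calls through __raw_notifier_call_chain, a listener can abort low power entry, and only the listeners that were already called receive the *_ENTER_FAILED rollback. A sketch, where device_is_busy is an assumed predicate and the block is registered with cpu_pm_register_notifier as usual:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int veto_notifier(struct notifier_block *nb, unsigned long cmd, void *v)
{
        /* Refusing CPU_PM_ENTER makes cpu_pm_enter() return nonzero, and
         * the listeners called before this one get CPU_PM_ENTER_FAILED. */
        if (cmd == CPU_PM_ENTER && device_is_busy())
                return NOTIFY_BAD;
        return NOTIFY_OK;
}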
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 3744c594b19b..80a85971cf64 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -235,3 +235,7 @@ config PM_GENERIC_DOMAINS
 config PM_GENERIC_DOMAINS_RUNTIME
        def_bool y
        depends on PM_RUNTIME && PM_GENERIC_DOMAINS
+
+config CPU_PM
+       bool
+       depends on SUSPEND || CPU_IDLE