author     Nicolas Pitre <nicolas.pitre@linaro.org>   2012-09-20 16:05:37 -0400
committer  Nicolas Pitre <nicolas.pitre@linaro.org>   2013-04-24 10:36:59 -0400
commit     7c2b860534d02d11923dd0504b961f21508173f1 (patch)
tree       643030cebe48d267e67eb9b533284db881ef99a5 /arch/arm
parent     e8db288e05e588ad3f416b3a24354d60d02f35f2 (diff)
ARM: mcpm: introduce the CPU/cluster power API
This is the basic API used to handle powering individual CPUs up and down
in a (multi-)cluster system. The platform-specific backend implementation
is also responsible for cluster-level power whenever the first/last CPU of
a cluster is brought up/down (an illustrative registration sketch follows
the diffstat below).
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm')
 arch/arm/common/mcpm_entry.c | 91 +++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/mcpm.h  | 92 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 183 insertions(+), 0 deletions(-)
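For context, a platform backend consumes this API by implementing the
mcpm_platform_ops hooks and registering them once at boot. The following is
a minimal sketch, not part of the patch: the my_soc_* names are hypothetical
placeholders, and the real bodies of the hooks (reset control, cluster
setup, coherency exit) are entirely hardware specific. Only struct
mcpm_platform_ops and mcpm_platform_register() come from the API added here.

#include <linux/init.h>
#include <asm/mcpm.h>

/*
 * Hypothetical SoC hooks: actual reset/power/coherency control is
 * hardware specific and omitted here.
 */
static int my_soc_power_up(unsigned int cpu, unsigned int cluster)
{
	/* deassert reset on the CPU; power the cluster up if needed */
	return 0;
}

static void my_soc_power_down(void)
{
	/* disable L1 cache and CPU coherency, then enter WFI */
}

static const struct mcpm_platform_ops my_soc_power_ops = {
	.power_up	= my_soc_power_up,
	.power_down	= my_soc_power_down,
};

static int __init my_soc_mcpm_init(void)
{
	/* returns -EBUSY if another backend registered first */
	return mcpm_platform_register(&my_soc_power_ops);
}
early_initcall(my_soc_mcpm_init);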
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 7cbf70051ea7..5d72889a58a4 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -9,8 +9,13 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+
 #include <asm/mcpm.h>
 #include <asm/cacheflush.h>
+#include <asm/idmap.h>
 
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
@@ -20,3 +25,89 @@ void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
 	mcpm_entry_vectors[cluster][cpu] = val;
 	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
 }
+
+static const struct mcpm_platform_ops *platform_ops;
+
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
+{
+	if (platform_ops)
+		return -EBUSY;
+	platform_ops = ops;
+	return 0;
+}
+
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
+{
+	if (!platform_ops)
+		return -EUNATCH; /* try not to shadow power_up errors */
+	might_sleep();
+	return platform_ops->power_up(cpu, cluster);
+}
+
+typedef void (*phys_reset_t)(unsigned long);
+
+void mcpm_cpu_power_down(void)
+{
+	phys_reset_t phys_reset;
+
+	BUG_ON(!platform_ops);
+	BUG_ON(!irqs_disabled());
+
+	/*
+	 * Do this before calling into the power_down method,
+	 * as it might not always be safe to do afterwards.
+	 */
+	setup_mm_for_reboot();
+
+	platform_ops->power_down();
+
+	/*
+	 * It is possible for a power_up request to happen concurrently
+	 * with a power_down request for the same CPU. In this case the
+	 * power_down method might not be able to actually enter a
+	 * powered down state with the WFI instruction if the power_up
+	 * method has removed the required reset condition. The
+	 * power_down method is then allowed to return. We must perform
+	 * a re-entry in the kernel as if the power_up method just had
+	 * deasserted reset on the CPU.
+	 *
+	 * To simplify race issues, the platform specific implementation
+	 * must accommodate for the possibility of unordered calls to
+	 * power_down and power_up with a usage count. Therefore, if a
+	 * call to power_up is issued for a CPU that is not down, then
+	 * the next call to power_down must not attempt a full shutdown
+	 * but only do the minimum (normally disabling L1 cache and CPU
+	 * coherency) and return just as if a concurrent power_up request
+	 * had happened as described above.
+	 */
+
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+
+	/* should never get here */
+	BUG();
+}
+
+void mcpm_cpu_suspend(u64 expected_residency)
+{
+	phys_reset_t phys_reset;
+
+	BUG_ON(!platform_ops);
+	BUG_ON(!irqs_disabled());
+
+	/* Very similar to mcpm_cpu_power_down() */
+	setup_mm_for_reboot();
+	platform_ops->suspend(expected_residency);
+	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+	phys_reset(virt_to_phys(mcpm_entry_point));
+	BUG();
+}
+
+int mcpm_cpu_powered_up(void)
+{
+	if (!platform_ops)
+		return -EUNATCH;
+	if (platform_ops->powered_up)
+		platform_ops->powered_up();
+	return 0;
+}
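The long comment in mcpm_cpu_power_down() above requires the backend to keep
a usage count so that unordered power_up/power_down calls for the same CPU
resolve correctly. The patch itself leaves that bookkeeping entirely to the
platform code; the sketch below is one plausible shape for it, with
hypothetical my_* names and the hardware actions reduced to comments.

#include <linux/spinlock.h>
#include <asm/cputype.h>
#include <asm/mcpm.h>

static DEFINE_SPINLOCK(my_pm_lock);
static int my_use_count[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

static int my_soc_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned long flags;

	/* may be called from a sleepable context with IRQs enabled */
	spin_lock_irqsave(&my_pm_lock, flags);
	if (my_use_count[cluster][cpu]++ == 0) {
		/* CPU is really down: power it on and deassert reset */
	}
	/* else a power_down is still in flight; the count fixes it up */
	spin_unlock_irqrestore(&my_pm_lock, flags);
	return 0;
}

static void my_soc_power_down(void)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	bool really_down;

	/* mcpm_cpu_power_down() guarantees IRQs are already off here */
	spin_lock(&my_pm_lock);
	really_down = (--my_use_count[cluster][cpu] == 0);
	spin_unlock(&my_pm_lock);

	if (really_down) {
		/* full path: disable L1/coherency, set up reset, WFI */
	} else {
		/*
		 * A concurrent power_up won the race: do only the
		 * minimum and return, letting mcpm_cpu_power_down()
		 * re-enter the kernel through mcpm_entry_point.
		 */
	}
}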
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 470a417d1351..627761fce780 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -38,5 +38,97 @@ extern void mcpm_entry_point(void);
  */
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
 
+/*
+ * CPU/cluster power operations API for higher subsystems to use.
+ */
+
+/**
+ * mcpm_cpu_power_up - make given CPU in given cluster runnable
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * The identified CPU is brought out of reset. If the cluster was powered
+ * down then it is brought up as well, taking care not to let the other CPUs
+ * in the cluster run, and ensuring appropriate cluster setup.
+ *
+ * Caller must ensure the appropriate entry vector is initialized with
+ * mcpm_set_entry_vector() prior to calling this.
+ *
+ * This must be called in a sleepable context. However, the implementation
+ * is strongly encouraged to return early and let the operation happen
+ * asynchronously, especially when significant delays are expected.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
+
+/**
+ * mcpm_cpu_power_down - power the calling CPU down
+ *
+ * The calling CPU is powered down.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster is prepared for power-down too.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_power_down(void);
+
+/**
+ * mcpm_cpu_suspend - bring the calling CPU into a suspended state
+ *
+ * @expected_residency: duration in microseconds the CPU is expected
+ *			to remain suspended, or 0 if unknown/infinity.
+ *
+ * The calling CPU is suspended. The expected residency argument is used
+ * as a hint by the platform specific backend to implement the appropriate
+ * sleep state level according to the knowledge it has on wake-up latency
+ * for the given hardware.
+ *
+ * If this CPU is found to be the "last man standing" in the cluster
+ * then the cluster may be prepared for power-down too, if the expected
+ * residency makes it worthwhile.
+ *
+ * This must be called with interrupts disabled.
+ *
+ * This does not return. Re-entry in the kernel is expected via
+ * mcpm_entry_point.
+ */
+void mcpm_cpu_suspend(u64 expected_residency);
+
+/**
+ * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
+ *
+ * This lets the platform specific backend code perform needed housekeeping
+ * work. This must be called by the newly activated CPU as soon as it is
+ * fully operational in kernel space, before it enables interrupts.
+ *
+ * If the operation cannot be performed then an error code is returned.
+ */
+int mcpm_cpu_powered_up(void);
+
+/*
+ * Platform specific methods used in the implementation of the above API.
+ */
+struct mcpm_platform_ops {
+	int (*power_up)(unsigned int cpu, unsigned int cluster);
+	void (*power_down)(void);
+	void (*suspend)(u64);
+	void (*powered_up)(void);
+};
+
+/**
+ * mcpm_platform_register - register platform specific power methods
+ *
+ * @ops: mcpm_platform_ops structure to register
+ *
+ * An error is returned if the registration has been done previously.
+ */
+int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
+
 #endif /* ! __ASSEMBLY__ */
 #endif
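On the caller side, bringing up a secondary CPU pairs mcpm_set_entry_vector()
with mcpm_cpu_power_up(), and the woken CPU reports in with
mcpm_cpu_powered_up(). A sketch under the assumption of a hypothetical
secondary_entry_point and boot path; the actual SMP glue is not part of this
patch.

#include <asm/mcpm.h>

/* hypothetical low-level startup code the woken CPU jumps to */
extern void secondary_entry_point(void);

static int boot_secondary_cpu(unsigned int cpu, unsigned int cluster)
{
	/*
	 * Publish the address the woken CPU must jump to before
	 * releasing it; mcpm_set_entry_vector() also syncs the vector
	 * to memory for a CPU coming up non-coherently.
	 */
	mcpm_set_entry_vector(cpu, cluster, secondary_entry_point);
	return mcpm_cpu_power_up(cpu, cluster);	/* sleepable context */
}

/* ...and on the new CPU, once in kernel space with IRQs still off: */
static void my_secondary_init(void)
{
	mcpm_cpu_powered_up();	/* backend housekeeping, if any */
}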