author      Olof Johansson <olof@lixom.net>    2012-07-27 16:56:57 -0400
committer   Olof Johansson <olof@lixom.net>    2012-07-27 16:56:57 -0400
commit      5c62202968d4aaf3bd50e19c421d66aec2e508e0 (patch)
tree        925d3102051db9664772911f29a59155825c671d
parent      28a33cbc24e4256c143dce96c7d93bf423229f92 (diff)
parent      b93d70aeb8f3b5ed2d74643f5009239a55634e1d (diff)
Merge branch 'for_3.6/pm/coupled-cpuidle' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into late2/pm
* 'for_3.6/pm/coupled-cpuidle' of git://git.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm:
ARM: OMAP4: CPUidle: Open broadcast clock-event device.
ARM: OMAP4: CPUidle: add synchronization for coupled idle states
ARM: OMAP4: CPUidle: Use coupled cpuidle states to implement SMP cpuidle.
ARM: OMAP: timer: allow gp timer clock-event to be used on both cpus
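The common thread of these patches is the coupled-cpuidle framework: the deep OMAP4 C-states power down the whole MPU subsystem, so they are flagged CPUIDLE_FLAG_COUPLED and entered by all online CPUs together. A minimal sketch of that pattern is shown below (illustrative only, not code from this tree; my_idle_driver, my_enter_simple, my_enter_coupled and the latency numbers are made-up placeholders):

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/proc-fns.h>

static atomic_t abort_barrier;

/* Plain per-CPU WFI state, usable as safe_state_index. */
static int my_enter_simple(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();		/* WFI */
	return index;
}

/* Coupled entry point: invoked on every CPU in dev->coupled_cpus at once. */
static int my_enter_coupled(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	/* ... program power domains and enter the low power state ... */

	/* All coupled CPUs rendezvous here before returning to the governor. */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	return index;
}

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* index 0: safe, per-CPU state */
			.exit_latency = 1,
			.target_residency = 1,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = my_enter_simple,
			.name = "C1", .desc = "WFI",
		},
		{
			/* deep state that requires all CPUs to agree */
			.exit_latency = 1000,
			.target_residency = 2000,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
			.enter = my_enter_coupled,
			.name = "C2", .desc = "coupled deep idle",
		},
	},
	.state_count = 2,
	.safe_state_index = 0,
};

Each per-CPU cpuidle_device must also have dev->coupled_cpus populated (the OMAP4 driver uses *cpu_online_mask) before cpuidle_register_device(); that is what the for_each_cpu() loop added to omap4_idle_init() in the diff below does.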
-rw-r--r--   arch/arm/mach-omap2/Kconfig         1
-rw-r--r--   arch/arm/mach-omap2/cpuidle44xx.c   141
-rw-r--r--   arch/arm/mach-omap2/timer.c         4
3 files changed, 101 insertions, 45 deletions
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4cf5142f22cc..cc83f5e13d5c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -55,6 +55,7 @@ config ARCH_OMAP4
 	select PM_OPP if PM
 	select USB_ARCH_HAS_EHCI if USB_SUPPORT
 	select ARM_CPU_SUSPEND if PM
+	select ARCH_NEEDS_CPU_IDLE_COUPLED
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index be1617ca84bd..45e6a54d5818 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,6 +21,7 @@
 #include "common.h"
 #include "pm.h"
 #include "prm.h"
+#include "clockdomain.h"
 
 #ifdef CONFIG_CPU_IDLE
 
@@ -49,10 +50,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
+static struct clockdomain *cpu_clkdm[NR_CPUS];
+
+static atomic_t abort_barrier;
+static bool cpu_done[NR_CPUS];
 
 /**
- * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
  * @drv: cpuidle driver
  * @index: the index of state to be entered
@@ -61,60 +66,84 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
  * specified low power state selected by the governor.
  * Returns the amount of time spent in the low power state.
  */
-static int omap4_enter_idle(struct cpuidle_device *dev,
+static int omap4_enter_idle_simple(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+			int index)
+{
+	local_fiq_disable();
+	omap_do_wfi();
+	local_fiq_enable();
+
+	return index;
+}
+
+static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
 			int index)
 {
 	struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-	u32 cpu1_state;
 	int cpu_id = smp_processor_id();
 
 	local_fiq_disable();
 
 	/*
-	 * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
+	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
 	 * This is necessary to honour hardware recommondation
 	 * of triggeing all the possible low power modes once CPU1 is
 	 * out of coherency and in OFF mode.
-	 * Update dev->last_state so that governor stats reflects right
-	 * data.
 	 */
-	cpu1_state = pwrdm_read_pwrst(cpu1_pd);
-	if (cpu1_state != PWRDM_POWER_OFF) {
-		index = drv->safe_state_index;
-		cx = &omap4_idle_data[index];
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
+			cpu_relax();
+
+			/*
+			 * CPU1 could have already entered & exited idle
+			 * without hitting off because of a wakeup
+			 * or a failed attempt to hit off mode. Check for
+			 * that here, otherwise we could spin forever
+			 * waiting for CPU1 off.
+			 */
+			if (cpu_done[1])
+				goto fail;
+
+		}
 	}
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP and per CPU interrupt context is saved.
 	 */
-	if (cx->cpu_state == PWRDM_POWER_OFF)
-		cpu_pm_enter();
+	cpu_pm_enter();
 
-	pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-	omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+	if (dev->cpu == 0) {
+		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
 
-	/*
-	 * Call idle CPU cluster PM enter notifier chain
-	 * to save GIC and wakeupgen context.
-	 */
-	if ((cx->mpu_state == PWRDM_POWER_RET) &&
-		(cx->mpu_logic_state == PWRDM_POWER_OFF))
-			cpu_cluster_pm_enter();
+		/*
+		 * Call idle CPU cluster PM enter notifier chain
+		 * to save GIC and wakeupgen context.
+		 */
+		if ((cx->mpu_state == PWRDM_POWER_RET) &&
+			(cx->mpu_logic_state == PWRDM_POWER_OFF))
+				cpu_cluster_pm_enter();
+	}
 
 	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+	cpu_done[dev->cpu] = true;
+
+	/* Wakeup CPU1 only if it is not offlined */
+	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+		clkdm_wakeup(cpu_clkdm[1]);
+		clkdm_allow_idle(cpu_clkdm[1]);
+	}
 
 	/*
 	 * Call idle CPU PM exit notifier chain to restore
-	 * VFP and per CPU IRQ context. Only CPU0 state is
-	 * considered since CPU1 is managed by CPU hotplug.
+	 * VFP and per CPU IRQ context.
 	 */
-	if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
-		cpu_pm_exit();
+	cpu_pm_exit();
 
 	/*
 	 * Call idle CPU cluster PM exit notifier chain
@@ -123,8 +152,11 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
 	if (omap4_mpuss_read_prev_context_state())
 		cpu_cluster_pm_exit();
 
-	if (index > 0)
-		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
+fail:
+	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+	cpu_done[dev->cpu] = false;
 
 	local_fiq_enable();
 
@@ -143,7 +175,7 @@ struct cpuidle_driver omap4_idle_driver = {
 		.exit_latency = 2 + 2,
 		.target_residency = 5,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
-		.enter = omap4_enter_idle,
+		.enter = omap4_enter_idle_simple,
 		.name = "C1",
 		.desc = "MPUSS ON"
 	},
@@ -151,8 +183,8 @@ struct cpuidle_driver omap4_idle_driver = {
 		/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
 		.exit_latency = 328 + 440,
 		.target_residency = 960,
-		.flags = CPUIDLE_FLAG_TIME_VALID,
-		.enter = omap4_enter_idle,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+		.enter = omap4_enter_idle_coupled,
 		.name = "C2",
 		.desc = "MPUSS CSWR",
 	},
@@ -160,8 +192,8 @@ struct cpuidle_driver omap4_idle_driver = {
 		/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
 		.exit_latency = 460 + 518,
 		.target_residency = 1100,
-		.flags = CPUIDLE_FLAG_TIME_VALID,
-		.enter = omap4_enter_idle,
+		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+		.enter = omap4_enter_idle_coupled,
 		.name = "C3",
 		.desc = "MPUSS OSWR",
 	},
@@ -170,6 +202,16 @@ struct cpuidle_driver omap4_idle_driver = {
 	.safe_state_index = 0,
 };
 
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+	int cpu = smp_processor_id();
+	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
+
 /**
  * omap4_idle_init - Init routine for OMAP4 idle
  *
@@ -182,19 +224,30 @@ int __init omap4_idle_init(void)
 	unsigned int cpu_id = 0;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
-	cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
-	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
-	if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
+	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
+	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
 		return -ENODEV;
 
-	dev = &per_cpu(omap4_idle_dev, cpu_id);
-	dev->cpu = cpu_id;
+	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
+	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
+	if (!cpu_clkdm[0] || !cpu_clkdm[1])
+		return -ENODEV;
+
+	/* Configure the broadcast timer on each cpu */
+	on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
+
+	for_each_cpu(cpu_id, cpu_online_mask) {
+		dev = &per_cpu(omap4_idle_dev, cpu_id);
+		dev->cpu = cpu_id;
+		dev->coupled_cpus = *cpu_online_mask;
 
-	cpuidle_register_driver(&omap4_idle_driver);
+		cpuidle_register_driver(&omap4_idle_driver);
 
-	if (cpuidle_register_device(dev)) {
-		pr_err("%s: CPUidle register device failed\n", __func__);
-		return -EIO;
+		if (cpuidle_register_device(dev)) {
+			pr_err("%s: CPUidle register failed\n", __func__);
+			return -EIO;
+		}
 	}
 
 	return 0;
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 840929bd9dae..9b7a07360610 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -135,6 +135,7 @@ static struct clock_event_device clockevent_gpt = {
 	.name		= "gp_timer",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 	.shift		= 32,
+	.rating		= 300,
 	.set_next_event	= omap2_gp_timer_set_next_event,
 	.set_mode	= omap2_gp_timer_set_mode,
 };
@@ -228,7 +229,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
 		clockevent_delta2ns(3, &clockevent_gpt);
 		/* Timer internal resynch latency. */
 
-	clockevent_gpt.cpumask = cpumask_of(0);
+	clockevent_gpt.cpumask = cpu_possible_mask;
+	clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
 	clockevents_register_device(&clockevent_gpt);
 
 	pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",