aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mach-omap2
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-08-02 14:48:54 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-08-02 14:48:54 -0400
commitbfdf85dfce1f203f8fcca034b8efe339542033fa (patch)
tree678106c6ce5e176f886bbda9a03a4953bab0d288 /arch/arm/mach-omap2
parentd1494ba8c38b5afd3c9e219bbaeb7d90ff6cd2ec (diff)
parent5c62202968d4aaf3bd50e19c421d66aec2e508e0 (diff)
Merge tag 'pm2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull arm-soc cpuidle enablement for OMAP from Olof Johansson: "Coupled cpuidle was meant to merge for 3.5 through Len Brown's tree, but didn't go in because the pull request ended up rejected. So it just got merged, and we got this staged branch that enables the coupled cpuidle code on OMAP. With a stable git workflow from the other maintainer we could have staged this earlier, but that wasn't the case so we have had to merge it late. The alternative is to hold it off until 3.7 but given that the code is well-isolated to OMAP and they are eager to see it go in, I didn't push back hard in that direction." * tag 'pm2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: ARM: OMAP4: CPUidle: Open broadcast clock-event device. ARM: OMAP4: CPUidle: add synchronization for coupled idle states ARM: OMAP4: CPUidle: Use coupled cpuidle states to implement SMP cpuidle. ARM: OMAP: timer: allow gp timer clock-event to be used on both cpus
Diffstat (limited to 'arch/arm/mach-omap2')
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c145
-rw-r--r--arch/arm/mach-omap2/timer.c4
3 files changed, 103 insertions, 47 deletions
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index dd0fbf76ac79..dd2db025f778 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,6 +62,7 @@ config ARCH_OMAP4
62 select PM_OPP if PM 62 select PM_OPP if PM
63 select USB_ARCH_HAS_EHCI if USB_SUPPORT 63 select USB_ARCH_HAS_EHCI if USB_SUPPORT
64 select ARM_CPU_SUSPEND if PM 64 select ARM_CPU_SUSPEND if PM
65 select ARCH_NEEDS_CPU_IDLE_COUPLED
65 66
66config SOC_OMAP5 67config SOC_OMAP5
67 bool "TI OMAP5" 68 bool "TI OMAP5"
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 02d15bbd4e35..ee05e193fc61 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -21,6 +21,7 @@
21#include "common.h" 21#include "common.h"
22#include "pm.h" 22#include "pm.h"
23#include "prm.h" 23#include "prm.h"
24#include "clockdomain.h"
24 25
25/* Machine specific information */ 26/* Machine specific information */
26struct omap4_idle_statedata { 27struct omap4_idle_statedata {
@@ -47,10 +48,14 @@ static struct omap4_idle_statedata omap4_idle_data[] = {
47 }, 48 },
48}; 49};
49 50
50static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd; 51static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
52static struct clockdomain *cpu_clkdm[NR_CPUS];
53
54static atomic_t abort_barrier;
55static bool cpu_done[NR_CPUS];
51 56
52/** 57/**
53 * omap4_enter_idle - Programs OMAP4 to enter the specified state 58 * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
54 * @dev: cpuidle device 59 * @dev: cpuidle device
55 * @drv: cpuidle driver 60 * @drv: cpuidle driver
56 * @index: the index of state to be entered 61 * @index: the index of state to be entered
@@ -59,60 +64,84 @@ static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
59 * specified low power state selected by the governor. 64 * specified low power state selected by the governor.
60 * Returns the amount of time spent in the low power state. 65 * Returns the amount of time spent in the low power state.
61 */ 66 */
62static int omap4_enter_idle(struct cpuidle_device *dev, 67static int omap4_enter_idle_simple(struct cpuidle_device *dev,
68 struct cpuidle_driver *drv,
69 int index)
70{
71 local_fiq_disable();
72 omap_do_wfi();
73 local_fiq_enable();
74
75 return index;
76}
77
78static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
63 struct cpuidle_driver *drv, 79 struct cpuidle_driver *drv,
64 int index) 80 int index)
65{ 81{
66 struct omap4_idle_statedata *cx = &omap4_idle_data[index]; 82 struct omap4_idle_statedata *cx = &omap4_idle_data[index];
67 u32 cpu1_state;
68 int cpu_id = smp_processor_id(); 83 int cpu_id = smp_processor_id();
69 84
70 local_fiq_disable(); 85 local_fiq_disable();
71 86
72 /* 87 /*
73 * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state. 88 * CPU0 has to wait and stay ON until CPU1 is OFF state.
74	 * This is necessary to honour hardware recommendation 89	 * This is necessary to honour hardware recommendation
75	 * of triggering all the possible low power modes once CPU1 is 90	 * of triggering all the possible low power modes once CPU1 is
76 * out of coherency and in OFF mode. 91 * out of coherency and in OFF mode.
77 * Update dev->last_state so that governor stats reflects right
78 * data.
79 */ 92 */
80 cpu1_state = pwrdm_read_pwrst(cpu1_pd); 93 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
81 if (cpu1_state != PWRDM_POWER_OFF) { 94 while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
82 index = drv->safe_state_index; 95 cpu_relax();
83 cx = &omap4_idle_data[index]; 96
97 /*
98 * CPU1 could have already entered & exited idle
99 * without hitting off because of a wakeup
100 * or a failed attempt to hit off mode. Check for
101 * that here, otherwise we could spin forever
102 * waiting for CPU1 off.
103 */
104 if (cpu_done[1])
105 goto fail;
106
107 }
84 } 108 }
85 109
86 if (index > 0) 110 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
87 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
88 111
89 /* 112 /*
90 * Call idle CPU PM enter notifier chain so that 113 * Call idle CPU PM enter notifier chain so that
91 * VFP and per CPU interrupt context is saved. 114 * VFP and per CPU interrupt context is saved.
92 */ 115 */
93 if (cx->cpu_state == PWRDM_POWER_OFF) 116 cpu_pm_enter();
94 cpu_pm_enter(); 117
95 118 if (dev->cpu == 0) {
96 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state); 119 pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
97 omap_set_pwrdm_state(mpu_pd, cx->mpu_state); 120 omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
98 121
99 /* 122 /*
100 * Call idle CPU cluster PM enter notifier chain 123 * Call idle CPU cluster PM enter notifier chain
101 * to save GIC and wakeupgen context. 124 * to save GIC and wakeupgen context.
102 */ 125 */
103 if ((cx->mpu_state == PWRDM_POWER_RET) && 126 if ((cx->mpu_state == PWRDM_POWER_RET) &&
104 (cx->mpu_logic_state == PWRDM_POWER_OFF)) 127 (cx->mpu_logic_state == PWRDM_POWER_OFF))
105 cpu_cluster_pm_enter(); 128 cpu_cluster_pm_enter();
129 }
106 130
107 omap4_enter_lowpower(dev->cpu, cx->cpu_state); 131 omap4_enter_lowpower(dev->cpu, cx->cpu_state);
132 cpu_done[dev->cpu] = true;
133
134 /* Wakeup CPU1 only if it is not offlined */
135 if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
136 clkdm_wakeup(cpu_clkdm[1]);
137 clkdm_allow_idle(cpu_clkdm[1]);
138 }
108 139
109 /* 140 /*
110 * Call idle CPU PM exit notifier chain to restore 141 * Call idle CPU PM exit notifier chain to restore
111 * VFP and per CPU IRQ context. Only CPU0 state is 142 * VFP and per CPU IRQ context.
112 * considered since CPU1 is managed by CPU hotplug.
113 */ 143 */
114 if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF) 144 cpu_pm_exit();
115 cpu_pm_exit();
116 145
117 /* 146 /*
118 * Call idle CPU cluster PM exit notifier chain 147 * Call idle CPU cluster PM exit notifier chain
@@ -121,8 +150,11 @@ static int omap4_enter_idle(struct cpuidle_device *dev,
121 if (omap4_mpuss_read_prev_context_state()) 150 if (omap4_mpuss_read_prev_context_state())
122 cpu_cluster_pm_exit(); 151 cpu_cluster_pm_exit();
123 152
124 if (index > 0) 153 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
125 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id); 154
155fail:
156 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
157 cpu_done[dev->cpu] = false;
126 158
127 local_fiq_enable(); 159 local_fiq_enable();
128 160
@@ -141,7 +173,7 @@ struct cpuidle_driver omap4_idle_driver = {
141 .exit_latency = 2 + 2, 173 .exit_latency = 2 + 2,
142 .target_residency = 5, 174 .target_residency = 5,
143 .flags = CPUIDLE_FLAG_TIME_VALID, 175 .flags = CPUIDLE_FLAG_TIME_VALID,
144 .enter = omap4_enter_idle, 176 .enter = omap4_enter_idle_simple,
145 .name = "C1", 177 .name = "C1",
146 .desc = "MPUSS ON" 178 .desc = "MPUSS ON"
147 }, 179 },
@@ -149,8 +181,8 @@ struct cpuidle_driver omap4_idle_driver = {
149 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ 181 /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
150 .exit_latency = 328 + 440, 182 .exit_latency = 328 + 440,
151 .target_residency = 960, 183 .target_residency = 960,
152 .flags = CPUIDLE_FLAG_TIME_VALID, 184 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
153 .enter = omap4_enter_idle, 185 .enter = omap4_enter_idle_coupled,
154 .name = "C2", 186 .name = "C2",
155 .desc = "MPUSS CSWR", 187 .desc = "MPUSS CSWR",
156 }, 188 },
@@ -158,8 +190,8 @@ struct cpuidle_driver omap4_idle_driver = {
158 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ 190 /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
159 .exit_latency = 460 + 518, 191 .exit_latency = 460 + 518,
160 .target_residency = 1100, 192 .target_residency = 1100,
161 .flags = CPUIDLE_FLAG_TIME_VALID, 193 .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
162 .enter = omap4_enter_idle, 194 .enter = omap4_enter_idle_coupled,
163 .name = "C3", 195 .name = "C3",
164 .desc = "MPUSS OSWR", 196 .desc = "MPUSS OSWR",
165 }, 197 },
@@ -168,6 +200,16 @@ struct cpuidle_driver omap4_idle_driver = {
168 .safe_state_index = 0, 200 .safe_state_index = 0,
169}; 201};
170 202
203/*
204 * For each cpu, setup the broadcast timer because local timers
205 * stops for the states above C1.
206 */
207static void omap_setup_broadcast_timer(void *arg)
208{
209 int cpu = smp_processor_id();
210 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
211}
212
171/** 213/**
172 * omap4_idle_init - Init routine for OMAP4 idle 214 * omap4_idle_init - Init routine for OMAP4 idle
173 * 215 *
@@ -180,19 +222,30 @@ int __init omap4_idle_init(void)
180 unsigned int cpu_id = 0; 222 unsigned int cpu_id = 0;
181 223
182 mpu_pd = pwrdm_lookup("mpu_pwrdm"); 224 mpu_pd = pwrdm_lookup("mpu_pwrdm");
183 cpu0_pd = pwrdm_lookup("cpu0_pwrdm"); 225 cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
184 cpu1_pd = pwrdm_lookup("cpu1_pwrdm"); 226 cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
185 if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd)) 227 if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
186 return -ENODEV; 228 return -ENODEV;
187 229
188 dev = &per_cpu(omap4_idle_dev, cpu_id); 230 cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
189 dev->cpu = cpu_id; 231 cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
232 if (!cpu_clkdm[0] || !cpu_clkdm[1])
233 return -ENODEV;
234
235 /* Configure the broadcast timer on each cpu */
236 on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
237
238 for_each_cpu(cpu_id, cpu_online_mask) {
239 dev = &per_cpu(omap4_idle_dev, cpu_id);
240 dev->cpu = cpu_id;
241 dev->coupled_cpus = *cpu_online_mask;
190 242
191 cpuidle_register_driver(&omap4_idle_driver); 243 cpuidle_register_driver(&omap4_idle_driver);
192 244
193 if (cpuidle_register_device(dev)) { 245 if (cpuidle_register_device(dev)) {
194 pr_err("%s: CPUidle register device failed\n", __func__); 246 pr_err("%s: CPUidle register failed\n", __func__);
195 return -EIO; 247 return -EIO;
248 }
196 } 249 }
197 250
198 return 0; 251 return 0;
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 13d20c8a283d..2ff6d41ec6c6 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -130,6 +130,7 @@ static struct clock_event_device clockevent_gpt = {
130 .name = "gp_timer", 130 .name = "gp_timer",
131 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 131 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
132 .shift = 32, 132 .shift = 32,
133 .rating = 300,
133 .set_next_event = omap2_gp_timer_set_next_event, 134 .set_next_event = omap2_gp_timer_set_next_event,
134 .set_mode = omap2_gp_timer_set_mode, 135 .set_mode = omap2_gp_timer_set_mode,
135}; 136};
@@ -223,7 +224,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
223 clockevent_delta2ns(3, &clockevent_gpt); 224 clockevent_delta2ns(3, &clockevent_gpt);
224 /* Timer internal resynch latency. */ 225 /* Timer internal resynch latency. */
225 226
226 clockevent_gpt.cpumask = cpumask_of(0); 227 clockevent_gpt.cpumask = cpu_possible_mask;
228 clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
227 clockevents_register_device(&clockevent_gpt); 229 clockevents_register_device(&clockevent_gpt);
228 230
229 pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n", 231 pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",