aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorNicolas Pitre <nicolas.pitre@linaro.org>2013-06-13 23:51:18 -0400
committerNicolas Pitre <nicolas.pitre@linaro.org>2013-09-23 18:47:29 -0400
commit6137eba6c2b9bc2b7fd52e77741f50e43db4b5a6 (patch)
tree57fe4cd3d01b6c87c3f276a5044ec7cd59681530
parent14d2ca615a85e2dbc744c12c296affd35f119fa7 (diff)
ARM: bL_switcher: wait until inbound is alive before performing a switch
In some cases, a significant delay may be observed between the moment a request for a CPU to come up is made and the moment it is ready to start executing kernel code. This is especially true when a whole cluster has to be powered up which may take in the order of milliseconds. It is therefore a good idea to let the outbound CPU continue to execute code in the meantime, and be notified when the inbound is ready before performing the actual switch. This is achieved by registering a completion block with the appropriate IPI callback, and programming the sending of an IPI by the early assembly code prior to entering the main kernel code. Once the IPI is delivered to the outbound CPU, the completion block is "completed" and the switcher thread is resumed. Signed-off-by: Nicolas Pitre <nico@linaro.org>
-rw-r--r--arch/arm/common/bL_switcher.c30
1 files changed, 23 insertions, 7 deletions
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index aab7c1274885..dc53eb8dcc81 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -144,10 +144,11 @@ static int bL_switch_to(unsigned int new_cluster_id)
144{ 144{
145 unsigned int mpidr, this_cpu, that_cpu; 145 unsigned int mpidr, this_cpu, that_cpu;
146 unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster; 146 unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
147 struct completion inbound_alive;
147 struct tick_device *tdev; 148 struct tick_device *tdev;
148 enum clock_event_mode tdev_mode; 149 enum clock_event_mode tdev_mode;
149 long volatile *handshake_ptr; 150 long volatile *handshake_ptr;
150 int ret; 151 int ipi_nr, ret;
151 152
152 this_cpu = smp_processor_id(); 153 this_cpu = smp_processor_id();
153 ob_mpidr = read_mpidr(); 154 ob_mpidr = read_mpidr();
@@ -166,10 +167,18 @@ static int bL_switch_to(unsigned int new_cluster_id)
166 pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n", 167 pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
167 this_cpu, ob_mpidr, ib_mpidr); 168 this_cpu, ob_mpidr, ib_mpidr);
168 169
170 this_cpu = smp_processor_id();
171
169 /* Close the gate for our entry vectors */ 172 /* Close the gate for our entry vectors */
170 mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL); 173 mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
171 mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL); 174 mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);
172 175
176 /* Install our "inbound alive" notifier. */
177 init_completion(&inbound_alive);
178 ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
179 ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
180 mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);
181
173 /* 182 /*
174 * Let's wake up the inbound CPU now in case it requires some delay 183 * Let's wake up the inbound CPU now in case it requires some delay
175 * to come online, but leave it gated in our entry vector code. 184 * to come online, but leave it gated in our entry vector code.
@@ -181,6 +190,19 @@ static int bL_switch_to(unsigned int new_cluster_id)
181 } 190 }
182 191
183 /* 192 /*
193 * Raise a SGI on the inbound CPU to make sure it doesn't stall
194 * in a possible WFI, such as in bL_power_down().
195 */
196 gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);
197
198 /*
199 * Wait for the inbound to come up. This allows for other
200 * tasks to be scheduled in the mean time.
201 */
202 wait_for_completion(&inbound_alive);
203 mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);
204
205 /*
184 * From this point we are entering the switch critical zone 206 * From this point we are entering the switch critical zone
185 * and can't take any interrupts anymore. 207 * and can't take any interrupts anymore.
186 */ 208 */
@@ -190,12 +212,6 @@ static int bL_switch_to(unsigned int new_cluster_id)
190 /* redirect GIC's SGIs to our counterpart */ 212 /* redirect GIC's SGIs to our counterpart */
191 gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]); 213 gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
192 214
193 /*
194 * Raise a SGI on the inbound CPU to make sure it doesn't stall
195 * in a possible WFI, such as in mcpm_power_down().
196 */
197 arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));
198
199 tdev = tick_get_device(this_cpu); 215 tdev = tick_get_device(this_cpu);
200 if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu))) 216 if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
201 tdev = NULL; 217 tdev = NULL;