Diffstat (limited to 'arch/powerpc')
 arch/powerpc/platforms/85xx/smp.c | 49 ++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6fcfa12e5c56..148c2f2d9780 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -128,6 +128,19 @@ static void __cpuinit smp_85xx_mach_cpu_die(void)
 }
 #endif
 
+static inline void flush_spin_table(void *spin_table)
+{
+	flush_dcache_range((ulong)spin_table,
+		(ulong)spin_table + sizeof(struct epapr_spin_table));
+}
+
+static inline u32 read_spin_table_addr_l(void *spin_table)
+{
+	flush_dcache_range((ulong)spin_table,
+		(ulong)spin_table + sizeof(struct epapr_spin_table));
+	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
+}
+
 static int __cpuinit smp_85xx_kick_cpu(int nr)
 {
 	unsigned long flags;
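Both helpers added above follow one idiom: flush the dcache lines covering the spin table so the access that follows is served from memory rather than from a stale cached copy. A minimal sketch of the generalized pattern, assuming only the real powerpc primitives flush_dcache_range() and in_be32() (fresh_read_be32() itself is hypothetical, not part of the patch):

	/*
	 * Sketch only: write back and invalidate the cached copy of a
	 * shared structure, then read one big-endian word of it from
	 * memory instead of from the dcache.
	 */
	static inline u32 fresh_read_be32(void *base, size_t size, u32 *reg)
	{
		flush_dcache_range((ulong)base, (ulong)base + size);
		return in_be32(reg);
	}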
@@ -161,8 +174,8 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 
 	/* Map the spin table */
 	if (ioremappable)
-		spin_table = ioremap(*cpu_rel_addr,
-				sizeof(struct epapr_spin_table));
+		spin_table = ioremap_prot(*cpu_rel_addr,
+			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
 	else
 		spin_table = phys_to_virt(*cpu_rel_addr);
 
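A note on the mapping change: plain ioremap() on these parts yields a cache-inhibited, guarded mapping, while ioremap_prot() with _PAGE_COHERENT maps the table cacheable with the WIMG "M" (memory-coherence) bit set. That is why the explicit flushes introduced elsewhere in this patch are now needed on the ioremap path too. A hypothetical consolidation of the two branches, reusing the patch's own names:

	/* Hypothetical helper, not in the patch: map the spin table either
	 * through a coherent I/O mapping or via the linear mapping. */
	static void *map_spin_table(phys_addr_t addr, bool ioremappable)
	{
		if (ioremappable)
			return ioremap_prot(addr,
				sizeof(struct epapr_spin_table), _PAGE_COHERENT);
		return phys_to_virt(addr);
	}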
@@ -173,7 +186,16 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 	generic_set_cpu_up(nr);
 
 	if (system_state == SYSTEM_RUNNING) {
+		/*
+		 * To stay compatible with old boot programs that use a
+		 * cache-inhibited spin table, we need to flush the cache
+		 * before accessing the spin table to invalidate any stale
+		 * data, and flush it again after writing to the spin table
+		 * to push the data out.
+		 */
+		flush_spin_table(spin_table);
 		out_be32(&spin_table->addr_l, 0);
+		flush_spin_table(spin_table);
 
 		/*
 		 * We don't set the BPTR register here since it already points
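The comment spells out the discipline: flush before the store so no stale line masks the table, then flush after so the zeroed addr_l actually reaches memory, where a boot program with a cache-inhibited view can see it. The same flush/store/flush sequence recurs below; a hypothetical helper capturing it (spin_table_write_be32() is not in the patch):

	/* Hypothetical: one big-endian word written with the patch's
	 * flush-store-flush discipline. */
	static void spin_table_write_be32(void *table, u32 *reg, u32 val)
	{
		flush_spin_table(table);	/* drop any stale cached lines */
		out_be32(reg, val);		/* big-endian store into the table */
		flush_spin_table(table);	/* push the new value out to memory */
	}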
@@ -181,9 +203,14 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 		 */
 		mpic_reset_core(hw_cpu);
 
-		/* wait until core is ready... */
-		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
-						10000, 100)) {
+		/*
+		 * Wait until the core is ready...
+		 * We need to invalidate stale data, in case the boot
+		 * loader uses a cache-inhibited spin table.
+		 */
+		if (!spin_event_timeout(
+				read_spin_table_addr_l(spin_table) == 1,
+				10000, 100)) {
 			pr_err("%s: timeout waiting for core %d to reset\n",
 							__func__, hw_cpu);
 			ret = -ENOENT;
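For reference, spin_event_timeout() is the powerpc polling macro from asm/delay.h: it re-evaluates its condition until it becomes true or the timeout (second argument, in microseconds) expires, delaying by the third argument between evaluations. Because the condition here is read_spin_table_addr_l(), every poll re-flushes the table and so observes memory rather than a stale dcache line. The call in isolation, with the patch's timings:

	/* Poll for up to 10000 us, re-checking every 100 us. */
	if (!spin_event_timeout(read_spin_table_addr_l(spin_table) == 1,
			10000, 100))
		pr_err("%s: core did not become ready\n", __func__);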
@@ -194,12 +221,10 @@ static int __cpuinit smp_85xx_kick_cpu(int nr)
 		__secondary_hold_acknowledge = -1;
 	}
 #endif
+	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
 	out_be32(&spin_table->addr_l, __pa(__early_start));
-
-	if (!ioremappable)
-		flush_dcache_range((ulong)spin_table,
-			(ulong)spin_table + sizeof(struct epapr_spin_table));
+	flush_spin_table(spin_table);
 
 	/* Wait a bit for the CPU to ack. */
 	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
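Note what this hunk deletes: previously the flush was guarded by !ioremappable, because an ioremap()ed table was cache-inhibited and so never had cached lines to worry about. With the table now mapped coherent and cacheable on both paths, the flush must be unconditional, which the surrounding flush_spin_table() calls provide. The removed guard, for comparison:

	/* Old code, removed above: flush only the linear-mapping case. */
	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));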
@@ -213,13 +238,11 @@ out:
 #else
 	smp_generic_kick_cpu(nr);
 
+	flush_spin_table(spin_table);
 	out_be32(&spin_table->pir, hw_cpu);
 	out_be64((u64 *)(&spin_table->addr_h),
 		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
-
-	if (!ioremappable)
-		flush_dcache_range((ulong)spin_table,
-			(ulong)spin_table + sizeof(struct epapr_spin_table));
+	flush_spin_table(spin_table);
 #endif
 
 	local_irq_restore(flags);
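The 64-bit branch publishes the release address with a single out_be64(): assuming addr_h and addr_l are adjacent 32-bit fields of struct epapr_spin_table with addr_h first, one big-endian doubleword store fills both halves in one write. The inner dereference reflects the ppc64 ABI, where a function pointer names a function descriptor whose first doubleword is the actual entry address. Unpacked for readability (entry is a local name used only in this sketch):

	/* Entry point: first doubleword of the ppc64 function descriptor. */
	u64 entry = __pa(*(unsigned long long *)generic_secondary_smp_init);
	/* One 64-bit big-endian store covers addr_h and addr_l together. */
	out_be64((u64 *)&spin_table->addr_h, entry);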