Diffstat (limited to 'arch/tile/kernel/smp.c')
 arch/tile/kernel/smp.c | 72
 1 file changed, 63 insertions(+), 9 deletions(-)
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 782c1bfa6dfe..1cb5ec79de04 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -15,10 +15,18 @@
  */
 
 #include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
+#include <linux/module.h>
 #include <asm/cacheflush.h>
 
 HV_Topology smp_topology __write_once;
+EXPORT_SYMBOL(smp_topology);
+
+#if CHIP_HAS_IPI()
+static unsigned long __iomem *ipi_mappings[NR_CPUS];
+#endif
 
 
 /*
@@ -100,7 +108,6 @@ void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
 /* Handler to start the current cpu. */
 static void smp_start_cpu_interrupt(void)
 {
-	extern unsigned long start_cpu_function_addr;
 	get_irq_regs()->pc = start_cpu_function_addr;
 }
 
@@ -174,12 +181,8 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }
 
 
-/*
- * The smp_send_reschedule() path does not use the hv_message_intr()
- * path but instead the faster tile_dev_intr() path for interrupts.
- */
-
-irqreturn_t handle_reschedule_ipi(int irq, void *token)
+/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
+static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
 	/*
 	 * Nothing to do here; when we return from interrupt, the
@@ -191,12 +194,63 @@ irqreturn_t handle_reschedule_ipi(int irq, void *token)
 	return IRQ_HANDLED;
 }
 
+static struct irqaction resched_action = {
+	.handler = handle_reschedule_ipi,
+	.name = "resched",
+	.dev_id = handle_reschedule_ipi /* unique token */,
+};
+
+void __init ipi_init(void)
+{
+#if CHIP_HAS_IPI()
+	int cpu;
+	/* Map IPI trigger MMIO addresses. */
+	for_each_possible_cpu(cpu) {
+		HV_Coord tile;
+		HV_PTE pte;
+		unsigned long offset;
+
+		tile.x = cpu_x(cpu);
+		tile.y = cpu_y(cpu);
+		if (hv_get_ipi_pte(tile, 1, &pte) != 0)
+			panic("Failed to initialize IPI for cpu %d\n", cpu);
+
+		offset = hv_pte_get_pfn(pte) << PAGE_SHIFT;
+		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
+	}
+#endif
+
+	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
+	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
+	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
+}
+
+#if CHIP_HAS_IPI()
+
+void smp_send_reschedule(int cpu)
+{
+	WARN_ON(cpu_is_offline(cpu));
+
+	/*
+	 * We just want to do an MMIO store. The traditional writeq()
+	 * functions aren't really correct here, since they're always
+	 * directed at the PCI shim. For now, just do a raw store,
+	 * casting away the __iomem attribute.
+	 */
+	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
+}
+
+#else
+
 void smp_send_reschedule(int cpu)
 {
 	HV_Coord coord;
 
 	WARN_ON(cpu_is_offline(cpu));
-	coord.y = cpu / smp_width;
-	coord.x = cpu % smp_width;
+
+	coord.y = cpu_y(cpu);
+	coord.x = cpu_x(cpu);
 	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
 }
+
+#endif /* CHIP_HAS_IPI() */
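
The core of the CHIP_HAS_IPI() path above is an indexing convention: ipi_init() maps one page of IPI trigger words per possible cpu (obtained from the hypervisor via hv_get_ipi_pte() and mapped with ioremap_prot()), and smp_send_reschedule() then raises IRQ_RESCHEDULE on a remote tile with a single store to word IRQ_RESCHEDULE of that cpu's mapping. The user-space sketch below models only that indexing convention with a plain array; fake_ipi_page, fake_send_ipi, and the constants are hypothetical stand-ins rather than the Tile hypervisor or kernel API, and the stored value is assumed irrelevant here (the patch itself simply stores 0).

/*
 * Sketch of the direct-IPI indexing convention: word N of a cpu's
 * trigger page stands for interrupt N on that tile. The real code
 * stores through an ioremap_prot() mapping of a hypervisor-provided
 * page; everything below is a made-up user-space model of that.
 */
#include <stdio.h>

#define NR_CPUS		4
#define NR_IPI_EVENTS	32
#define IRQ_RESCHEDULE	3	/* stand-in event number, not the kernel's value */

/* Stand-in for the per-cpu MMIO trigger page. */
static unsigned long fake_ipi_page[NR_CPUS][NR_IPI_EVENTS];

static void fake_send_ipi(int cpu, int event)
{
	/* One store to the target cpu's trigger word raises the event. */
	fake_ipi_page[cpu][event] = 0;	/* value assumed irrelevant, as in the patch */
	printf("cpu %d: event %d raised\n", cpu, event);
}

int main(void)
{
	/* What smp_send_reschedule(2) boils down to on the direct-IPI path. */
	fake_send_ipi(2, IRQ_RESCHEDULE);
	return 0;
}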