Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/include/asm/smp.h | 14
-rw-r--r--  arch/mips/kernel/smp.c      | 61
2 files changed, 67 insertions, 8 deletions
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 8bc6c70a4030..060f23ff1817 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -85,6 +85,20 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_allocate(const struct cpumask *mask);
+
+/*
+ * This function will free up IPIs allocated with mips_smp_ipi_allocate to the
+ * CPUs in mask, which must be a subset of the IPIs that have been configured.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
+
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
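The two declarations above are the whole public surface of the new interface. As a rough illustration only (the callers below are hypothetical and not part of this patch), a platform powering a CPU cluster up and down might use them along these lines:

#include <linux/cpumask.h>
#include <asm/smp.h>

/* Hypothetical caller, for illustration only. */
static int example_cluster_power_up(const struct cpumask *cluster_cpus)
{
	/* Wire up call-function and reschedule IPIs for the CPUs coming online. */
	return mips_smp_ipi_allocate(cluster_cpus);
}

static void example_cluster_power_down(const struct cpumask *cluster_cpus)
{
	/*
	 * Release them again; cluster_cpus must be a subset of the CPUs
	 * for which IPIs were previously allocated.
	 */
	mips_smp_ipi_free(cluster_cpus);
}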
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cb02df215365..0e131c9c39f6 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -231,7 +231,7 @@ static struct irqaction irq_call = {
 	.name		= "IPI call"
 };
 
-static __init void smp_ipi_init_one(unsigned int virq,
+static void smp_ipi_init_one(unsigned int virq,
 				    struct irqaction *action)
 {
 	int ret;
@@ -241,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq,
 	BUG_ON(ret);
 }
 
-static int __init mips_smp_ipi_init(void)
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
 {
-	unsigned int call_virq, sched_virq;
+	int virq;
 	struct irq_domain *ipidomain;
 	struct device_node *node;
 
@@ -270,16 +272,20 @@ static int __init mips_smp_ipi_init(void)
 	if (!ipidomain)
 		return 0;
 
-	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!call_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!call_virq)
+		call_virq = virq;
 
-	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!sched_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!sched_virq)
+		sched_virq = virq;
 
 	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
 		int cpu;
 
-		for_each_cpu(cpu, cpu_possible_mask) {
+		for_each_cpu(cpu, mask) {
 			smp_ipi_init_one(call_virq + cpu, &irq_call);
 			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
 		}
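For per-CPU IPI domains, the hunk above relies on irq_reserve_ipi() handing back a base virq with one consecutive virq per CPU in the mask. A small helper (hypothetical, for illustration only, not part of the patch) makes that layout explicit:

/* Hypothetical helper: resolve the virq of one CPU's IPI. */
static unsigned int example_ipi_virq(struct irq_domain *ipidomain,
				     unsigned int base_virq, unsigned int cpu)
{
	/* Per-CPU IPI domains reserve one virq per CPU, offset by cpu. */
	if (irq_domain_is_ipi_per_cpu(ipidomain))
		return base_virq + cpu;

	/* Otherwise a single virq is shared by all CPUs in the mask. */
	return base_virq;
}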
@@ -288,6 +294,45 @@ static int __init mips_smp_ipi_init(void)
 		smp_ipi_init_one(sched_virq, &irq_resched);
 	}
 
+	return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half DT setup. So if we found irq node but
+	 * didn't find an ipidomain, try to search for one that is not in the
+	 * DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, mask) {
+			remove_irq(call_virq + cpu, &irq_call);
+			remove_irq(sched_virq + cpu, &irq_resched);
+		}
+	}
+	irq_destroy_ipi(call_virq, mask);
+	irq_destroy_ipi(sched_virq, mask);
+	return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+	mips_smp_ipi_allocate(cpu_possible_mask);
+
 	call_desc = irq_to_desc(call_virq);
 	sched_desc = irq_to_desc(sched_virq);
 
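After this change, mips_smp_ipi_init() simply allocates IPIs for every possible CPU and caches the two irq descriptors. As a hedged sketch only (an assumption; the actual MIPS send path is not shown in this diff), cached descriptors like call_desc let a sender skip the virq-to-descriptor lookup via the generic __ipi_send_mask() helper:

/* Illustrative only: send a call-function IPI using the cached descriptor. */
static void example_send_call_ipi(const struct cpumask *mask)
{
	/* call_desc was looked up once in mips_smp_ipi_init(). */
	__ipi_send_mask(call_desc, mask);
}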