Diffstat (limited to 'arch/powerpc/kernel/irq.c')
 arch/powerpc/kernel/irq.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 49 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5b428e30866..4b1e82a5823 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -157,12 +157,6 @@ notrace void arch_local_irq_restore(unsigned long en)
 	if (get_hard_enabled())
 		return;
 
-#if defined(CONFIG_BOOKE) && defined(CONFIG_SMP)
-	/* Check for pending doorbell interrupts and resend to ourself */
-	if (cpu_has_feature(CPU_FTR_DBELL))
-		smp_muxed_ipi_resend();
-#endif
-
 	/*
 	 * Need to hard-enable interrupts here. Since currently disabled,
 	 * no need to take further asm precautions against preemption; but
@@ -170,16 +164,13 @@ notrace void arch_local_irq_restore(unsigned long en)
 	 */
 	local_paca->hard_enabled = en;
 
-#ifndef CONFIG_BOOKE
-	/* On server, re-trigger the decrementer if it went negative since
-	 * some processors only trigger on edge transitions of the sign bit.
-	 *
-	 * BookE has a level sensitive decrementer (latches in TSR) so we
-	 * don't need that
+	/*
+	 * Trigger the decrementer if we have a pending event. Some processors
+	 * only trigger on edge transitions of the sign bit. We might also
+	 * have disabled interrupts long enough that the decrementer wrapped
+	 * to positive.
 	 */
-	if ((int)mfspr(SPRN_DEC) < 0)
-		mtspr(SPRN_DEC, 1);
-#endif /* CONFIG_BOOKE */
+	decrementer_check_overflow();
 
 	/*
 	 * Force the delivery of pending soft-disabled interrupts on PS3.
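
The open-coded re-trigger being removed only fired when DEC had already gone
negative; decrementer_check_overflow() also covers the case the new comment
describes, where interrupts stayed disabled long enough for the decrementer to
wrap back to positive. The helper itself lives in arch/powerpc/kernel/time.c
and is not part of this diff. A minimal sketch of the idea, assuming a per-CPU
record of the next timebase deadline (field and helper names here are
illustrative, not verbatim kernel code):

	/* Sketch: force a decrementer exception if the next event is overdue. */
	void decrementer_check_overflow(void)
	{
		u64 now = get_tb();	/* current timebase */
		u64 next = __get_cpu_var(decrementers).next_tb;	/* assumed per-CPU deadline */

		/* Writing a tiny value makes the DEC sign-bit edge fire at once. */
		if (now >= next)
			set_dec(1);
	}
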
@@ -457,11 +448,18 @@ static inline void do_softirq_onstack(void)
 	curtp = current_thread_info();
 	irqtp = softirq_ctx[smp_processor_id()];
 	irqtp->task = curtp->task;
+	irqtp->flags = 0;
 	current->thread.ksp_limit = (unsigned long)irqtp +
 				_ALIGN_UP(sizeof(struct thread_info), 16);
 	call_do_softirq(irqtp);
 	current->thread.ksp_limit = saved_sp_limit;
 	irqtp->task = NULL;
+
+	/* Set any flag that may have been set on the
+	 * alternate stack
+	 */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
 }
 
 void do_softirq(void)
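
While softirqs run on the dedicated stack, anything that sets a thread_info
flag (a need_resched nudge, for instance) marks irqtp rather than the task's
real thread_info, so the flags are copied back before returning. set_bits() is
powerpc's atomic mask-OR primitive from arch/powerpc/include/asm/bitops.h;
conceptually it behaves like the sketch below (the real version uses larx/stcx.
reservations rather than a compiler builtin):

	/* Conceptual stand-in for powerpc's set_bits(): atomically OR a whole
	 * mask of bits into *addr so concurrently set flags are not lost. */
	static inline void set_bits_sketch(unsigned long mask,
					   volatile unsigned long *addr)
	{
		__atomic_fetch_or(addr, mask, __ATOMIC_RELAXED);
	}
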
@@ -750,7 +748,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
 	if (irq_setup_virq(host, virq, hwirq))
 		return NO_IRQ;
 
-	printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
+	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
 		hwirq, host->of_node ? host->of_node->full_name : "null", virq);
 
 	return virq;
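
The printk(KERN_DEBUG ...) call was compiled into every build and merely
filtered by the console loglevel; pr_debug() compiles away entirely unless
DEBUG is defined for the file, or becomes runtime-switchable when
CONFIG_DYNAMIC_DEBUG is enabled. To see this message again in a non-dynamic
build, the usual pattern is:

	/* At the very top of the .c file, before any #include: */
	#define DEBUG

	/* pr_debug("irq: ...") now expands to printk(KERN_DEBUG "irq: ...")
	 * instead of compiling to nothing. */
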
@@ -882,6 +880,41 @@ unsigned int irq_find_mapping(struct irq_host *host,
 }
 EXPORT_SYMBOL_GPL(irq_find_mapping);
 
+#ifdef CONFIG_SMP
+int irq_choose_cpu(const struct cpumask *mask)
+{
+	int cpuid;
+
+	if (cpumask_equal(mask, cpu_all_mask)) {
+		static int irq_rover;
+		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
+		unsigned long flags;
+
+		/* Round-robin distribution... */
+do_round_robin:
+		raw_spin_lock_irqsave(&irq_rover_lock, flags);
+
+		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
+		if (irq_rover >= nr_cpu_ids)
+			irq_rover = cpumask_first(cpu_online_mask);
+
+		cpuid = irq_rover;
+
+		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
+	} else {
+		cpuid = cpumask_first_and(mask, cpu_online_mask);
+		if (cpuid >= nr_cpu_ids)
+			goto do_round_robin;
+	}
+
+	return get_hard_smp_processor_id(cpuid);
+}
+#else
+int irq_choose_cpu(const struct cpumask *mask)
+{
+	return hard_smp_processor_id();
+}
+#endif
 
 unsigned int irq_radix_revmap_lookup(struct irq_host *host,
 				     irq_hw_number_t hwirq)
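
irq_choose_cpu() collapses an affinity mask to a single online hardware CPU
id, falling back to a lock-protected round-robin rover when the mask is "all
CPUs" (the goto re-enters that path when the mask contains no online CPU). A
hedged sketch of how an interrupt controller's .irq_set_affinity hook might
consume it, loosely modeled on the MPIC driver of the same era;
my_write_irq_dest() is a hypothetical register accessor, not a real kernel
function:

	static int my_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
	{
		/* Collapse the mask to one online hardware CPU id... */
		int hwcpu = irq_choose_cpu(mask);

		/* ...and point the controller's destination register at it. */
		my_write_irq_dest(irqd_to_hwirq(d), 1 << hwcpu);

		return IRQ_SET_MASK_OK;
	}
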