Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c | 80
1 file changed, 77 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5e37bf14ef2d..0bd8c7665834 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -64,8 +64,9 @@
 #include <asm/ptrace.h>
 #include <asm/machdep.h>
 #include <asm/udbg.h>
-#ifdef CONFIG_PPC_ISERIES
+#ifdef CONFIG_PPC64
 #include <asm/paca.h>
+#include <asm/firmware.h>
 #endif
 
 int __irq_offset_value;
@@ -95,6 +96,74 @@ extern atomic_t ipi_sent;
 EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
+
+static inline unsigned long get_hard_enabled(void)
+{
+	unsigned long enabled;
+
+	__asm__ __volatile__("lbz %0,%1(13)"
+	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));
+
+	return enabled;
+}
+
+static inline void set_soft_enabled(unsigned long enable)
+{
+	__asm__ __volatile__("stb %0,%1(13)"
+	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
+}
+
+void local_irq_restore(unsigned long en)
+{
+	/*
+	 * get_paca()->soft_enabled = en;
+	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
+	 * That was allowed before, and in such a case we do need to take care
+	 * that gcc will set soft_enabled directly via r13, not choose to use
+	 * an intermediate register, lest we're preempted to a different cpu.
+	 */
+	set_soft_enabled(en);
+	if (!en)
+		return;
+
+	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+		/*
+		 * Do we need to disable preemption here?  Not really: in the
+		 * unlikely event that we're preempted to a different cpu in
+		 * between getting r13, loading its lppaca_ptr, and loading
+		 * its any_int, we might call iseries_handle_interrupts without
+		 * an interrupt pending on the new cpu, but that's no disaster,
+		 * is it?  And the business of preempting us off the old cpu
+		 * would itself involve a local_irq_restore which handles the
+		 * interrupt to that cpu.
+		 *
+		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
+		 * to avoid any preemption checking added into get_paca().
+		 */
+		if (local_paca->lppaca_ptr->int_dword.any_int)
+			iseries_handle_interrupts();
+		return;
+	}
+
+	/*
+	 * if (get_paca()->hard_enabled) return;
+	 * But again we need to take care that gcc gets hard_enabled directly
+	 * via r13, not choose to use an intermediate register, lest we're
+	 * preempted to a different cpu in between the two instructions.
+	 */
+	if (get_hard_enabled())
+		return;
+
+	/*
+	 * Need to hard-enable interrupts here.  Since currently disabled,
+	 * no need to take further asm precautions against preemption; but
+	 * use local_paca instead of get_paca() to avoid preemption checking.
+	 */
+	local_paca->hard_enabled = en;
+	if ((int)mfspr(SPRN_DEC) < 0)
+		mtspr(SPRN_DEC, 1);
+	hard_irq_enable();
+}
 #endif /* CONFIG_PPC64 */
 
 int show_interrupts(struct seq_file *p, void *v)
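The new local_irq_restore() above is the replay half of the lazy ("soft") interrupt-disable scheme: soft_enabled records whether the kernel wants interrupts on, hard_enabled records whether the hardware actually has them on, and both live in the paca and are accessed through r13 directly so that a preemption between instructions cannot end up writing another CPU's paca. The snippet below is only a rough userspace illustration of that two-flag state machine, assuming a masked-interrupt handler records the deferral; every name in it (fake_paca, sketch_*) is invented for the example and none of it is kernel code.

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_paca {
		bool soft_enabled;	/* what the kernel thinks */
		bool hard_enabled;	/* what the hardware is actually doing */
	};

	static struct fake_paca paca = { .soft_enabled = true, .hard_enabled = true };

	static void sketch_local_irq_disable(void)
	{
		paca.soft_enabled = false;	/* cheap: no hardware access needed */
	}

	/* Called from the low-level interrupt path while soft-disabled. */
	static void sketch_masked_interrupt(void)
	{
		paca.hard_enabled = false;	/* remember we really turned them off */
		printf("interrupt deferred until local_irq_restore\n");
	}

	static void sketch_local_irq_restore(bool en)
	{
		paca.soft_enabled = en;
		if (!en)
			return;
		if (paca.hard_enabled)
			return;			/* nothing was deferred */
		paca.hard_enabled = true;	/* stands in for hard_irq_enable() */
		printf("replaying deferred interrupt\n");
	}

	int main(void)
	{
		sketch_local_irq_disable();
		sketch_masked_interrupt();	/* interrupt arrives while soft-disabled */
		sketch_local_irq_restore(true);
		return 0;
	}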
@@ -246,7 +315,8 @@ void do_IRQ(struct pt_regs *regs)
 	set_irq_regs(old_regs);
 
 #ifdef CONFIG_PPC_ISERIES
-	if (get_lppaca()->int_dword.fields.decr_int) {
+	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
+			get_lppaca()->int_dword.fields.decr_int) {
 		get_lppaca()->int_dword.fields.decr_int = 0;
 		/* Signal a fake decrementer interrupt */
 		timer_interrupt(regs);
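The added firmware_has_feature(FW_FEATURE_ISERIES) test matters for combined kernels: CONFIG_PPC_ISERIES only says the code was built in, not that we booted on iSeries firmware, so on other machines the lppaca decrementer flag must not be consulted. A compact, self-contained sketch of that compile-time-plus-runtime gating pattern follows; CONFIG_FAKE_QUIRK and the fake_* names are invented for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	#define CONFIG_FAKE_QUIRK 1		/* stands in for a Kconfig option */

	/* Pretend firmware probe: true only on the quirky platform. */
	static bool fake_firmware_has_feature(void)
	{
		return false;			/* booted on some other machine */
	}

	int main(void)
	{
	#ifdef CONFIG_FAKE_QUIRK
		/* The compile-time gate keeps the code out of kernels that never
		 * need it; the runtime gate keeps a combined kernel from touching
		 * platform-private state on the wrong machine. */
		if (fake_firmware_has_feature())
			printf("handle platform-specific interrupt state\n");
	#endif
		return 0;
	}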
@@ -626,10 +696,14 @@ EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
 
 void irq_dispose_mapping(unsigned int virq)
 {
-	struct irq_host *host = irq_map[virq].host;
+	struct irq_host *host;
 	irq_hw_number_t hwirq;
 	unsigned long flags;
 
+	if (virq == NO_IRQ)
+		return;
+
+	host = irq_map[virq].host;
 	WARN_ON (host == NULL);
 	if (host == NULL)
 		return;
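Accepting NO_IRQ here lets callers dispose of a mapping unconditionally in error and teardown paths, without first checking whether irq_of_parse_and_map() ever succeeded. A hypothetical driver cleanup of that shape is sketched below; fake_dev and fake_dev_remove are invented names, and only irq_dispose_mapping() and NO_IRQ come from the code above.

	struct fake_dev {
		unsigned int virq;		/* NO_IRQ when no mapping was created */
	};

	static void fake_dev_remove(struct fake_dev *dev)
	{
		/* Safe either way now: a real mapping is torn down,
		 * NO_IRQ simply returns early. */
		irq_dispose_mapping(dev->virq);
		dev->virq = NO_IRQ;
	}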