| field | value | date |
|---|---|---|
| author | Stephen Rothwell <sfr@canb.auug.org.au> | 2005-11-09 00:19:53 -0500 |
| committer | Stephen Rothwell <sfr@canb.auug.org.au> | 2005-11-09 00:19:53 -0500 |
| commit | d4be4f37d9d2a5afc8e79a95beafbac4b83f20c5 | |
| tree | 848bb06787a199c777c9e54ebd33c5f80d398c1b | |
| parent | 5a7b3ff4670be3330842558dc5ae46ec3fc448e5 | |
ppc64: remove ppc_irq_dispatch_handler
Use __do_IRQ instead. The only difference is that every controller
is now assumed to have an end() routine (only xics_8259 did not).
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
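
To make the behavioural difference concrete, here is a small hedged C sketch (not part of the commit): the structs are simplified stand-ins for the 2.6.x `irq_desc_t`/`hw_irq_controller` types, and `end_irq_old()`/`end_irq_new()` are invented helper names. The removed ppc64 dispatcher fell back to `enable()` when a controller had no `end()` hook; the generic `__do_IRQ()` path of this era calls `end()` unconditionally, which is why every controller must now provide one.

```c
/*
 * Hedged illustration only -- not code from this commit.  The structs are
 * simplified stand-ins for the 2.6.x irq_desc_t / hw_irq_controller types,
 * and end_irq_old()/end_irq_new() are invented helper names.
 */
struct hw_irq_controller {
	void (*end)(unsigned int irq);
	void (*enable)(unsigned int irq);
};

struct irq_desc {
	struct hw_irq_controller *handler;
};

/* Tail of the removed ppc_irq_dispatch_handler(): a controller without an
 * end() hook was tolerated by falling back to enable(). */
static void end_irq_old(struct irq_desc *desc, unsigned int irq)
{
	if (desc->handler) {
		if (desc->handler->end)
			desc->handler->end(irq);
		else if (desc->handler->enable)
			desc->handler->enable(irq);
	}
}

/* The generic __do_IRQ() instead ends the interrupt unconditionally, so
 * every controller -- including xics_8259 -- must provide an end() hook. */
static void end_irq_new(struct irq_desc *desc, unsigned int irq)
{
	desc->handler->end(irq);
}
```

This is the reason the xics.c hunk below wires up `xics_8259_pic.end` before that controller is installed for the ISA interrupts.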
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/powerpc/kernel/misc_64.S | 4 |
| -rw-r--r-- | arch/powerpc/platforms/iseries/irq.c | 10 |
| -rw-r--r-- | arch/powerpc/platforms/pseries/xics.c | 1 |
| -rw-r--r-- | arch/ppc64/kernel/irq.c | 108 |
| -rw-r--r-- | arch/ppc64/kernel/misc.S | 4 |
| -rw-r--r-- | include/asm-powerpc/hw_irq.h | 1 |
| -rw-r--r-- | include/asm-powerpc/irq.h | 2 |
7 files changed, 12 insertions, 118 deletions
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 9d09f0ad6efe..ae48a002f81a 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_ppc_irq_dispatch_handler)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-112(r5)
 	mr	r1,r5
-	bl	.ppc_irq_dispatch_handler
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a0ff7d95fdf3..01090e9ce0cf 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -120,13 +120,13 @@ static void intReceived(struct XmPciLpEvent *eventParm,
 	if (curtp != irqtp) {
 		irqtp->task = curtp->task;
 		irqtp->flags = 0;
-		call_ppc_irq_dispatch_handler(regsParm, irq, irqtp);
+		call___do_IRQ(irq, regsParm, irqtp);
 		irqtp->task = NULL;
 		if (irqtp->flags)
 			set_bits(irqtp->flags, &curtp->flags);
 	} else
 #endif
-		ppc_irq_dispatch_handler(regsParm, irq);
+		__do_IRQ(irq, regsParm);
 	HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
 			eventParm->eventData.slotInterrupt.subBusNumber,
 			eventParm->eventData.slotInterrupt.deviceId);
@@ -326,10 +326,8 @@ static void iSeries_disable_IRQ(unsigned int irq)
 }
 
 /*
- * Need to define this so ppc_irq_dispatch_handler will NOT call
- * enable_IRQ at the end of interrupt handling. However, this does
- * nothing because there is not enough information provided to do
- * the EOI HvCall. This is done by XmPciLpEvent.c
+ * This does nothing because there is not enough information
+ * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
  */
 static void iSeries_end_IRQ(unsigned int irq)
 {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index c72c86f05cb6..405c4f3229b3 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -567,6 +567,7 @@ nextnode:
 
 	xics_8259_pic.enable = i8259_pic.enable;
 	xics_8259_pic.disable = i8259_pic.disable;
+	xics_8259_pic.end = i8259_pic.end;
 	for (i = 0; i < 16; ++i)
 		get_irq_desc(i)->handler = &xics_8259_pic;
 	for (; i < NR_IRQS; ++i)
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index b61497d5be28..bd6a95a5914d 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -144,110 +144,6 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
-	int status;
-	struct irqaction *action;
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = get_irq_desc(irq);
-	irqreturn_t action_ret;
-
-	kstat_cpu(cpu).irqs[irq]++;
-
-	if (desc->status & IRQ_PER_CPU) {
-		/* no locking required for CPU-local interrupts: */
-		ack_irq(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
-		desc->handler->end(irq);
-		return;
-	}
-
-	spin_lock(&desc->lock);
-	ack_irq(irq);
-	/*
-	   REPLAY is when Linux resends an IRQ that was dropped earlier
-	   WAITING is used by probe to mark irqs that are being tested
-	   */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		if (!action || !action->handler) {
-			ppc_spurious_interrupts++;
-			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
-			/* We can't call disable_irq here, it would deadlock */
-			if (!desc->depth)
-				desc->depth = 1;
-			desc->status |= IRQ_DISABLED;
-			/* This is not a real spurrious interrupt, we
-			 * have to eoi it, so we jump to out
-			 */
-			mask_irq(irq);
-			goto out;
-		}
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	   Since we set PENDING, if another processor is handling
-	   a different instance of this same irq, the other processor
-	   will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, regs, action);
-
-		spin_lock(&desc->lock);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-out:
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	if (desc->handler) {
-		if (desc->handler->end)
-			desc->handler->end(irq);
-		else if (desc->handler->enable)
-			desc->handler->enable(irq);
-	}
-	spin_unlock(&desc->lock);
-}
-
 #ifdef CONFIG_PPC_ISERIES
 void do_IRQ(struct pt_regs *regs)
 {
@@ -325,13 +221,13 @@ void do_IRQ(struct pt_regs *regs)
 		if (curtp != irqtp) {
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call_ppc_irq_dispatch_handler(regs, irq, irqtp);
+			call___do_IRQ(irq, regs, irqtp);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			ppc_irq_dispatch_handler(regs, irq);
+			__do_IRQ(irq, regs);
 	} else
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index d82a30dc26f8..492bca6137eb 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_ppc_irq_dispatch_handler)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-112(r5)
 	mr	r1,r5
-	bl	.ppc_irq_dispatch_handler
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index c37b31b96337..26b89d859c56 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -12,7 +12,6 @@
 #include <asm/processor.h>
 
 extern void timer_interrupt(struct pt_regs *);
-extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
 
 #ifdef CONFIG_PPC_ISERIES
 
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index 2a768e096067..225dc182ef3b 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -488,7 +488,7 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_ppc_irq_dispatch_handler(struct pt_regs *regs, int irq,
+extern int call___do_IRQ(int irq, struct pt_regs *regs,
 		struct thread_info *tp);
 
 #define __ARCH_HAS_DO_SOFTIRQ
