author     Paul Mackerras <paulus@samba.org>  2005-11-09 21:08:55 -0500
committer  Paul Mackerras <paulus@samba.org>  2005-11-09 21:08:55 -0500
commit     e130bedb7ce718a8eb6b56a3806b96281f618111 (patch)
tree       da5676a4291a79b06da6fe2b005e0058ec03cc2c /arch/ppc64/kernel
parent     00557b59c69ce284e5a61bcfcdbcc3dc867cb2da (diff)
parent     756e7104fefc82e3ebaa5f1da5ba6659c9c1cae5 (diff)
Merge git://oak/home/sfr/kernels/iseries/work
Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--  arch/ppc64/kernel/Makefile    2
-rw-r--r--  arch/ppc64/kernel/irq.c     519
-rw-r--r--  arch/ppc64/kernel/misc.S      8
3 files changed, 5 insertions, 524 deletions
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 7548968b7997..1f71f23cc26d 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,7 +11,7 @@ obj-y := misc.o prom.o
 
 endif
 
-obj-y += irq.o idle.o dma.o \
+obj-y += idle.o dma.o \
 	align.o \
 	udbg.o \
 	rtc.o \
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
deleted file mode 100644
index 87474584033f..000000000000
--- a/arch/ppc64/kernel/irq.c
+++ /dev/null
@@ -1,519 +0,0 @@
-/*
- * arch/ppc/kernel/irq.c
- *
- * Derived from arch/i386/kernel/irq.c
- * Copyright (C) 1992 Linus Torvalds
- * Adapted from arch/i386 by Gary Thomas
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
- * Copyright (C) 1996 Cort Dougan
- * Adapted for Power Macintosh by Paul Mackerras
- * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/random.h>
-#include <linux/kallsyms.h>
-#include <linux/profile.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/cache.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/iseries/it_lp_queue.h>
-#include <asm/machdep.h>
-#include <asm/paca.h>
-
-#ifdef CONFIG_SMP
-extern void iSeries_smp_message_recv( struct pt_regs * );
-#endif
-
-extern irq_desc_t irq_desc[NR_IRQS];
-EXPORT_SYMBOL(irq_desc);
-
-int distribute_irqs = 1;
-int __irq_offset_value;
-int ppc_spurious_interrupts;
-u64 ppc64_interrupt_controller;
-
-int show_interrupts(struct seq_file *p, void *v)
-{
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	irq_desc_t *desc;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++) {
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
-		}
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		desc = get_irq_desc(i);
-		spin_lock_irqsave(&desc->lock, flags);
-		action = desc->action;
-		if (!action || !action->handler)
-			goto skip;
-		seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
-		for (j = 0; j < NR_CPUS; j++) {
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
-		}
-#else
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
-		if (desc->handler)
-			seq_printf(p, " %s ", desc->handler->typename );
-		else
-			seq_printf(p, " None ");
-		seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
-		seq_printf(p, " %s",action->name);
-		for (action=action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-		seq_putc(p, '\n');
-skip:
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else if (i == NR_IRQS)
-		seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
-	return 0;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-void fixup_irqs(cpumask_t map)
-{
-	unsigned int irq;
-	static int warned;
-
-	for_each_irq(irq) {
-		cpumask_t mask;
-
-		if (irq_desc[irq].status & IRQ_PER_CPU)
-			continue;
-
-		cpus_and(mask, irq_affinity[irq], map);
-		if (any_online_cpu(mask) == NR_CPUS) {
-			printk("Breaking affinity for irq %i\n", irq);
-			mask = map;
-		}
-		if (irq_desc[irq].handler->set_affinity)
-			irq_desc[irq].handler->set_affinity(irq, mask);
-		else if (irq_desc[irq].action && !(warned++))
-			printk("Cannot set affinity for irq %i\n", irq);
-	}
-
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-}
-#endif
-
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
-	int status;
-	struct irqaction *action;
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = get_irq_desc(irq);
-	irqreturn_t action_ret;
-#ifdef CONFIG_IRQSTACKS
-	struct thread_info *curtp, *irqtp;
-#endif
-
-	kstat_cpu(cpu).irqs[irq]++;
-
-	if (desc->status & IRQ_PER_CPU) {
-		/* no locking required for CPU-local interrupts: */
-		ack_irq(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
-		desc->handler->end(irq);
-		return;
-	}
-
-	spin_lock(&desc->lock);
-	ack_irq(irq);
-	/*
-	   REPLAY is when Linux resends an IRQ that was dropped earlier
-	   WAITING is used by probe to mark irqs that are being tested
-	   */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		if (!action || !action->handler) {
-			ppc_spurious_interrupts++;
-			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
-			/* We can't call disable_irq here, it would deadlock */
-			if (!desc->depth)
-				desc->depth = 1;
-			desc->status |= IRQ_DISABLED;
-			/* This is not a real spurrious interrupt, we
-			 * have to eoi it, so we jump to out
-			 */
-			mask_irq(irq);
-			goto out;
-		}
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	   Since we set PENDING, if another processor is handling
-	   a different instance of this same irq, the other processor
-	   will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-
-#ifdef CONFIG_IRQSTACKS
-		/* Switch to the irq stack to handle this */
-		curtp = current_thread_info();
-		irqtp = hardirq_ctx[smp_processor_id()];
-		if (curtp != irqtp) {
-			irqtp->task = curtp->task;
-			irqtp->flags = 0;
-			action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
-			irqtp->task = NULL;
-			if (irqtp->flags)
-				set_bits(irqtp->flags, &curtp->flags);
-		} else
-#endif
-			action_ret = handle_IRQ_event(irq, regs, action);
-
-		spin_lock(&desc->lock);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-out:
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	if (desc->handler) {
-		if (desc->handler->end)
-			desc->handler->end(irq);
-		else if (desc->handler->enable)
-			desc->handler->enable(irq);
-	}
-	spin_unlock(&desc->lock);
-}
-
-#ifdef CONFIG_PPC_ISERIES
-void do_IRQ(struct pt_regs *regs)
-{
-	struct paca_struct *lpaca;
-
-	irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 2KB free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
-
-		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-				sp - sizeof(struct thread_info));
-			dump_stack();
-		}
-	}
-#endif
-
-	lpaca = get_paca();
-#ifdef CONFIG_SMP
-	if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
-		lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
-		iSeries_smp_message_recv(regs);
-	}
-#endif /* CONFIG_SMP */
-	if (hvlpevent_is_pending())
-		process_hvlpevents(regs);
-
-	irq_exit();
-
-	if (lpaca->lppaca.int_dword.fields.decr_int) {
-		lpaca->lppaca.int_dword.fields.decr_int = 0;
-		/* Signal a fake decrementer interrupt */
-		timer_interrupt(regs);
-	}
-}
-
-#else /* CONFIG_PPC_ISERIES */
-
-void do_IRQ(struct pt_regs *regs)
-{
-	int irq;
-
-	irq_enter();
-
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
-	/* Debugging check for stack overflow: is there less than 2KB free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
-
-		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
-			printk("do_IRQ: stack overflow: %ld\n",
-				sp - sizeof(struct thread_info));
-			dump_stack();
-		}
-	}
-#endif
-
-	irq = ppc_md.get_irq(regs);
-
-	if (irq >= 0)
-		ppc_irq_dispatch_handler(regs, irq);
-	else
-		/* That's not SMP safe ... but who cares ? */
-		ppc_spurious_interrupts++;
-
-	irq_exit();
-}
-#endif /* CONFIG_PPC_ISERIES */
-
-void __init init_IRQ(void)
-{
-	static int once = 0;
-
-	if (once)
-		return;
-
-	once++;
-
-	ppc_md.init_IRQ();
-	irq_ctx_init();
-}
-
-#ifndef CONFIG_PPC_ISERIES
-/*
- * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
- */
-
-#define UNDEFINED_IRQ 0xffffffff
-unsigned int virt_irq_to_real_map[NR_IRQS];
-
-/*
- * Don't use virtual irqs 0, 1, 2 for devices.
- * The pcnet32 driver considers interrupt numbers < 2 to be invalid,
- * and 2 is the XICS IPI interrupt.
- * We limit virtual irqs to 17 less than NR_IRQS so that when we
- * offset them by 16 (to reserve the first 16 for ISA interrupts)
- * we don't end up with an interrupt number >= NR_IRQS.
- */
-#define MIN_VIRT_IRQ	3
-#define MAX_VIRT_IRQ	(NR_IRQS - NUM_ISA_INTERRUPTS - 1)
-#define NR_VIRT_IRQS	(MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
-
-void
-virt_irq_init(void)
-{
-	int i;
-	for (i = 0; i < NR_IRQS; i++)
-		virt_irq_to_real_map[i] = UNDEFINED_IRQ;
-}
-
-/* Create a mapping for a real_irq if it doesn't already exist.
- * Return the virtual irq as a convenience.
- */
-int virt_irq_create_mapping(unsigned int real_irq)
-{
-	unsigned int virq, first_virq;
-	static int warned;
-
-	if (ppc64_interrupt_controller == IC_OPEN_PIC)
-		return real_irq;	/* no mapping for openpic (for now) */
-
-	if (ppc64_interrupt_controller == IC_CELL_PIC)
-		return real_irq;	/* no mapping for iic either */
-
-	/* don't map interrupts < MIN_VIRT_IRQ */
-	if (real_irq < MIN_VIRT_IRQ) {
-		virt_irq_to_real_map[real_irq] = real_irq;
-		return real_irq;
-	}
-
-	/* map to a number between MIN_VIRT_IRQ and MAX_VIRT_IRQ */
-	virq = real_irq;
-	if (virq > MAX_VIRT_IRQ)
-		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
-
-	/* search for this number or a free slot */
-	first_virq = virq;
-	while (virt_irq_to_real_map[virq] != UNDEFINED_IRQ) {
-		if (virt_irq_to_real_map[virq] == real_irq)
-			return virq;
-		if (++virq > MAX_VIRT_IRQ)
-			virq = MIN_VIRT_IRQ;
-		if (virq == first_virq)
-			goto nospace;	/* oops, no free slots */
-	}
-
-	virt_irq_to_real_map[virq] = real_irq;
-	return virq;
-
-nospace:
-	if (!warned) {
-		printk(KERN_CRIT "Interrupt table is full\n");
-		printk(KERN_CRIT "Increase NR_IRQS (currently %d) "
-		       "in your kernel sources and rebuild.\n", NR_IRQS);
-		warned = 1;
-	}
-	return NO_IRQ;
-}
-
-/*
- * In most cases will get a hit on the very first slot checked in the
- * virt_irq_to_real_map. Only when there are a large number of
- * IRQs will this be expensive.
- */
-unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
-{
-	unsigned int virq;
-	unsigned int first_virq;
-
-	virq = real_irq;
-
-	if (virq > MAX_VIRT_IRQ)
-		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;
-
-	first_virq = virq;
-
-	do {
-		if (virt_irq_to_real_map[virq] == real_irq)
-			return virq;
-
-		virq++;
-
-		if (virq >= MAX_VIRT_IRQ)
-			virq = 0;
-
-	} while (first_virq != virq);
-
-	return NO_IRQ;
-
-}
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_IRQSTACKS
-struct thread_info *softirq_ctx[NR_CPUS];
-struct thread_info *hardirq_ctx[NR_CPUS];
-
-void irq_ctx_init(void)
-{
-	struct thread_info *tp;
-	int i;
-
-	for_each_cpu(i) {
-		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
-		tp = softirq_ctx[i];
-		tp->cpu = i;
-		tp->preempt_count = SOFTIRQ_OFFSET;
-
-		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
-		tp = hardirq_ctx[i];
-		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
-	}
-}
-
-void do_softirq(void)
-{
-	unsigned long flags;
-	struct thread_info *curtp, *irqtp;
-
-	if (in_interrupt())
-		return;
-
-	local_irq_save(flags);
-
-	if (local_softirq_pending()) {
-		curtp = current_thread_info();
-		irqtp = softirq_ctx[smp_processor_id()];
-		irqtp->task = curtp->task;
-		call_do_softirq(irqtp);
-		irqtp->task = NULL;
-	}
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(do_softirq);
-
-#endif /* CONFIG_IRQSTACKS */
-
-static int __init setup_noirqdistrib(char *str)
-{
-	distribute_irqs = 0;
-	return 1;
-}
-
-__setup("noirqdistrib", setup_noirqdistrib);
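The virtual IRQ mapping removed above is a small open-addressing table: a real interrupt number is folded into the [MIN_VIRT_IRQ, MAX_VIRT_IRQ] window and then probed linearly until either the same real_irq or a free slot is found. The sketch below restates that algorithm from the deleted virt_irq_create_mapping() as a self-contained user-space program so it can be compiled and traced outside the kernel; the table size, the sample IRQ numbers, and the names map_init/create_mapping/virt_to_real are illustrative stand-ins, not the kernel's values or symbols.

/* Minimal user-space restatement of the linear-probing scheme from the
 * deleted virt_irq_create_mapping(); all constants are illustrative only. */
#include <stdio.h>

#define NR_IRQS        32			/* stand-in; the kernel value is larger */
#define MIN_VIRT_IRQ   3			/* virqs 0-2 are reserved (see the comments above) */
#define MAX_VIRT_IRQ   (NR_IRQS - 17)
#define NR_VIRT_IRQS   (MAX_VIRT_IRQ - MIN_VIRT_IRQ + 1)
#define UNDEFINED_IRQ  0xffffffff
#define NO_IRQ         (-1)

static unsigned int virt_to_real[NR_IRQS];

static void map_init(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++)
		virt_to_real[i] = UNDEFINED_IRQ;
}

static int create_mapping(unsigned int real_irq)
{
	unsigned int virq, first;

	/* small real irqs map to themselves */
	if (real_irq < MIN_VIRT_IRQ) {
		virt_to_real[real_irq] = real_irq;
		return real_irq;
	}

	/* fold into the [MIN_VIRT_IRQ, MAX_VIRT_IRQ] window */
	virq = real_irq;
	if (virq > MAX_VIRT_IRQ)
		virq = (virq % NR_VIRT_IRQS) + MIN_VIRT_IRQ;

	/* linear probe: reuse an existing slot or claim a free one */
	first = virq;
	while (virt_to_real[virq] != UNDEFINED_IRQ) {
		if (virt_to_real[virq] == real_irq)
			return virq;			/* already mapped */
		if (++virq > MAX_VIRT_IRQ)
			virq = MIN_VIRT_IRQ;		/* wrap around */
		if (virq == first)
			return NO_IRQ;			/* table full */
	}
	virt_to_real[virq] = real_irq;
	return virq;
}

int main(void)
{
	map_init();
	/* two real irqs that fold to the same slot end up in adjacent slots */
	printf("0x100 -> virq %d\n", create_mapping(0x100));
	printf("0x10d -> virq %d\n", create_mapping(0x10d));
	return 0;
}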
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 914632ec587d..492bca6137eb 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_IRQ_event)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
-	stdu	r1,THREAD_SIZE-112(r6)
-	mr	r1,r6
-	bl	.handle_IRQ_event
+	stdu	r1,THREAD_SIZE-112(r5)
+	mr	r1,r5
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
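The misc.S hunk renames the assembly trampoline and shortens its argument list: the new stack frame is now built from r5 rather than r6, and the branch-and-link target becomes the generic .__do_IRQ instead of .handle_IRQ_event, so the irqaction pointer is no longer passed down. Below is a hedged C-side sketch of how such a trampoline is typically invoked; the caller itself is not part of this diff, so the call___do_IRQ() prototype and the helper name dispatch_on_irq_stack() are inferences from the register usage (r3 = irq, r4 = regs, r5 = IRQ-stack thread_info), modelled on the CONFIG_IRQSTACKS branch of the deleted ppc_irq_dispatch_handler() above.

/*
 * Sketch only (kernel context assumed, not standalone-buildable): shows the
 * stack-switch pattern the renamed trampoline implies.
 */
extern void call___do_IRQ(int irq, struct pt_regs *regs,
			  struct thread_info *irqtp);

static void dispatch_on_irq_stack(int irq, struct pt_regs *regs)
{
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp = current_thread_info();
	struct thread_info *irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp != irqtp) {
		irqtp->task = curtp->task;	/* borrow the current task */
		irqtp->flags = 0;
		call___do_IRQ(irq, regs, irqtp);
		irqtp->task = NULL;
		if (irqtp->flags)		/* propagate work flagged while on the IRQ stack */
			set_bits(irqtp->flags, &curtp->flags);
		return;
	}
#endif
	__do_IRQ(irq, regs);			/* already on the IRQ stack (or no IRQ stacks) */
}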