author		Thomas Gleixner <tglx@linutronix.de>		2006-07-01 17:30:09 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-07-01 17:30:09 -0400
commit		4a2581a080098ca3a0c4e416d7a282e96c75ebf8 (patch)
tree		6709b53f00a271fe8dd76b6cfb821419c8afb206 /arch/arm
parent		f8b5473fcbddbfde827ecf82aa0e81fa2a878220 (diff)
[ARM] 3692/1: ARM: switch irq handling to the generic implementation
Patch from Thomas Gleixner

From: Thomas Gleixner <tglx@linutronix.de>

Switch the ARM irq core handling to the generic implementation. The ARM
specific header files now contain mostly migration stubs and helper
macros. Note that each machine type must be converted separately after
this step. The machine type conversions were separated out from this
patch for easier review.

The main change for the machine type code is the conversion of the type
handlers to a 'type flow' and 'chip' model. This affects only the
multiplex interrupt handlers. A conversion macro needs to be added to
those implementations, which defines the data structure that is
registered by the set_irq_chained_handler() macro. Some minor fixups of
include files and conversions of data structure accesses are necessary
all over the place. The mostly macro-based conversion was provided to
allow an easy migration of the existing implementations.

The code compiles on all defconfigs available in arch/arm/configs
except those which were already broken before applying the conversion
patches. The code has been boot and runtime tested on most ARM
platforms. The results of an extensive testing and bugfixing series can
be found at: http://www.linutronix.de/index.php?page=testing

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
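For review context only: the 'type flow' plus 'chip' conversion described
above, applied to a hypothetical machine type with one multiplexed
interrupt, would look roughly like the sketch below. It is not part of
this patch. Every example_*/EXAMPLE_* identifier is invented, while
set_irq_chip(), set_irq_handler(), set_irq_flags(),
set_irq_chained_handler(), handle_level_irq() and desc_handle_irq() are
the generic-IRQ interfaces this series migrates to.

	#include <linux/irq.h>
	#include <linux/bitops.h>
	#include <asm/mach/irq.h>

	/* Hypothetical IRQ numbers; real code takes these from mach headers. */
	#define EXAMPLE_MUX_IRQ		20	/* multiplexing parent IRQ */
	#define EXAMPLE_IRQ_BASE	64	/* first demuxed IRQ number */
	#define EXAMPLE_IRQ_END		96	/* one past last demuxed IRQ */

	/* Stubs standing in for real register accesses. */
	static void example_ack(unsigned int irq)    { /* would ack hardware */ }
	static void example_mask(unsigned int irq)   { /* would mask source */ }
	static void example_unmask(unsigned int irq) { /* would unmask source */ }

	static unsigned int example_pending_status(void)
	{
		return 0;	/* would read a pending-status register */
	}

	/* The 'chip' half of the model: low-level hardware operations. */
	static struct irq_chip example_chip = {
		.name	= "EXAMPLE",
		.ack	= example_ack,
		.mask	= example_mask,
		.unmask	= example_unmask,
	};

	/* Chained handler: demultiplex the parent IRQ to per-bit IRQs. */
	static void example_demux_handler(unsigned int irq,
					  struct irq_desc *desc,
					  struct pt_regs *regs)
	{
		unsigned int pending = example_pending_status();

		while (pending) {
			unsigned int bit = ffs(pending) - 1;
			unsigned int cascade = EXAMPLE_IRQ_BASE + bit;

			/* Hand each demuxed IRQ to its registered flow handler */
			desc_handle_irq(cascade, irq_desc + cascade, regs);
			pending &= ~(1 << bit);
		}
	}

	static void __init example_init_irq(void)
	{
		unsigned int irq;

		for (irq = EXAMPLE_IRQ_BASE; irq < EXAMPLE_IRQ_END; irq++) {
			set_irq_chip(irq, &example_chip);	/* 'chip' model */
			set_irq_handler(irq, handle_level_irq);	/* 'type flow' */
			set_irq_flags(irq, IRQF_VALID);
		}
		/* Register the demux entry point on the parent interrupt */
		set_irq_chained_handler(EXAMPLE_MUX_IRQ, example_demux_handler);
	}

The per-IRQ chip/flow registration replaces the old single 'handle'
function pointer; the chained handler is the only place machine code
still dispatches interrupts by hand.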
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/Kconfig	 12
-rw-r--r--	arch/arm/kernel/fiq.c	  1
-rw-r--r--	arch/arm/kernel/irq.c	951
3 files changed, 39 insertions, 925 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index f123c7c9fc98..531661ac01b4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -47,6 +47,18 @@ config MCA
 	  <file:Documentation/mca.txt> (and especially the web page given
 	  there) before attempting to build an MCA bus kernel.
 
+config GENERIC_HARDIRQS
+	bool
+	default y
+
+config HARDIRQS_SW_RESEND
+	bool
+	default y
+
+config GENERIC_IRQ_PROBE
+	bool
+	default y
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 1ec3f7faa259..e8e90346f11c 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -38,6 +38,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/seq_file.h>
 
 #include <asm/cacheflush.h>
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 4d31cf8d02d5..c3d4e94ef5bf 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -26,6 +26,7 @@
 #include <linux/signal.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/ptrace.h>
 #include <linux/slab.h>
 #include <linux/random.h>
@@ -37,192 +38,18 @@
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
 
-#include <asm/irq.h>
 #include <asm/system.h>
-#include <asm/mach/irq.h>
 #include <asm/mach/time.h>
 
 /*
- * Maximum IRQ count. Currently, this is arbitary. However, it should
- * not be set too low to prevent false triggering. Conversely, if it
- * is set too high, then you could miss a stuck IRQ.
- *
- * Maybe we ought to set a timer and re-enable the IRQ at a later time?
- */
-#define MAX_IRQ_CNT 100000
-
-static int noirqdebug __read_mostly;
-static volatile unsigned long irq_err_count;
-static DEFINE_SPINLOCK(irq_controller_lock);
-static LIST_HEAD(irq_pending);
-
-struct irqdesc irq_desc[NR_IRQS];
-void (*init_arch_irq)(void) __initdata = NULL;
-
-/*
  * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
  */
 #ifndef irq_finish
 #define irq_finish(irq) do { } while (0)
 #endif
 
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
-irqreturn_t no_action(int irq, void *dev_id, struct pt_regs *regs)
-{
-        return IRQ_NONE;
-}
-
-void do_bad_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
-{
-        irq_err_count++;
-        printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
-}
-
-static struct irqchip bad_chip = {
-        .ack    = dummy_mask_unmask_irq,
-        .mask   = dummy_mask_unmask_irq,
-        .unmask = dummy_mask_unmask_irq,
-};
-
-static struct irqdesc bad_irq_desc = {
-        .chip           = &bad_chip,
-        .handle         = do_bad_IRQ,
-        .pend           = LIST_HEAD_INIT(bad_irq_desc.pend),
-        .disable_depth  = 1,
-};
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-
-        while (desc->running)
-                barrier();
-}
-EXPORT_SYMBOL(synchronize_irq);
-
-#define smp_set_running(desc)   do { desc->running = 1; } while (0)
-#define smp_clear_running(desc) do { desc->running = 0; } while (0)
-#else
-#define smp_set_running(desc)   do { } while (0)
-#define smp_clear_running(desc) do { } while (0)
-#endif
-
-/**
- * disable_irq_nosync - disable an irq without waiting
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Enables and disables
- * are nested. We do this lazily.
- *
- * This function may be called from IRQ context.
- */
-void disable_irq_nosync(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-        unsigned long flags;
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        desc->disable_depth++;
-        list_del_init(&desc->pend);
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-EXPORT_SYMBOL(disable_irq_nosync);
-
-/**
- * disable_irq - disable an irq and wait for completion
- * @irq: Interrupt to disable
- *
- * Disable the selected interrupt line. Enables and disables
- * are nested. This functions waits for any pending IRQ
- * handlers for this interrupt to complete before returning.
- * If you use this function while holding a resource the IRQ
- * handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void disable_irq(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-
-        disable_irq_nosync(irq);
-        if (desc->action)
-                synchronize_irq(irq);
-}
-EXPORT_SYMBOL(disable_irq);
-
-/**
- * enable_irq - enable interrupt handling on an irq
- * @irq: Interrupt to enable
- *
- * Re-enables the processing of interrupts on this IRQ line.
- * Note that this may call the interrupt handler, so you may
- * get unexpected results if you hold IRQs disabled.
- *
- * This function may be called from IRQ context.
- */
-void enable_irq(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-        unsigned long flags;
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        if (unlikely(!desc->disable_depth)) {
-                printk("enable_irq(%u) unbalanced from %p\n", irq,
-                        __builtin_return_address(0));
-        } else if (!--desc->disable_depth) {
-                desc->probing = 0;
-                desc->chip->unmask(irq);
-
-                /*
-                 * If the interrupt is waiting to be processed,
-                 * try to re-run it. We can't directly run it
-                 * from here since the caller might be in an
-                 * interrupt-protected region.
-                 */
-                if (desc->pending && list_empty(&desc->pend)) {
-                        desc->pending = 0;
-                        if (!desc->chip->retrigger ||
-                            desc->chip->retrigger(irq))
-                                list_add(&desc->pend, &irq_pending);
-                }
-        }
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-EXPORT_SYMBOL(enable_irq);
-
-/*
- * Enable wake on selected irq
- */
-void enable_irq_wake(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-        unsigned long flags;
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        if (desc->chip->set_wake)
-                desc->chip->set_wake(irq, 1);
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-EXPORT_SYMBOL(enable_irq_wake);
-
-void disable_irq_wake(unsigned int irq)
-{
-        struct irqdesc *desc = irq_desc + irq;
-        unsigned long flags;
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        if (desc->chip->set_wake)
-                desc->chip->set_wake(irq, 0);
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-EXPORT_SYMBOL(disable_irq_wake);
+void (*init_arch_irq)(void) __initdata = NULL;
+unsigned long irq_err_count;
 
 int show_interrupts(struct seq_file *p, void *v)
 {
@@ -242,8 +69,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_controller_lock, flags);
+		spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto unlock;
 
@@ -256,7 +83,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	seq_putc(p, '\n');
 unlock:
-	spin_unlock_irqrestore(&irq_controller_lock, flags);
+	spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 #ifdef CONFIG_ARCH_ACORN
 		show_fiq_list(p, v);
@@ -270,267 +97,11 @@ unlock:
 	return 0;
 }
 
-/*
- * IRQ lock detection.
- *
- * Hopefully, this should get us out of a few locked situations.
- * However, it may take a while for this to happen, since we need
- * a large number if IRQs to appear in the same jiffie with the
- * same instruction pointer (or within 2 instructions).
- */
-static int check_irq_lock(struct irqdesc *desc, int irq, struct pt_regs *regs)
-{
-        unsigned long instr_ptr = instruction_pointer(regs);
-
-        if (desc->lck_jif == jiffies &&
-            desc->lck_pc >= instr_ptr && desc->lck_pc < instr_ptr + 8) {
-                desc->lck_cnt += 1;
-
-                if (desc->lck_cnt > MAX_IRQ_CNT) {
-                        printk(KERN_ERR "IRQ LOCK: IRQ%d is locking the system, disabled\n", irq);
-                        return 1;
-                }
-        } else {
-                desc->lck_cnt = 0;
-                desc->lck_pc  = instruction_pointer(regs);
-                desc->lck_jif = jiffies;
-        }
-        return 0;
-}
-
-static void
-report_bad_irq(unsigned int irq, struct pt_regs *regs, struct irqdesc *desc, int ret)
-{
-        static int count = 100;
-        struct irqaction *action;
-
-        if (noirqdebug)
-                return;
-
-        if (ret != IRQ_HANDLED && ret != IRQ_NONE) {
-                if (!count)
-                        return;
-                count--;
-                printk("irq%u: bogus retval mask %x\n", irq, ret);
-        } else {
-                desc->irqs_unhandled++;
-                if (desc->irqs_unhandled <= 99900)
-                        return;
-                desc->irqs_unhandled = 0;
-                printk("irq%u: nobody cared\n", irq);
-        }
-        show_regs(regs);
-        dump_stack();
-        printk(KERN_ERR "handlers:");
-        action = desc->action;
-        do {
-                printk("\n" KERN_ERR "[<%p>]", action->handler);
-                print_symbol(" (%s)", (unsigned long)action->handler);
-                action = action->next;
-        } while (action);
-        printk("\n");
-}
-
-static int
-__do_irq(unsigned int irq, struct irqaction *action, struct pt_regs *regs)
-{
-        unsigned int status;
-        int ret, retval = 0;
-
-        spin_unlock(&irq_controller_lock);
-
-#ifdef CONFIG_NO_IDLE_HZ
-        if (!(action->flags & SA_TIMER) && system_timer->dyn_tick != NULL) {
-                spin_lock(&system_timer->dyn_tick->lock);
-                if (system_timer->dyn_tick->state & DYN_TICK_ENABLED)
-                        system_timer->dyn_tick->handler(irq, 0, regs);
-                spin_unlock(&system_timer->dyn_tick->lock);
-        }
-#endif
-
-        if (!(action->flags & SA_INTERRUPT))
-                local_irq_enable();
-
-        status = 0;
-        do {
-                ret = action->handler(irq, action->dev_id, regs);
-                if (ret == IRQ_HANDLED)
-                        status |= action->flags;
-                retval |= ret;
-                action = action->next;
-        } while (action);
-
-        if (status & SA_SAMPLE_RANDOM)
-                add_interrupt_randomness(irq);
-
-        spin_lock_irq(&irq_controller_lock);
-
-        return retval;
-}
-
-/*
- * This is for software-decoded IRQs. The caller is expected to
- * handle the ack, clear, mask and unmask issues.
- */
-void
-do_simple_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
-{
-        struct irqaction *action;
-        const unsigned int cpu = smp_processor_id();
-
-        desc->triggered = 1;
-
-        kstat_cpu(cpu).irqs[irq]++;
-
-        smp_set_running(desc);
-
-        action = desc->action;
-        if (action) {
-                int ret = __do_irq(irq, action, regs);
-                if (ret != IRQ_HANDLED)
-                        report_bad_irq(irq, regs, desc, ret);
-        }
-
-        smp_clear_running(desc);
-}
-
-/*
- * Most edge-triggered IRQ implementations seem to take a broken
- * approach to this. Hence the complexity.
- */
-void
-do_edge_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
-{
-        const unsigned int cpu = smp_processor_id();
-
-        desc->triggered = 1;
-
-        /*
-         * If we're currently running this IRQ, or its disabled,
-         * we shouldn't process the IRQ. Instead, turn on the
-         * hardware masks.
-         */
-        if (unlikely(desc->running || desc->disable_depth))
-                goto running;
-
-        /*
-         * Acknowledge and clear the IRQ, but don't mask it.
-         */
-        desc->chip->ack(irq);
-
-        /*
-         * Mark the IRQ currently in progress.
-         */
-        desc->running = 1;
-
-        kstat_cpu(cpu).irqs[irq]++;
-
-        do {
-                struct irqaction *action;
-
-                action = desc->action;
-                if (!action)
-                        break;
-
-                if (desc->pending && !desc->disable_depth) {
-                        desc->pending = 0;
-                        desc->chip->unmask(irq);
-                }
-
-                __do_irq(irq, action, regs);
-        } while (desc->pending && !desc->disable_depth);
-
-        desc->running = 0;
-
-        /*
-         * If we were disabled or freed, shut down the handler.
-         */
-        if (likely(desc->action && !check_irq_lock(desc, irq, regs)))
-                return;
-
- running:
-        /*
-         * We got another IRQ while this one was masked or
-         * currently running. Delay it.
-         */
-        desc->pending = 1;
-        desc->chip->mask(irq);
-        desc->chip->ack(irq);
-}
-
-/*
- * Level-based IRQ handler. Nice and simple.
- */
-void
-do_level_IRQ(unsigned int irq, struct irqdesc *desc, struct pt_regs *regs)
-{
-        struct irqaction *action;
-        const unsigned int cpu = smp_processor_id();
-
-        desc->triggered = 1;
-
-        /*
-         * Acknowledge, clear _AND_ disable the interrupt.
-         */
-        desc->chip->ack(irq);
-
-        if (likely(!desc->disable_depth)) {
-                kstat_cpu(cpu).irqs[irq]++;
-
-                smp_set_running(desc);
-
-                /*
-                 * Return with this interrupt masked if no action
-                 */
-                action = desc->action;
-                if (action) {
-                        int ret = __do_irq(irq, desc->action, regs);
-
-                        if (ret != IRQ_HANDLED)
-                                report_bad_irq(irq, regs, desc, ret);
-
-                        if (likely(!desc->disable_depth &&
-                                   !check_irq_lock(desc, irq, regs)))
-                                desc->chip->unmask(irq);
-                }
-
-                smp_clear_running(desc);
-        }
-}
-
-static void do_pending_irqs(struct pt_regs *regs)
-{
-        struct list_head head, *l, *n;
-
-        do {
-                struct irqdesc *desc;
-
-                /*
-                 * First, take the pending interrupts off the list.
-                 * The act of calling the handlers may add some IRQs
-                 * back onto the list.
-                 */
-                head = irq_pending;
-                INIT_LIST_HEAD(&irq_pending);
-                head.next->prev = &head;
-                head.prev->next = &head;
-
-                /*
-                 * Now run each entry. We must delete it from our
-                 * list before calling the handler.
-                 */
-                list_for_each_safe(l, n, &head) {
-                        desc = list_entry(l, struct irqdesc, pend);
-                        list_del_init(&desc->pend);
-                        desc_handle_irq(desc - irq_desc, desc, regs);
-                }
-
-                /*
-                 * The list must be empty.
-                 */
-                BUG_ON(!list_empty(&head));
-        } while (!list_empty(&irq_pending));
-}
+/* Handle bad interrupts */
+static struct irq_desc bad_irq_desc = {
+        .handle_irq = handle_bad_irq,
+        .lock = SPIN_LOCK_UNLOCKED
+};
 
 /*
  * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
@@ -549,96 +120,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 		desc = &bad_irq_desc;
 
 	irq_enter();
-	spin_lock(&irq_controller_lock);
-	desc_handle_irq(irq, desc, regs);
 
-	/*
-	 * Now re-run any pending interrupts.
-	 */
-	if (!list_empty(&irq_pending))
-		do_pending_irqs(regs);
+	desc_handle_irq(irq, desc, regs);
 
+	/* AT91 specific workaround */
 	irq_finish(irq);
 
-	spin_unlock(&irq_controller_lock);
 	irq_exit();
 }
 
-void __set_irq_handler(unsigned int irq, irq_handler_t handle, int is_chained)
-{
-        struct irqdesc *desc;
-        unsigned long flags;
-
-        if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to install handler for IRQ%d\n", irq);
-                return;
-        }
-
-        if (handle == NULL)
-                handle = do_bad_IRQ;
-
-        desc = irq_desc + irq;
-
-        if (is_chained && desc->chip == &bad_chip)
-                printk(KERN_WARNING "Trying to install chained handler for IRQ%d\n", irq);
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        if (handle == do_bad_IRQ) {
-                desc->chip->mask(irq);
-                desc->chip->ack(irq);
-                desc->disable_depth = 1;
-        }
-        desc->handle = handle;
-        if (handle != do_bad_IRQ && is_chained) {
-                desc->valid = 0;
-                desc->probe_ok = 0;
-                desc->disable_depth = 0;
-                desc->chip->unmask(irq);
-        }
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-
-void set_irq_chip(unsigned int irq, struct irqchip *chip)
-{
-        struct irqdesc *desc;
-        unsigned long flags;
-
-        if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
-                return;
-        }
-
-        if (chip == NULL)
-                chip = &bad_chip;
-
-        desc = irq_desc + irq;
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        desc->chip = chip;
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-
-int set_irq_type(unsigned int irq, unsigned int type)
-{
-        struct irqdesc *desc;
-        unsigned long flags;
-        int ret = -ENXIO;
-
-        if (irq >= NR_IRQS) {
-                printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
-                return -ENODEV;
-        }
-
-        desc = irq_desc + irq;
-        if (desc->chip->set_type) {
-                spin_lock_irqsave(&irq_controller_lock, flags);
-                ret = desc->chip->set_type(irq, type);
-                spin_unlock_irqrestore(&irq_controller_lock, flags);
-        }
-
-        return ret;
-}
-EXPORT_SYMBOL(set_irq_type);
-
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
 	struct irqdesc *desc;
@@ -650,421 +140,32 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
 	}
 
 	desc = irq_desc + irq;
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        desc->valid = (iflags & IRQF_VALID) != 0;
-        desc->probe_ok = (iflags & IRQF_PROBE) != 0;
-        desc->noautoenable = (iflags & IRQF_NOAUTOEN) != 0;
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-}
-
-int setup_irq(unsigned int irq, struct irqaction *new)
-{
-        int shared = 0;
-        struct irqaction *old, **p;
-        unsigned long flags;
-        struct irqdesc *desc;
-
-        /*
-         * Some drivers like serial.c use request_irq() heavily,
-         * so we have to be careful not to interfere with a
-         * running system.
-         */
-        if (new->flags & SA_SAMPLE_RANDOM) {
-                /*
-                 * This function might sleep, we want to call it first,
-                 * outside of the atomic block.
-                 * Yes, this might clear the entropy pool if the wrong
-                 * driver is attempted to be loaded, without actually
-                 * installing a new handler, but is this really a problem,
-                 * only the sysadmin is able to do this.
-                 */
-                rand_initialize_irq(irq);
-        }
-
-        /*
-         * The following block of code has to be executed atomically
-         */
-        desc = irq_desc + irq;
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        p = &desc->action;
-        if ((old = *p) != NULL) {
-                /*
-                 * Can't share interrupts unless both agree to and are
-                 * the same type.
-                 */
-                if (!(old->flags & new->flags & SA_SHIRQ) ||
-                    (~old->flags & new->flags) & SA_TRIGGER_MASK) {
-                        spin_unlock_irqrestore(&irq_controller_lock, flags);
-                        return -EBUSY;
-                }
-
-                /* add new interrupt at end of irq queue */
-                do {
-                        p = &old->next;
-                        old = *p;
-                } while (old);
-                shared = 1;
-        }
-
-        *p = new;
-
-        if (!shared) {
-                desc->probing = 0;
-                desc->running = 0;
-                desc->pending = 0;
-                desc->disable_depth = 1;
-
-                if (new->flags & SA_TRIGGER_MASK &&
-                    desc->chip->set_type) {
-                        unsigned int type = new->flags & SA_TRIGGER_MASK;
-                        desc->chip->set_type(irq, type);
-                }
-
-                if (!desc->noautoenable) {
-                        desc->disable_depth = 0;
-                        desc->chip->unmask(irq);
-                }
-        }
-
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-        return 0;
-}
-
-/**
- * request_irq - allocate an interrupt line
- * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
- * @irqflags: Interrupt type flags
- * @devname: An ascii name for the claiming device
- * @dev_id: A cookie passed back to the handler function
- *
- * This call allocates interrupt resources and enables the
- * interrupt line and IRQ handling. From the point this
- * call is made your handler function may be invoked. Since
- * your handler function must clear any interrupt the board
- * raises, you must take care both to initialise your hardware
- * and to set up the interrupt handler in the right order.
- *
- * Dev_id must be globally unique. Normally the address of the
- * device data structure is used as the cookie. Since the handler
- * receives this value it makes sense to use it.
- *
- * If your interrupt is shared you must pass a non NULL dev_id
- * as this is required when freeing the interrupt.
- *
- * Flags:
- *
- * SA_SHIRQ             Interrupt is shared
- *
- * SA_INTERRUPT         Disable local interrupts while processing
- *
- * SA_SAMPLE_RANDOM     The interrupt can be used for entropy
- *
- */
-int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
-                unsigned long irq_flags, const char * devname, void *dev_id)
-{
-        unsigned long retval;
-        struct irqaction *action;
-
-        if (irq >= NR_IRQS || !irq_desc[irq].valid || !handler ||
-            (irq_flags & SA_SHIRQ && !dev_id))
-                return -EINVAL;
-
-        action = (struct irqaction *)kmalloc(sizeof(struct irqaction), GFP_KERNEL);
-        if (!action)
-                return -ENOMEM;
-
-        action->handler = handler;
-        action->flags = irq_flags;
-        cpus_clear(action->mask);
-        action->name = devname;
-        action->next = NULL;
-        action->dev_id = dev_id;
-
-        retval = setup_irq(irq, action);
-
-        if (retval)
-                kfree(action);
-        return retval;
-}
-
-EXPORT_SYMBOL(request_irq);
-
-/**
- * free_irq - free an interrupt
- * @irq: Interrupt line to free
- * @dev_id: Device identity to free
- *
- * Remove an interrupt handler. The handler is removed and if the
- * interrupt line is no longer in use by any driver it is disabled.
- * On a shared IRQ the caller must ensure the interrupt is disabled
- * on the card it drives before calling this function.
- *
- * This function must not be called from interrupt context.
- */
-void free_irq(unsigned int irq, void *dev_id)
-{
-        struct irqaction * action, **p;
-        unsigned long flags;
-
-        if (irq >= NR_IRQS || !irq_desc[irq].valid) {
-                printk(KERN_ERR "Trying to free IRQ%d\n",irq);
-                dump_stack();
-                return;
-        }
-
-        spin_lock_irqsave(&irq_controller_lock, flags);
-        for (p = &irq_desc[irq].action; (action = *p) != NULL; p = &action->next) {
-                if (action->dev_id != dev_id)
-                        continue;
-
-                /* Found it - now free it */
-                *p = action->next;
-                break;
-        }
-        spin_unlock_irqrestore(&irq_controller_lock, flags);
-
-        if (!action) {
-                printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
-                dump_stack();
-        } else {
-                synchronize_irq(irq);
-                kfree(action);
-        }
-}
-
-EXPORT_SYMBOL(free_irq);
-
-static DECLARE_MUTEX(probe_sem);
-
-/* Start the interrupt probing. Unlike other architectures,
- * we don't return a mask of interrupts from probe_irq_on,
- * but return the number of interrupts enabled for the probe.
- * The interrupts which have been enabled for probing is
- * instead recorded in the irq_desc structure.
- */
-unsigned long probe_irq_on(void)
-{
-        unsigned int i, irqs = 0;
-        unsigned long delay;
-
-        down(&probe_sem);
-
-        /*
-         * first snaffle up any unassigned but
-         * probe-able interrupts
-         */
-        spin_lock_irq(&irq_controller_lock);
-        for (i = 0; i < NR_IRQS; i++) {
-                if (!irq_desc[i].probe_ok || irq_desc[i].action)
-                        continue;
-
-                irq_desc[i].probing = 1;
-                irq_desc[i].triggered = 0;
-                if (irq_desc[i].chip->set_type)
-                        irq_desc[i].chip->set_type(i, IRQT_PROBE);
-                irq_desc[i].chip->unmask(i);
-                irqs += 1;
-        }
-        spin_unlock_irq(&irq_controller_lock);
-
-        /*
-         * wait for spurious interrupts to mask themselves out again
-         */
-        for (delay = jiffies + HZ/10; time_before(jiffies, delay); )
-                /* min 100ms delay */;
-
-        /*
-         * now filter out any obviously spurious interrupts
-         */
-        spin_lock_irq(&irq_controller_lock);
-        for (i = 0; i < NR_IRQS; i++) {
-                if (irq_desc[i].probing && irq_desc[i].triggered) {
-                        irq_desc[i].probing = 0;
-                        irqs -= 1;
-                }
-        }
-        spin_unlock_irq(&irq_controller_lock);
-
-        return irqs;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-unsigned int probe_irq_mask(unsigned long irqs)
-{
-        unsigned int mask = 0, i;
-
-        spin_lock_irq(&irq_controller_lock);
-        for (i = 0; i < 16 && i < NR_IRQS; i++)
-                if (irq_desc[i].probing && irq_desc[i].triggered)
-                        mask |= 1 << i;
-        spin_unlock_irq(&irq_controller_lock);
-
-        up(&probe_sem);
-
-        return mask;
-}
-EXPORT_SYMBOL(probe_irq_mask);
-
-/*
- * Possible return values:
- *  >= 0 - interrupt number
- *    -1 - no interrupt/many interrupts
- */
-int probe_irq_off(unsigned long irqs)
-{
-        unsigned int i;
-        int irq_found = NO_IRQ;
-
-        /*
-         * look at the interrupts, and find exactly one
-         * that we were probing has been triggered
-         */
-        spin_lock_irq(&irq_controller_lock);
-        for (i = 0; i < NR_IRQS; i++) {
-                if (irq_desc[i].probing &&
-                    irq_desc[i].triggered) {
-                        if (irq_found != NO_IRQ) {
-                                irq_found = NO_IRQ;
-                                goto out;
-                        }
-                        irq_found = i;
-                }
-        }
-
-        if (irq_found == -1)
-                irq_found = NO_IRQ;
-out:
-        spin_unlock_irq(&irq_controller_lock);
-
-        up(&probe_sem);
-
-        return irq_found;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-#ifdef CONFIG_SMP
-static void route_irq(struct irqdesc *desc, unsigned int irq, unsigned int cpu)
-{
-        pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
-
-        spin_lock_irq(&irq_controller_lock);
-        desc->cpu = cpu;
-        desc->chip->set_cpu(desc, irq, cpu);
-        spin_unlock_irq(&irq_controller_lock);
-}
-
-#ifdef CONFIG_PROC_FS
-static int
-irq_affinity_read_proc(char *page, char **start, off_t off, int count,
-                       int *eof, void *data)
-{
-        struct irqdesc *desc = irq_desc + ((int)data);
-        int len = cpumask_scnprintf(page, count, desc->affinity);
-
-        if (count - len < 2)
-                return -EINVAL;
-        page[len++] = '\n';
-        page[len] = '\0';
-
-        return len;
-}
-
-static int
-irq_affinity_write_proc(struct file *file, const char __user *buffer,
-                        unsigned long count, void *data)
-{
-        unsigned int irq = (unsigned int)data;
-        struct irqdesc *desc = irq_desc + irq;
-        cpumask_t affinity, tmp;
-        int ret = -EIO;
-
-        if (!desc->chip->set_cpu)
-                goto out;
-
-        ret = cpumask_parse(buffer, count, affinity);
-        if (ret)
-                goto out;
-
-        cpus_and(tmp, affinity, cpu_online_map);
-        if (cpus_empty(tmp)) {
-                ret = -EINVAL;
-                goto out;
-        }
-
-        desc->affinity = affinity;
-        route_irq(desc, irq, first_cpu(tmp));
-        ret = count;
-
- out:
-        return ret;
-}
-#endif
-#endif
-
-void __init init_irq_proc(void)
-{
-#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
-        struct proc_dir_entry *dir;
-        int irq;
-
-        dir = proc_mkdir("irq", NULL);
-        if (!dir)
-                return;
-
-        for (irq = 0; irq < NR_IRQS; irq++) {
-                struct proc_dir_entry *entry;
-                struct irqdesc *desc;
-                char name[16];
-
-                desc = irq_desc + irq;
-                memset(name, 0, sizeof(name));
-                snprintf(name, sizeof(name) - 1, "%u", irq);
-
-                desc->procdir = proc_mkdir(name, dir);
-                if (!desc->procdir)
-                        continue;
-
-                entry = create_proc_entry("smp_affinity", 0600, desc->procdir);
-                if (entry) {
-                        entry->nlink = 1;
-                        entry->data = (void *)irq;
-                        entry->read_proc = irq_affinity_read_proc;
-                        entry->write_proc = irq_affinity_write_proc;
-                }
-        }
-#endif
-}
+        spin_lock_irqsave(&desc->lock, flags);
+        desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+        if (iflags & IRQF_VALID)
+                desc->status &= ~IRQ_NOREQUEST;
+        if (iflags & IRQF_PROBE)
+                desc->status &= ~IRQ_NOPROBE;
+        if (!(iflags & IRQF_NOAUTOEN))
+                desc->status &= ~IRQ_NOAUTOEN;
+        spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 void __init init_IRQ(void)
 {
-        struct irqdesc *desc;
         int irq;
 
+        for (irq = 0; irq < NR_IRQS; irq++)
+                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_DELAYED_DISABLE |
+                        IRQ_NOPROBE;
+
 #ifdef CONFIG_SMP
         bad_irq_desc.affinity = CPU_MASK_ALL;
         bad_irq_desc.cpu = smp_processor_id();
 #endif
-
-        for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-                *desc = bad_irq_desc;
-                INIT_LIST_HEAD(&desc->pend);
-        }
-
         init_arch_irq();
 }
 
-static int __init noirqdebug_setup(char *str)
-{
-        noirqdebug = 1;
-        return 1;
-}
-
-__setup("noirqdebug", noirqdebug_setup);
-
 #ifdef CONFIG_HOTPLUG_CPU
 /*
  * The CPU has been marked offline. Migrate IRQs off this CPU. If