path: root/kernel/irq
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /kernel/irq
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Makefile        5
-rw-r--r--  kernel/irq/autoprobe.c   189
-rw-r--r--  kernel/irq/handle.c      193
-rw-r--r--  kernel/irq/internals.h    18
-rw-r--r--  kernel/irq/manage.c      349
-rw-r--r--  kernel/irq/proc.c        159
-rw-r--r--  kernel/irq/spurious.c     96
7 files changed, 1009 insertions, 0 deletions
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
new file mode 100644
index 000000000000..49378738ff5e
--- /dev/null
+++ b/kernel/irq/Makefile
@@ -0,0 +1,5 @@
1
2obj-y := handle.o manage.o spurious.o
3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o
5
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
new file mode 100644
index 000000000000..98d62d8efeaf
--- /dev/null
+++ b/kernel/irq/autoprobe.c
@@ -0,0 +1,189 @@
1/*
2 * linux/kernel/irq/autoprobe.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the interrupt probing code and driver APIs.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/interrupt.h>
12
13/*
14 * Autodetection depends on the fact that any interrupt that
15 * comes in on an unassigned handler will get stuck with
16 * "IRQ_WAITING" cleared and the interrupt disabled.
17 */
18static DECLARE_MUTEX(probe_sem);
19
20/**
21 * probe_irq_on - begin an interrupt autodetect
22 *
23 * Commence probing for an interrupt. The interrupts are scanned
24 * and a mask of potential interrupt lines is returned.
25 *
26 */
27unsigned long probe_irq_on(void)
28{
29 unsigned long val, delay;
30 irq_desc_t *desc;
31 unsigned int i;
32
33 down(&probe_sem);
34 /*
35 * something may have generated an irq long ago and we want to
36 * flush such a longstanding irq before considering it as spurious.
37 */
38 for (i = NR_IRQS-1; i > 0; i--) {
39 desc = irq_desc + i;
40
41 spin_lock_irq(&desc->lock);
42 if (!irq_desc[i].action)
43 irq_desc[i].handler->startup(i);
44 spin_unlock_irq(&desc->lock);
45 }
46
47 /* Wait for longstanding interrupts to trigger. */
48 for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
49 /* about 20ms delay */ barrier();
50
51 /*
52 * enable any unassigned irqs
53 * (we must startup again here because if a longstanding irq
54 * happened in the previous stage, it may have masked itself)
55 */
56 for (i = NR_IRQS-1; i > 0; i--) {
57 desc = irq_desc + i;
58
59 spin_lock_irq(&desc->lock);
60 if (!desc->action) {
61 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
62 if (desc->handler->startup(i))
63 desc->status |= IRQ_PENDING;
64 }
65 spin_unlock_irq(&desc->lock);
66 }
67
68 /*
69 * Wait for spurious interrupts to trigger
70 */
71 for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
72 /* about 100ms delay */ barrier();
73
74 /*
75 * Now filter out any obviously spurious interrupts
76 */
77 val = 0;
78 for (i = 0; i < NR_IRQS; i++) {
79 irq_desc_t *desc = irq_desc + i;
80 unsigned int status;
81
82 spin_lock_irq(&desc->lock);
83 status = desc->status;
84
85 if (status & IRQ_AUTODETECT) {
86 /* It triggered already - consider it spurious. */
87 if (!(status & IRQ_WAITING)) {
88 desc->status = status & ~IRQ_AUTODETECT;
89 desc->handler->shutdown(i);
90 } else
91 if (i < 32)
92 val |= 1 << i;
93 }
94 spin_unlock_irq(&desc->lock);
95 }
96
97 return val;
98}
99
100EXPORT_SYMBOL(probe_irq_on);
101
102/**
103 * probe_irq_mask - scan a bitmap of interrupt lines
104 * @val: mask of interrupts to consider
105 *
106 * Scan the interrupt lines and return a bitmap of active
107 * autodetect interrupts. The interrupt probe logic state
108 * is then returned to its previous value.
109 *
110 * Note: we need to scan all the irqs even though we will
111 * only return autodetect irq numbers - just so that we reset
112 * them all to a known state.
113 */
114unsigned int probe_irq_mask(unsigned long val)
115{
116 unsigned int mask;
117 int i;
118
119 mask = 0;
120 for (i = 0; i < NR_IRQS; i++) {
121 irq_desc_t *desc = irq_desc + i;
122 unsigned int status;
123
124 spin_lock_irq(&desc->lock);
125 status = desc->status;
126
127 if (status & IRQ_AUTODETECT) {
128 if (i < 16 && !(status & IRQ_WAITING))
129 mask |= 1 << i;
130
131 desc->status = status & ~IRQ_AUTODETECT;
132 desc->handler->shutdown(i);
133 }
134 spin_unlock_irq(&desc->lock);
135 }
136 up(&probe_sem);
137
138 return mask & val;
139}
140EXPORT_SYMBOL(probe_irq_mask);
141
142/**
143 * probe_irq_off - end an interrupt autodetect
144 * @val: mask of potential interrupts (unused)
145 *
146 * Scans the unused interrupt lines and returns the line which
147 * appears to have triggered the interrupt. If no interrupt was
148 * found then zero is returned. If more than one interrupt is
149 * found then the negative of the first candidate is returned to
150 * indicate that there is doubt.
151 *
152 * The interrupt probe logic state is returned to its previous
153 * value.
154 *
155 * BUGS: When used in a module (which arguably shouldn't happen)
156 * nothing prevents two IRQ probe callers from overlapping. The
157 * results of this are non-optimal.
158 */
159int probe_irq_off(unsigned long val)
160{
161 int i, irq_found = 0, nr_irqs = 0;
162
163 for (i = 0; i < NR_IRQS; i++) {
164 irq_desc_t *desc = irq_desc + i;
165 unsigned int status;
166
167 spin_lock_irq(&desc->lock);
168 status = desc->status;
169
170 if (status & IRQ_AUTODETECT) {
171 if (!(status & IRQ_WAITING)) {
172 if (!nr_irqs)
173 irq_found = i;
174 nr_irqs++;
175 }
176 desc->status = status & ~IRQ_AUTODETECT;
177 desc->handler->shutdown(i);
178 }
179 spin_unlock_irq(&desc->lock);
180 }
181 up(&probe_sem);
182
183 if (nr_irqs > 1)
184 irq_found = -irq_found;
185 return irq_found;
186}
187
188EXPORT_SYMBOL(probe_irq_off);
189
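For context, here is a minimal sketch of how a driver would typically consume the probing API above. The my_device_trigger_irq() helper and the "mydev" name are hypothetical placeholders, not part of this commit; a real driver pokes its own hardware between the two calls.

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical helper: make the device raise an interrupt. */
static void my_device_trigger_irq(void)
{
	/* a real driver would write to a device register here */
}

static int __init mydev_autodetect_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned IRQ lines */
	my_device_trigger_irq();	/* force the device to interrupt */
	mdelay(10);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* 0: none, <0: several, >0: the line */

	if (irq <= 0) {
		printk(KERN_WARNING "mydev: IRQ autodetect failed (%d)\n", irq);
		return -ENODEV;
	}
	printk(KERN_INFO "mydev: device appears to use IRQ %d\n", irq);
	return irq;
}
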
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
new file mode 100644
index 000000000000..2fb0e46e11f3
--- /dev/null
+++ b/kernel/irq/handle.c
@@ -0,0 +1,193 @@
1/*
2 * linux/kernel/irq/handle.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the core interrupt handling code.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/random.h>
12#include <linux/interrupt.h>
13#include <linux/kernel_stat.h>
14
15#include "internals.h"
16
17/*
18 * Linux has a controller-independent interrupt architecture.
19 * Every controller has a 'controller-template', that is used
20 * by the main code to do the right thing. Each driver-visible
21 * interrupt source is transparently wired to the appropriate
22 * controller. Thus drivers need not be aware of the
23 * interrupt-controller.
24 *
25 * The code is designed to be easily extended with new/different
26 * interrupt controllers, without having to do assembly magic or
27 * having to touch the generic code.
28 *
29 * Controller mappings for all interrupt sources:
30 */
31irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
32 [0 ... NR_IRQS-1] = {
33 .handler = &no_irq_type,
34 .lock = SPIN_LOCK_UNLOCKED
35 }
36};
37
38/*
39 * Generic 'no controller' code
40 */
41static void end_none(unsigned int irq) { }
42static void enable_none(unsigned int irq) { }
43static void disable_none(unsigned int irq) { }
44static void shutdown_none(unsigned int irq) { }
45static unsigned int startup_none(unsigned int irq) { return 0; }
46
47static void ack_none(unsigned int irq)
48{
49 /*
50 * 'what should we do if we get a hw irq event on an illegal vector'.
51 * Each architecture has to answer this itself.
52 */
53 ack_bad_irq(irq);
54}
55
56struct hw_interrupt_type no_irq_type = {
57 .typename = "none",
58 .startup = startup_none,
59 .shutdown = shutdown_none,
60 .enable = enable_none,
61 .disable = disable_none,
62 .ack = ack_none,
63 .end = end_none,
64 .set_affinity = NULL
65};
66
67/*
68 * Special, empty irq handler:
69 */
70irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
71{
72 return IRQ_NONE;
73}
74
75/*
76 * Have got an event to handle:
77 */
78fastcall int handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
79 struct irqaction *action)
80{
81 int ret, retval = 0, status = 0;
82
83 if (!(action->flags & SA_INTERRUPT))
84 local_irq_enable();
85
86 do {
87 ret = action->handler(irq, action->dev_id, regs);
88 if (ret == IRQ_HANDLED)
89 status |= action->flags;
90 retval |= ret;
91 action = action->next;
92 } while (action);
93
94 if (status & SA_SAMPLE_RANDOM)
95 add_interrupt_randomness(irq);
96 local_irq_disable();
97
98 return retval;
99}
100
101/*
102 * do_IRQ handles all normal device IRQ's (the special
103 * SMP cross-CPU interrupts have their own specific
104 * handlers).
105 */
106fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
107{
108 irq_desc_t *desc = irq_desc + irq;
109 struct irqaction * action;
110 unsigned int status;
111
112 kstat_this_cpu.irqs[irq]++;
113 if (desc->status & IRQ_PER_CPU) {
114 irqreturn_t action_ret;
115
116 /*
117 * No locking required for CPU-local interrupts:
118 */
119 desc->handler->ack(irq);
120 action_ret = handle_IRQ_event(irq, regs, desc->action);
121 if (!noirqdebug)
122 note_interrupt(irq, desc, action_ret);
123 desc->handler->end(irq);
124 return 1;
125 }
126
127 spin_lock(&desc->lock);
128 desc->handler->ack(irq);
129 /*
130 * REPLAY is when Linux resends an IRQ that was dropped earlier
131 * WAITING is used by probe to mark irqs that are being tested
132 */
133 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
134 status |= IRQ_PENDING; /* we _want_ to handle it */
135
136 /*
137 * If the IRQ is disabled for whatever reason, we cannot
138 * use the action we have.
139 */
140 action = NULL;
141 if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
142 action = desc->action;
143 status &= ~IRQ_PENDING; /* we commit to handling */
144 status |= IRQ_INPROGRESS; /* we are handling it */
145 }
146 desc->status = status;
147
148 /*
149 * If there is no IRQ handler or it was disabled, exit early.
150 * Since we set PENDING, if another processor is handling
151 * a different instance of this same irq, the other processor
152 * will take care of it.
153 */
154 if (unlikely(!action))
155 goto out;
156
157 /*
158 * Edge triggered interrupts need to remember
159 * pending events.
160 * This applies to any hw interrupts that allow a second
161 * instance of the same irq to arrive while we are in do_IRQ
162 * or in the handler. But the code here only handles the _second_
163 * instance of the irq, not the third or fourth. So it is mostly
164 * useful for irq hardware that does not mask cleanly in an
165 * SMP environment.
166 */
167 for (;;) {
168 irqreturn_t action_ret;
169
170 spin_unlock(&desc->lock);
171
172 action_ret = handle_IRQ_event(irq, regs, action);
173
174 spin_lock(&desc->lock);
175 if (!noirqdebug)
176 note_interrupt(irq, desc, action_ret);
177 if (likely(!(desc->status & IRQ_PENDING)))
178 break;
179 desc->status &= ~IRQ_PENDING;
180 }
181 desc->status &= ~IRQ_INPROGRESS;
182
183out:
184 /*
185 * The ->end() handler has to deal with interrupts which got
186 * disabled while the handler was running.
187 */
188 desc->handler->end(irq);
189 spin_unlock(&desc->lock);
190
191 return 1;
192}
193
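As an illustration of the "controller-template" design described in the comment at the top of handle.c, a hedged sketch of what an architecture-specific template might look like follows. The MYPIC name and the stub callbacks are hypothetical; a real controller would touch its mask/ack registers in each hook.

#include <linux/irq.h>

static unsigned int mypic_startup(unsigned int irq)
{
	/* a real controller would unmask the line in hardware here */
	return 0;			/* no edge was already pending */
}

static void mypic_noop(unsigned int irq)
{
	/* shutdown/enable/disable/ack/end stubs for this sketch */
}

static struct hw_interrupt_type mypic_irq_type = {
	.typename	= "MYPIC",
	.startup	= mypic_startup,
	.shutdown	= mypic_noop,
	.enable		= mypic_noop,
	.disable	= mypic_noop,
	.ack		= mypic_noop,
	.end		= mypic_noop,
	.set_affinity	= NULL,		/* uniprocessor-only controller */
};

/*
 * At boot the architecture code would wire its lines to this template,
 * replacing no_irq_type, e.g.:
 *
 *	for (i = 0; i < 16; i++)
 *		irq_desc[i].handler = &mypic_irq_type;
 */
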
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
new file mode 100644
index 000000000000..46feba630266
--- /dev/null
+++ b/kernel/irq/internals.h
@@ -0,0 +1,18 @@
1/*
2 * IRQ subsystem internal functions and variables:
3 */
4
5extern int noirqdebug;
6
7#ifdef CONFIG_PROC_FS
8extern void register_irq_proc(unsigned int irq);
9extern void register_handler_proc(unsigned int irq, struct irqaction *action);
10extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
11#else
12static inline void register_irq_proc(unsigned int irq) { }
13static inline void register_handler_proc(unsigned int irq,
14 struct irqaction *action) { }
15static inline void unregister_handler_proc(unsigned int irq,
16 struct irqaction *action) { }
17#endif
18
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
new file mode 100644
index 000000000000..5202e4c4a5b6
--- /dev/null
+++ b/kernel/irq/manage.c
@@ -0,0 +1,349 @@
1/*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains driver APIs to the irq subsystem.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/random.h>
12#include <linux/interrupt.h>
13
14#include "internals.h"
15
16#ifdef CONFIG_SMP
17
18cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
19
20/**
21 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
22 *
23 * This function waits for any pending IRQ handlers for this interrupt
24 * to complete before returning. If you use this function while
25 * holding a resource the IRQ handler may need, you will deadlock.
26 *
27 * This function may be called - with care - from IRQ context.
28 */
29void synchronize_irq(unsigned int irq)
30{
31 struct irq_desc *desc = irq_desc + irq;
32
33 while (desc->status & IRQ_INPROGRESS)
34 cpu_relax();
35}
36
37EXPORT_SYMBOL(synchronize_irq);
38
39#endif
40
41/**
42 * disable_irq_nosync - disable an irq without waiting
43 * @irq: Interrupt to disable
44 *
45 * Disable the selected interrupt line. Disables and Enables are
46 * nested.
47 * Unlike disable_irq(), this function does not ensure existing
48 * instances of the IRQ handler have completed before returning.
49 *
50 * This function may be called from IRQ context.
51 */
52void disable_irq_nosync(unsigned int irq)
53{
54 irq_desc_t *desc = irq_desc + irq;
55 unsigned long flags;
56
57 spin_lock_irqsave(&desc->lock, flags);
58 if (!desc->depth++) {
59 desc->status |= IRQ_DISABLED;
60 desc->handler->disable(irq);
61 }
62 spin_unlock_irqrestore(&desc->lock, flags);
63}
64
65EXPORT_SYMBOL(disable_irq_nosync);
66
67/**
68 * disable_irq - disable an irq and wait for completion
69 * @irq: Interrupt to disable
70 *
71 * Disable the selected interrupt line. Enables and Disables are
72 * nested.
73 * This function waits for any pending IRQ handlers for this interrupt
74 * to complete before returning. If you use this function while
75 * holding a resource the IRQ handler may need, you will deadlock.
76 *
77 * This function may be called - with care - from IRQ context.
78 */
79void disable_irq(unsigned int irq)
80{
81 irq_desc_t *desc = irq_desc + irq;
82
83 disable_irq_nosync(irq);
84 if (desc->action)
85 synchronize_irq(irq);
86}
87
88EXPORT_SYMBOL(disable_irq);
89
90/**
91 * enable_irq - enable handling of an irq
92 * @irq: Interrupt to enable
93 *
94 * Undoes the effect of one call to disable_irq(). If this
95 * matches the last disable, processing of interrupts on this
96 * IRQ line is re-enabled.
97 *
98 * This function may be called from IRQ context.
99 */
100void enable_irq(unsigned int irq)
101{
102 irq_desc_t *desc = irq_desc + irq;
103 unsigned long flags;
104
105 spin_lock_irqsave(&desc->lock, flags);
106 switch (desc->depth) {
107 case 0:
108 WARN_ON(1);
109 break;
110 case 1: {
111 unsigned int status = desc->status & ~IRQ_DISABLED;
112
113 desc->status = status;
114 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
115 desc->status = status | IRQ_REPLAY;
116 hw_resend_irq(desc->handler,irq);
117 }
118 desc->handler->enable(irq);
119 /* fall-through */
120 }
121 default:
122 desc->depth--;
123 }
124 spin_unlock_irqrestore(&desc->lock, flags);
125}
126
127EXPORT_SYMBOL(enable_irq);
128
129/*
130 * Internal function that tells the architecture code whether a
131 * particular irq has been exclusively allocated or is available
132 * for driver use.
133 */
134int can_request_irq(unsigned int irq, unsigned long irqflags)
135{
136 struct irqaction *action;
137
138 if (irq >= NR_IRQS)
139 return 0;
140
141 action = irq_desc[irq].action;
142 if (action)
143 if (irqflags & action->flags & SA_SHIRQ)
144 action = NULL;
145
146 return !action;
147}
148
149/*
150 * Internal function to register an irqaction - typically used to
151 * allocate special interrupts that are part of the architecture.
152 */
153int setup_irq(unsigned int irq, struct irqaction * new)
154{
155 struct irq_desc *desc = irq_desc + irq;
156 struct irqaction *old, **p;
157 unsigned long flags;
158 int shared = 0;
159
160 if (desc->handler == &no_irq_type)
161 return -ENOSYS;
162 /*
163 * Some drivers like serial.c use request_irq() heavily,
164 * so we have to be careful not to interfere with a
165 * running system.
166 */
167 if (new->flags & SA_SAMPLE_RANDOM) {
168 /*
169 * This function might sleep, so we want to call it first,
170 * outside of the atomic block.
171 * Yes, this might clear the entropy pool if the wrong
172 * driver is attempted to be loaded without actually
173 * installing a new handler, but is this really a problem?
174 * Only the sysadmin is able to do this.
175 */
176 rand_initialize_irq(irq);
177 }
178
179 /*
180 * The following block of code has to be executed atomically
181 */
182 spin_lock_irqsave(&desc->lock,flags);
183 p = &desc->action;
184 if ((old = *p) != NULL) {
185 /* Can't share interrupts unless both agree to */
186 if (!(old->flags & new->flags & SA_SHIRQ)) {
187 spin_unlock_irqrestore(&desc->lock,flags);
188 return -EBUSY;
189 }
190
191 /* add new interrupt at end of irq queue */
192 do {
193 p = &old->next;
194 old = *p;
195 } while (old);
196 shared = 1;
197 }
198
199 *p = new;
200
201 if (!shared) {
202 desc->depth = 0;
203 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT |
204 IRQ_WAITING | IRQ_INPROGRESS);
205 if (desc->handler->startup)
206 desc->handler->startup(irq);
207 else
208 desc->handler->enable(irq);
209 }
210 spin_unlock_irqrestore(&desc->lock,flags);
211
212 new->irq = irq;
213 register_irq_proc(irq);
214 new->dir = NULL;
215 register_handler_proc(irq, new);
216
217 return 0;
218}
219
220/**
221 * free_irq - free an interrupt
222 * @irq: Interrupt line to free
223 * @dev_id: Device identity to free
224 *
225 * Remove an interrupt handler. The handler is removed and if the
226 * interrupt line is no longer in use by any driver it is disabled.
227 * On a shared IRQ the caller must ensure the interrupt is disabled
228 * on the card it drives before calling this function. The function
229 * does not return until any executing interrupts for this IRQ
230 * have completed.
231 *
232 * This function must not be called from interrupt context.
233 */
234void free_irq(unsigned int irq, void *dev_id)
235{
236 struct irq_desc *desc;
237 struct irqaction **p;
238 unsigned long flags;
239
240 if (irq >= NR_IRQS)
241 return;
242
243 desc = irq_desc + irq;
244 spin_lock_irqsave(&desc->lock,flags);
245 p = &desc->action;
246 for (;;) {
247 struct irqaction * action = *p;
248
249 if (action) {
250 struct irqaction **pp = p;
251
252 p = &action->next;
253 if (action->dev_id != dev_id)
254 continue;
255
256 /* Found it - now remove it from the list of entries */
257 *pp = action->next;
258 if (!desc->action) {
259 desc->status |= IRQ_DISABLED;
260 if (desc->handler->shutdown)
261 desc->handler->shutdown(irq);
262 else
263 desc->handler->disable(irq);
264 }
265 spin_unlock_irqrestore(&desc->lock,flags);
266 unregister_handler_proc(irq, action);
267
268 /* Make sure it's not being used on another CPU */
269 synchronize_irq(irq);
270 kfree(action);
271 return;
272 }
273 printk(KERN_ERR "Trying to free free IRQ%d\n",irq);
274 spin_unlock_irqrestore(&desc->lock,flags);
275 return;
276 }
277}
278
279EXPORT_SYMBOL(free_irq);
280
281/**
282 * request_irq - allocate an interrupt line
283 * @irq: Interrupt line to allocate
284 * @handler: Function to be called when the IRQ occurs
285 * @irqflags: Interrupt type flags
286 * @devname: An ascii name for the claiming device
287 * @dev_id: A cookie passed back to the handler function
288 *
289 * This call allocates interrupt resources and enables the
290 * interrupt line and IRQ handling. From the point this
291 * call is made your handler function may be invoked. Since
292 * your handler function must clear any interrupt the board
293 * raises, you must take care both to initialise your hardware
294 * and to set up the interrupt handler in the right order.
295 *
296 * Dev_id must be globally unique. Normally the address of the
297 * device data structure is used as the cookie. Since the handler
298 * receives this value it makes sense to use it.
299 *
300 * If your interrupt is shared you must pass a non-NULL dev_id
301 * as this is required when freeing the interrupt.
302 *
303 * Flags:
304 *
305 * SA_SHIRQ Interrupt is shared
306 * SA_INTERRUPT Disable local interrupts while processing
307 * SA_SAMPLE_RANDOM The interrupt can be used for entropy
308 *
309 */
310int request_irq(unsigned int irq,
311 irqreturn_t (*handler)(int, void *, struct pt_regs *),
312 unsigned long irqflags, const char * devname, void *dev_id)
313{
314 struct irqaction * action;
315 int retval;
316
317 /*
318 * Sanity-check: shared interrupts must pass in a real dev-ID,
319 * otherwise we'll have trouble later trying to figure out
320 * which interrupt is which (messes up the interrupt freeing
321 * logic etc).
322 */
323 if ((irqflags & SA_SHIRQ) && !dev_id)
324 return -EINVAL;
325 if (irq >= NR_IRQS)
326 return -EINVAL;
327 if (!handler)
328 return -EINVAL;
329
330 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
331 if (!action)
332 return -ENOMEM;
333
334 action->handler = handler;
335 action->flags = irqflags;
336 cpus_clear(action->mask);
337 action->name = devname;
338 action->next = NULL;
339 action->dev_id = dev_id;
340
341 retval = setup_irq(irq, action);
342 if (retval)
343 kfree(action);
344
345 return retval;
346}
347
348EXPORT_SYMBOL(request_irq);
349
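A hedged sketch of the usual consumer side of the request_irq()/free_irq() API above follows. struct mydev and the mydev_pending() check are hypothetical stand-ins for real device state; the point is the shared-IRQ discipline of returning IRQ_NONE when the card did not interrupt.

#include <linux/interrupt.h>
#include <linux/signal.h>
#include <linux/errno.h>

struct mydev {
	int irq;
};

/* Hypothetical check: did our card actually raise this interrupt? */
static int mydev_pending(struct mydev *dev)
{
	return 1;	/* a real driver would read a status register */
}

static irqreturn_t mydev_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dev = dev_id;

	if (!mydev_pending(dev))
		return IRQ_NONE;	/* shared line, not ours */

	/* ... service and acknowledge the device here ... */
	return IRQ_HANDLED;
}

static int mydev_attach(struct mydev *dev, int irq)
{
	int err;

	/* SA_SHIRQ: the line may be shared, so dev_id must be unique */
	err = request_irq(irq, mydev_interrupt, SA_SHIRQ, "mydev", dev);
	if (err)
		return err;
	dev->irq = irq;
	return 0;
}

static void mydev_detach(struct mydev *dev)
{
	/* waits for any running handler instances before returning */
	free_irq(dev->irq, dev);
}
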
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
new file mode 100644
index 000000000000..85d08daa6600
--- /dev/null
+++ b/kernel/irq/proc.c
@@ -0,0 +1,159 @@
1/*
2 * linux/kernel/irq/proc.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains the /proc/irq/ handling code.
7 */
8
9#include <linux/irq.h>
10#include <linux/proc_fs.h>
11#include <linux/interrupt.h>
12
13static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
14
15#ifdef CONFIG_SMP
16
17/*
18 * The /proc/irq/<irq>/smp_affinity values:
19 */
20static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
21
22void __attribute__((weak))
23proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
24{
25 irq_affinity[irq] = mask_val;
26 irq_desc[irq].handler->set_affinity(irq, mask_val);
27}
28
29static int irq_affinity_read_proc(char *page, char **start, off_t off,
30 int count, int *eof, void *data)
31{
32 int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
33
34 if (count - len < 2)
35 return -EINVAL;
36 len += sprintf(page + len, "\n");
37 return len;
38}
39
40int no_irq_affinity;
41static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
42 unsigned long count, void *data)
43{
44 unsigned int irq = (int)(long)data, full_count = count, err;
45 cpumask_t new_value, tmp;
46
47 if (!irq_desc[irq].handler->set_affinity || no_irq_affinity)
48 return -EIO;
49
50 err = cpumask_parse(buffer, count, new_value);
51 if (err)
52 return err;
53
54 /*
55 * Do not allow disabling IRQs completely - it's a too easy
56 * way to make the system unusable accidentally :-) At least
57 * one online CPU still has to be targeted.
58 */
59 cpus_and(tmp, new_value, cpu_online_map);
60 if (cpus_empty(tmp))
61 return -EINVAL;
62
63 proc_set_irq_affinity(irq, new_value);
64
65 return full_count;
66}
67
68#endif
69
70#define MAX_NAMELEN 128
71
72static int name_unique(unsigned int irq, struct irqaction *new_action)
73{
74 struct irq_desc *desc = irq_desc + irq;
75 struct irqaction *action;
76
77 for (action = desc->action ; action; action = action->next)
78 if ((action != new_action) && action->name &&
79 !strcmp(new_action->name, action->name))
80 return 0;
81 return 1;
82}
83
84void register_handler_proc(unsigned int irq, struct irqaction *action)
85{
86 char name [MAX_NAMELEN];
87
88 if (!irq_dir[irq] || action->dir || !action->name ||
89 !name_unique(irq, action))
90 return;
91
92 memset(name, 0, MAX_NAMELEN);
93 snprintf(name, MAX_NAMELEN, "%s", action->name);
94
95 /* create /proc/irq/1234/handler/ */
96 action->dir = proc_mkdir(name, irq_dir[irq]);
97}
98
99#undef MAX_NAMELEN
100
101#define MAX_NAMELEN 10
102
103void register_irq_proc(unsigned int irq)
104{
105 char name [MAX_NAMELEN];
106
107 if (!root_irq_dir ||
108 (irq_desc[irq].handler == &no_irq_type) ||
109 irq_dir[irq])
110 return;
111
112 memset(name, 0, MAX_NAMELEN);
113 sprintf(name, "%d", irq);
114
115 /* create /proc/irq/1234 */
116 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
117
118#ifdef CONFIG_SMP
119 {
120 struct proc_dir_entry *entry;
121
122 /* create /proc/irq/<irq>/smp_affinity */
123 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
124
125 if (entry) {
126 entry->nlink = 1;
127 entry->data = (void *)(long)irq;
128 entry->read_proc = irq_affinity_read_proc;
129 entry->write_proc = irq_affinity_write_proc;
130 }
131 smp_affinity_entry[irq] = entry;
132 }
133#endif
134}
135
136#undef MAX_NAMELEN
137
138void unregister_handler_proc(unsigned int irq, struct irqaction *action)
139{
140 if (action->dir)
141 remove_proc_entry(action->dir->name, irq_dir[irq]);
142}
143
144void init_irq_proc(void)
145{
146 int i;
147
148 /* create /proc/irq */
149 root_irq_dir = proc_mkdir("irq", NULL);
150 if (!root_irq_dir)
151 return;
152
153 /*
154 * Create entries for all existing IRQs.
155 */
156 for (i = 0; i < NR_IRQS; i++)
157 register_irq_proc(i);
158}
159
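Since the smp_affinity file above parses its input with cpumask_parse(), it expects a hexadecimal CPU bitmap; a hedged userspace sketch of pinning an interrupt to CPU 0 follows (IRQ 19 is just an example number). Masks that contain no online CPU are rejected with EINVAL by irq_affinity_write_proc().

#include <stdio.h>

int main(void)
{
	const int irq = 19;			/* example IRQ number */
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/irq/%d/smp_affinity", irq);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("1\n", f);			/* hex mask 0x1 == CPU 0 only */
	fclose(f);
	return 0;
}
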
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
new file mode 100644
index 000000000000..f6297c306905
--- /dev/null
+++ b/kernel/irq/spurious.c
@@ -0,0 +1,96 @@
1/*
2 * linux/kernel/irq/spurious.c
3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 *
6 * This file contains spurious interrupt handling.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/kallsyms.h>
12#include <linux/interrupt.h>
13
14/*
15 * If 99,900 of the previous 100,000 interrupts have not been handled
16 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
17 * and try to turn the IRQ off.
18 *
19 * (The other 100-of-100,000 interrupts may have been a correctly
20 * functioning device sharing an IRQ with the failing one)
21 *
22 * Called under desc->lock
23 */
24
25static void
26__report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
27{
28 struct irqaction *action;
29
30 if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
31 printk(KERN_ERR "irq event %d: bogus return value %x\n",
32 irq, action_ret);
33 } else {
34 printk(KERN_ERR "irq %d: nobody cared!\n", irq);
35 }
36 dump_stack();
37 printk(KERN_ERR "handlers:\n");
38 action = desc->action;
39 while (action) {
40 printk(KERN_ERR "[<%p>]", action->handler);
41 print_symbol(" (%s)",
42 (unsigned long)action->handler);
43 printk("\n");
44 action = action->next;
45 }
46}
47
48void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
49{
50 static int count = 100;
51
52 if (count > 0) {
53 count--;
54 __report_bad_irq(irq, desc, action_ret);
55 }
56}
57
58void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
59{
60 if (action_ret != IRQ_HANDLED) {
61 desc->irqs_unhandled++;
62 if (action_ret != IRQ_NONE)
63 report_bad_irq(irq, desc, action_ret);
64 }
65
66 desc->irq_count++;
67 if (desc->irq_count < 100000)
68 return;
69
70 desc->irq_count = 0;
71 if (desc->irqs_unhandled > 99900) {
72 /*
73 * The interrupt is stuck
74 */
75 __report_bad_irq(irq, desc, action_ret);
76 /*
77 * Now kill the IRQ
78 */
79 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
80 desc->status |= IRQ_DISABLED;
81 desc->handler->disable(irq);
82 }
83 desc->irqs_unhandled = 0;
84}
85
86int noirqdebug;
87
88int __init noirqdebug_setup(char *str)
89{
90 noirqdebug = 1;
91 printk(KERN_INFO "IRQ lockup detection disabled\n");
92 return 1;
93}
94
95__setup("noirqdebug", noirqdebug_setup);
96
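To make the shutdown policy above concrete, here is a hedged, standalone simulation of the counting that note_interrupt() performs: interrupts are counted in windows of 100,000, and a line whose unhandled count exceeds 99,900 in a window would be disabled; the slack of roughly 0.1% leaves room for a correctly working device sharing the line.

#include <stdio.h>

int main(void)
{
	unsigned int irq_count = 0, irqs_unhandled = 0;
	unsigned long i, window = 0;

	/* Simulate a stuck line: no handler ever returns IRQ_HANDLED. */
	for (i = 0; i < 300000; i++) {
		irqs_unhandled++;
		if (++irq_count < 100000)
			continue;
		if (irqs_unhandled > 99900)
			printf("window %lu: IRQ would be disabled\n", window);
		window++;
		irq_count = 0;
		irqs_unhandled = 0;
	}
	return 0;
}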