Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--  include/linux/interrupt.h  289
1 file changed, 289 insertions, 0 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
new file mode 100644
index 000000000000..d99e7aeb7d33
--- /dev/null
+++ b/include/linux/interrupt.h
@@ -0,0 +1,289 @@
/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * For 2.4.x compatibility, 2.4.x can use
 *
 *	typedef void irqreturn_t;
 *	#define IRQ_NONE
 *	#define IRQ_HANDLED
 *	#define IRQ_RETVAL(x)
 *
 * to mix old-style and new-style irq handler returns.
 *
 * IRQ_NONE means we didn't handle the interrupt.
 * IRQ_HANDLED means we did have a valid interrupt and handled it.
 * IRQ_RETVAL(x) selects between the two depending on whether x is
 * non-zero (non-zero means handled).
 */
typedef int irqreturn_t;

#define IRQ_NONE (0)
#define IRQ_HANDLED (1)
#define IRQ_RETVAL(x) ((x) != 0)

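/*
 * Illustrative sketch (not part of this header): a driver interrupt
 * handler built around IRQ_RETVAL().  'struct foo_dev', its 'mmio'
 * register base and the FOO_* register offsets are hypothetical;
 * readl()/writel() come from <asm/io.h>.
 */
static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct foo_dev *dev = dev_id;
        int handled;

        /* Non-zero status means the device raised this interrupt
         * (this matters on shared IRQ lines). */
        handled = readl(dev->mmio + FOO_IRQ_STATUS) != 0;
        if (handled)
                writel(~0, dev->mmio + FOO_IRQ_ACK);  /* ack the source */

        return IRQ_RETVAL(handled);
}
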
struct irqaction {
        irqreturn_t (*handler)(int, void *, struct pt_regs *);
        unsigned long flags;
        cpumask_t mask;
        const char *name;
        void *dev_id;
        struct irqaction *next;
        int irq;
        struct proc_dir_entry *dir;
};

extern irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs);
extern int request_irq(unsigned int,
                       irqreturn_t (*handler)(int, void *, struct pt_regs *),
                       unsigned long, const char *, void *);
extern void free_irq(unsigned int, void *);

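/*
 * Illustrative sketch (not part of this header): registering and
 * releasing the handler above.  'dev->irq' and the "foo" name are
 * hypothetical; SA_SHIRQ (this era's shared-line flag, from the
 * signal headers) marks the line as shareable.
 */
static int foo_setup_irq(struct foo_dev *dev)
{
        /* dev is handed back to foo_interrupt() as dev_id, and is also
         * the cookie that identifies this handler to free_irq(). */
        return request_irq(dev->irq, foo_interrupt, SA_SHIRQ, "foo", dev);
}

static void foo_teardown_irq(struct foo_dev *dev)
{
        free_irq(dev->irq, dev);        /* must match the dev_id above */
}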

#ifdef CONFIG_GENERIC_HARDIRQS
extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
#endif
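
/*
 * Illustrative sketch (not part of this header): disable_irq() masks
 * the line and waits for any running handler to finish, so it must not
 * be called while holding a lock that handler also takes.  The foo_*
 * names are hypothetical.
 */
static void foo_reconfigure(struct foo_dev *dev)
{
        disable_irq(dev->irq);          /* mask line + wait for handlers */
        foo_rewrite_registers(dev);     /* hypothetical quiesced update */
        enable_irq(dev->irq);           /* unmask the line again */
}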

/*
 * Temporary defines for UP kernels, until all code gets fixed.
 */
#ifndef CONFIG_SMP
static inline void __deprecated cli(void)
{
        local_irq_disable();
}
static inline void __deprecated sti(void)
{
        local_irq_enable();
}
static inline void __deprecated save_flags(unsigned long *x)
{
        local_save_flags(*x);
}
#define save_flags(x) save_flags(&x)
static inline void __deprecated restore_flags(unsigned long x)
{
        local_irq_restore(x);
}

static inline void __deprecated save_and_cli(unsigned long *x)
{
        local_irq_save(*x);
}
#define save_and_cli(x) save_and_cli(&x)
#endif /* !CONFIG_SMP */
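
/*
 * Illustrative sketch (not part of this header): the deprecated
 * cli()/sti()/save_flags() pattern and its local_irq_*() replacement.
 */
static void foo_old_style(void)
{
        unsigned long flags;

        save_flags(flags);              /* deprecated: meaningless on SMP */
        cli();
        /* ... touch data shared with interrupt context ... */
        restore_flags(flags);
}

static void foo_new_style(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* disables irqs on this CPU only */
        /* ... touch data shared with interrupt context ... */
        local_irq_restore(flags);
}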

/* SoftIRQ primitives. */
#define local_bh_disable() \
                do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
                do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);

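/*
 * Illustrative sketch (not part of this header): protecting data shared
 * with softirq/tasklet context.  local_bh_disable() keeps softirqs from
 * running on this CPU without disabling hard interrupts; the final
 * local_bh_enable() may run softirqs that became pending meanwhile.
 * The 'stats' field is hypothetical.
 */
static void foo_update_stats(struct foo_dev *dev)
{
        local_bh_disable();     /* no softirq can preempt us here */
        dev->stats.packets++;   /* also updated from tasklet context */
        local_bh_enable();
}
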
/* PLEASE avoid allocating new softirqs unless you _really_ need
   high-frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
        HI_SOFTIRQ=0,
        TIMER_SOFTIRQ,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
        SCSI_SOFTIRQ,
        TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
        void (*action)(struct softirq_action *);
        void *data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));

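/*
 * Illustrative sketch (not part of this header): wiring up a softirq.
 * A real user would claim a slot in the enum above; FOO_SOFTIRQ,
 * 'struct foo_queue' and the foo_* helpers are hypothetical.
 */
static void foo_softirq_action(struct softirq_action *a)
{
        struct foo_queue *q = a->data;  /* the cookie registered below */
        foo_drain_queue(q);             /* softirq context, irqs enabled */
}

static void foo_softirq_setup(struct foo_queue *q)
{
        open_softirq(FOO_SOFTIRQ, foo_softirq_action, q);
}

/* Typically called from an interrupt handler to defer work: */
static void foo_kick(void)
{
        raise_softirq(FOO_SOFTIRQ);     /* mark pending on this CPU */
}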

/* Tasklets --- multithreaded analogue of BHs.

   Main difference from generic softirqs: the same tasklet
   runs on only one CPU at a time.

   Main difference from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, the tasklet is guaranteed
     to be executed on some cpu at least once afterwards.
   * If the tasklet is already scheduled but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized with respect to itself, but not
     with respect to other tasklets.  If a client needs inter-tasklet
     synchronization, it must provide it, e.g. with spinlocks.
 */

struct tasklet_struct
{
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
        void (*func)(unsigned long);
        unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

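/*
 * Illustrative sketch (not part of this header): a statically declared
 * tasklet scheduled from an interrupt handler.  All foo_* names are
 * hypothetical.
 */
static struct foo_dev foo_device;

static void foo_tasklet_fn(unsigned long data)
{
        struct foo_dev *dev = (struct foo_dev *)data;
        foo_do_bottom_half(dev);        /* softirq context, irqs enabled */
}

static DECLARE_TASKLET(foo_tasklet, foo_tasklet_fn,
                       (unsigned long)&foo_device);

static irqreturn_t foo_irq(int irq, void *dev_id, struct pt_regs *regs)
{
        /* ack the hardware quickly, defer the heavy lifting */
        tasklet_schedule(&foo_tasklet);
        return IRQ_HANDLED;
}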

enum
{
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_clear_bit();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
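
/*
 * Illustrative sketch (not part of this header): roughly how the
 * softirq core uses the RUN bit, simplified from kernel/softirq.c.
 * Winning tasklet_trylock() is what guarantees a tasklet runs on at
 * most one CPU at a time.
 */
static void foo_run_one(struct tasklet_struct *t)
{
        if (tasklet_trylock(t)) {               /* won the RUN bit */
                if (!atomic_read(&t->count)) {  /* not disabled */
                        clear_bit(TASKLET_STATE_SCHED, &t->state);
                        t->func(t->data);
                }
                tasklet_unlock(t);
        }
        /* else: running on another CPU; the core requeues it for later */
}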

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
        atomic_inc(&t->count);
        smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_wait(t);
        smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);

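/*
 * Illustrative sketch (not part of this header): dynamic tasklet
 * lifecycle.  tasklet_disable() returns only once a running instance
 * has finished; tasklet_kill() is the usual teardown step.  The
 * 'rx_tasklet' member is hypothetical.
 */
static void foo_open(struct foo_dev *dev)
{
        tasklet_init(&dev->rx_tasklet, foo_tasklet_fn, (unsigned long)dev);
}

static void foo_suspend(struct foo_dev *dev)
{
        tasklet_disable(&dev->rx_tasklet);      /* block it + wait */
        /* ... device quiesced; the tasklet cannot run here ... */
        tasklet_enable(&dev->rx_tasklet);
}

static void foo_close(struct foo_dev *dev)
{
        tasklet_kill(&dev->rx_tasklet); /* wait out any scheduled run */
}
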
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows (a code sketch
 * following these steps appears below):
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated IRQs.
 *
 * probe_irq_off() takes the mask as a parameter
 * and returns the IRQ number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
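
/*
 * Illustrative sketch (not part of this header) following the steps
 * above.  Interrupts are assumed already enabled (step 2); the foo_*
 * device helpers are hypothetical and mdelay() comes from
 * <linux/delay.h>.
 */
static int __init foo_probe_irq(struct foo_dev *dev)
{
        unsigned long irqs;
        int irq;

        foo_mask_device_irq(dev);       /* step 1: quiesce the device */
        irqs = probe_irq_on();          /* step 3: grab idle IRQ lines */
        foo_trigger_interrupt(dev);     /* step 4: make the device fire */
        mdelay(20);                     /* step 5: give it time to arrive */
        irq = probe_irq_off(irqs);      /* step 6: 0=none, <0=multiple */
        foo_ack_device_irq(dev);        /* step 7: clear pending interrupt */

        return irq;                     /* step 8: caller may loop again */
}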

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
        return 0;
}
static inline int probe_irq_off(unsigned long val)
{
        return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
        return 0;
}
#else
extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
#endif

#endif /* _LINUX_INTERRUPT_H */