Diffstat (limited to 'include/linux/interrupt.h')

 include/linux/interrupt.h | 154 +++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 148 insertions(+), 6 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 70741e17011..1f97e3d9263 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -10,10 +10,60 @@
 #include <linux/irqreturn.h>
 #include <linux/hardirq.h>
 #include <linux/sched.h>
+#include <linux/irqflags.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
 
+/*
+ * These correspond to the IORESOURCE_IRQ_* defines in
+ * linux/ioport.h to select the interrupt line behaviour. When
+ * requesting an interrupt without specifying a IRQF_TRIGGER, the
+ * setting should be assumed to be "as already configured", which
+ * may be as per machine or firmware initialisation.
+ */
+#define IRQF_TRIGGER_NONE	0x00000000
+#define IRQF_TRIGGER_RISING	0x00000001
+#define IRQF_TRIGGER_FALLING	0x00000002
+#define IRQF_TRIGGER_HIGH	0x00000004
+#define IRQF_TRIGGER_LOW	0x00000008
+#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
+				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
+#define IRQF_TRIGGER_PROBE	0x00000010
+
+/*
+ * These flags used only by the kernel as part of the
+ * irq handling routines.
+ *
+ * IRQF_DISABLED - keep irqs disabled when calling the action handler
+ * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
+ * IRQF_SHARED - allow sharing the irq among several devices
+ * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
+ * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
+ */
+#define IRQF_DISABLED		0x00000020
+#define IRQF_SAMPLE_RANDOM	0x00000040
+#define IRQF_SHARED		0x00000080
+#define IRQF_PROBE_SHARED	0x00000100
+#define IRQF_TIMER		0x00000200
+#define IRQF_PERCPU		0x00000400
+
+/*
+ * Migration helpers. Scheduled for removal in 1/2007
+ * Do not use for new code !
+ */
+#define SA_INTERRUPT		IRQF_DISABLED
+#define SA_SAMPLE_RANDOM	IRQF_SAMPLE_RANDOM
+#define SA_SHIRQ		IRQF_SHARED
+#define SA_PROBEIRQ		IRQF_PROBE_SHARED
+#define SA_PERCPU		IRQF_PERCPU
+
+#define SA_TRIGGER_LOW		IRQF_TRIGGER_LOW
+#define SA_TRIGGER_HIGH		IRQF_TRIGGER_HIGH
+#define SA_TRIGGER_FALLING	IRQF_TRIGGER_FALLING
+#define SA_TRIGGER_RISING	IRQF_TRIGGER_RISING
+#define SA_TRIGGER_MASK		IRQF_TRIGGER_MASK
+
 struct irqaction {
 	irqreturn_t (*handler)(int, void *, struct pt_regs *);
 	unsigned long flags;
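[Editor's note, not part of the patch: a minimal sketch of how a driver would pass the new IRQF_* flags to request_irq(). The irq number, device name, data pointer, and handler are hypothetical; the handler uses the three-argument signature shown in struct irqaction above, and the flags replace the old SA_* names kept only as migration helpers.]

	/* Hypothetical handler, matching the irqaction handler signature above. */
	static irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	{
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(unsigned int irq, void *dev)
	{
		/* Shared, falling-edge triggered line, using the new flag names. */
		return request_irq(irq, foo_interrupt,
				   IRQF_SHARED | IRQF_TRIGGER_FALLING,
				   "foo", dev);
	}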
@@ -31,12 +81,106 @@ extern int request_irq(unsigned int,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+/*
+ * On lockdep we dont want to enable hardirqs in hardirq
+ * context. Use local_irq_enable_in_hardirq() to annotate
+ * kernel code that has to do this nevertheless (pretty much
+ * the only valid case is for old/broken hardware that is
+ * insanely slow).
+ *
+ * NOTE: in theory this might break fragile code that relies
+ * on hardirq delivery - in practice we dont seem to have such
+ * places left. So the only effect should be slightly increased
+ * irqs-off latencies.
+ */
+#ifdef CONFIG_LOCKDEP
+# define local_irq_enable_in_hardirq()	do { } while (0)
+#else
+# define local_irq_enable_in_hardirq()	local_irq_enable()
+#endif
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern void disable_irq_nosync(unsigned int irq);
 extern void disable_irq(unsigned int irq);
 extern void enable_irq(unsigned int irq);
+
+/*
+ * Special lockdep variants of irq disabling/enabling.
+ * These should be used for locking constructs that
+ * know that a particular irq context which is disabled,
+ * and which is the only irq-context user of a lock,
+ * that it's safe to take the lock in the irq-disabled
+ * section without disabling hardirqs.
+ *
+ * On !CONFIG_LOCKDEP they are equivalent to the normal
+ * irq disable/enable methods.
+ */
+static inline void disable_irq_nosync_lockdep(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
+{
+	disable_irq_nosync(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_save(*flags);
+#endif
+}
+
+static inline void disable_irq_lockdep(unsigned int irq)
+{
+	disable_irq(irq);
+#ifdef CONFIG_LOCKDEP
+	local_irq_disable();
+#endif
+}
+
+static inline void enable_irq_lockdep(unsigned int irq)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_enable();
+#endif
+	enable_irq(irq);
+}
+
+static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
+{
+#ifdef CONFIG_LOCKDEP
+	local_irq_restore(*flags);
 #endif
+	enable_irq(irq);
+}
+
+/* IRQ wakeup (PM) control: */
+extern int set_irq_wake(unsigned int irq, unsigned int on);
+
+static inline int enable_irq_wake(unsigned int irq)
+{
+	return set_irq_wake(irq, 1);
+}
+
+static inline int disable_irq_wake(unsigned int irq)
+{
+	return set_irq_wake(irq, 0);
+}
+
+#else /* !CONFIG_GENERIC_HARDIRQS */
+/*
+ * NOTE: non-genirq architectures, if they want to support the lock
+ * validator need to define the methods below in their asm/irq.h
+ * files, under an #ifdef CONFIG_LOCKDEP section.
+ */
+# ifndef CONFIG_LOCKDEP
+#  define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
+#  define disable_irq_lockdep(irq)		disable_irq(irq)
+#  define enable_irq_lockdep(irq)		enable_irq(irq)
+# endif
+
+#endif /* CONFIG_GENERIC_HARDIRQS */
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x)	(local_softirq_pending() = (x))
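[Editor's note, not part of the patch: a hedged sketch of how the lockdep variants above are intended to be used. The device structure, its spinlock, and the surrounding code are hypothetical; the point is the pairing of the *_irqsave/*_irqrestore helpers around a lock whose only irq-context user is the irq being disabled.]

	/* Hypothetical device structure, for illustration only. */
	struct foo_dev {
		spinlock_t	lock;
		unsigned int	irq;
	};

	static void foo_quiesce(struct foo_dev *dev)
	{
		unsigned long flags;

		/*
		 * dev->irq is the only irq-context user of dev->lock, so once
		 * it is disabled a plain spin_lock() is safe; under
		 * CONFIG_LOCKDEP the helper also disables hardirqs so the
		 * lock validator reaches the same conclusion.
		 */
		disable_irq_nosync_lockdep_irqsave(dev->irq, &flags);
		spin_lock(&dev->lock);
		/* ... touch state shared with the interrupt handler ... */
		spin_unlock(&dev->lock);
		enable_irq_lockdep_irqrestore(dev->irq, &flags);
	}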
@@ -72,13 +216,11 @@ static inline void __deprecated save_and_cli(unsigned long *x)
 #define save_and_cli(x)	save_and_cli(&x)
 #endif /* CONFIG_SMP */
 
-/* SoftIRQ primitives.  */
-#define local_bh_disable() \
-		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
-#define __local_bh_enable() \
-		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)
-
+extern void local_bh_disable(void);
+extern void __local_bh_enable(void);
+extern void _local_bh_enable(void);
 extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
    frequency threaded job scheduling. For almost all the purposes
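[Editor's note, not part of the patch: the hunk above turns local_bh_disable()/__local_bh_enable() from inline preempt-count macros into out-of-line functions (so lockdep can instrument softirq state); the calling convention is unchanged. A minimal, purely illustrative sketch of that unchanged usage, with a hypothetical statistics structure:]

	/* Hypothetical per-device statistics, for illustration only. */
	struct foo_stats {
		unsigned long packets;
	};

	static void foo_count_packet(struct foo_stats *stats)
	{
		/* Keep softirqs (e.g. a tasklet that also updates stats) off this CPU. */
		local_bh_disable();
		stats->packets++;
		local_bh_enable();
	}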