Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  634
1 file changed, 430 insertions, 204 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..f7ce0021e1c4 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@ | |||
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
21 | __read_mostly bool force_irqthreads; | ||
22 | |||
23 | static int __init setup_forced_irqthreads(char *arg) | ||
24 | { | ||
25 | force_irqthreads = true; | ||
26 | return 0; | ||
27 | } | ||
28 | early_param("threadirqs", setup_forced_irqthreads); | ||
29 | #endif | ||
30 | |||
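The new early_param above is the entire opt-in mechanism: booting with "threadirqs" on the kernel command line flips force_irqthreads before any interrupt is requested. A minimal illustration (the bootloader entry is hypothetical, not part of this patch):

        linux /boot/vmlinuz root=/dev/sda1 ro quiet threadirqs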
20 | /** | 31 | /** |
21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
22 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
@@ -30,7 +41,7 @@ | |||
30 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
31 | { | 42 | { |
32 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
33 | unsigned int status; | 44 | bool inprogress; |
34 | 45 | ||
35 | if (!desc) | 46 | if (!desc) |
36 | return; | 47 | return; |
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
42 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
43 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
44 | */ | 55 | */ |
45 | while (desc->status & IRQ_INPROGRESS) | 56 | while (irqd_irq_inprogress(&desc->irq_data)) |
46 | cpu_relax(); | 57 | cpu_relax(); |
47 | 58 | ||
48 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
49 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
50 | status = desc->status; | 61 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
51 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
52 | 63 | ||
53 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
54 | } while (status & IRQ_INPROGRESS); | 65 | } while (inprogress); |
55 | 66 | ||
56 | /* | 67 | /* |
57 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
73 | { | 84 | { |
74 | struct irq_desc *desc = irq_to_desc(irq); | 85 | struct irq_desc *desc = irq_to_desc(irq); |
75 | 86 | ||
76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || | 87 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
77 | !desc->irq_data.chip->irq_set_affinity) | 88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
78 | return 0; | 89 | return 0; |
79 | 90 | ||
80 | return 1; | 91 | return 1; |
@@ -100,67 +111,180 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
100 | } | 111 | } |
101 | } | 112 | } |
102 | 113 | ||
114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | ||
116 | { | ||
117 | return irqd_can_move_in_process_context(data); | ||
118 | } | ||
119 | static inline bool irq_move_pending(struct irq_data *data) | ||
120 | { | ||
121 | return irqd_is_setaffinity_pending(data); | ||
122 | } | ||
123 | static inline void | ||
124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | ||
125 | { | ||
126 | cpumask_copy(desc->pending_mask, mask); | ||
127 | } | ||
128 | static inline void | ||
129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | ||
130 | { | ||
131 | cpumask_copy(mask, desc->pending_mask); | ||
132 | } | ||
133 | #else | ||
134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } | ||
135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } | ||
136 | static inline void | ||
137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | ||
138 | static inline void | ||
139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | ||
140 | #endif | ||
141 | |||
142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | ||
143 | { | ||
144 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
145 | struct irq_desc *desc = irq_data_to_desc(data); | ||
146 | int ret = 0; | ||
147 | |||
148 | if (!chip || !chip->irq_set_affinity) | ||
149 | return -EINVAL; | ||
150 | |||
151 | if (irq_can_move_pcntxt(data)) { | ||
152 | ret = chip->irq_set_affinity(data, mask, false); | ||
153 | switch (ret) { | ||
154 | case IRQ_SET_MASK_OK: | ||
155 | cpumask_copy(data->affinity, mask); | ||
156 | case IRQ_SET_MASK_OK_NOCOPY: | ||
157 | irq_set_thread_affinity(desc); | ||
158 | ret = 0; | ||
159 | } | ||
160 | } else { | ||
161 | irqd_set_move_pending(data); | ||
162 | irq_copy_pending(desc, mask); | ||
163 | } | ||
164 | |||
165 | if (desc->affinity_notify) { | ||
166 | kref_get(&desc->affinity_notify->kref); | ||
167 | schedule_work(&desc->affinity_notify->work); | ||
168 | } | ||
169 | irqd_set(data, IRQD_AFFINITY_SET); | ||
170 | |||
171 | return ret; | ||
172 | } | ||
173 | |||
103 | /** | 174 | /** |
104 | * irq_set_affinity - Set the irq affinity of a given irq | 175 | * irq_set_affinity - Set the irq affinity of a given irq |
105 | * @irq: Interrupt to set affinity | 176 | * @irq: Interrupt to set affinity |
106 | * @cpumask: cpumask | 177 | * @mask: cpumask |
107 | * | 178 | * |
108 | */ | 179 | */ |
109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
110 | { | 181 | { |
111 | struct irq_desc *desc = irq_to_desc(irq); | 182 | struct irq_desc *desc = irq_to_desc(irq); |
112 | struct irq_chip *chip = desc->irq_data.chip; | ||
113 | unsigned long flags; | 183 | unsigned long flags; |
184 | int ret; | ||
114 | 185 | ||
115 | if (!chip->irq_set_affinity) | 186 | if (!desc) |
116 | return -EINVAL; | 187 | return -EINVAL; |
117 | 188 | ||
118 | raw_spin_lock_irqsave(&desc->lock, flags); | 189 | raw_spin_lock_irqsave(&desc->lock, flags); |
119 | 190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | |
120 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
121 | if (desc->status & IRQ_MOVE_PCNTXT) { | ||
122 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | ||
123 | cpumask_copy(desc->irq_data.affinity, cpumask); | ||
124 | irq_set_thread_affinity(desc); | ||
125 | } | ||
126 | } | ||
127 | else { | ||
128 | desc->status |= IRQ_MOVE_PENDING; | ||
129 | cpumask_copy(desc->pending_mask, cpumask); | ||
130 | } | ||
131 | #else | ||
132 | if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) { | ||
133 | cpumask_copy(desc->irq_data.affinity, cpumask); | ||
134 | irq_set_thread_affinity(desc); | ||
135 | } | ||
136 | #endif | ||
137 | desc->status |= IRQ_AFFINITY_SET; | ||
138 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
139 | return 0; | 192 | return ret; |
140 | } | 193 | } |
141 | 194 | ||
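Caller-side the reworked path is unchanged; a minimal sketch, assuming a driver that wants its (hypothetical) irq pinned to CPU 2:

        /* Only balanceable lines with a set_affinity callback qualify */
        if (irq_can_set_affinity(irq))
                err = irq_set_affinity(irq, cpumask_of(2));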
142 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 195 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
143 | { | 196 | { |
197 | unsigned long flags; | ||
198 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
199 | |||
200 | if (!desc) | ||
201 | return -EINVAL; | ||
202 | desc->affinity_hint = m; | ||
203 | irq_put_desc_unlock(desc, flags); | ||
204 | return 0; | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | ||
207 | |||
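Typical driver usage of the hint, for reference (irq and cpu are hypothetical): publish a preferred mask for user-space balancers via /proc/irq/<n>/affinity_hint, and clear it again before the line is freed:

        /* at setup time */
        irq_set_affinity_hint(irq, cpumask_of(cpu));

        /* at teardown, before free_irq() */
        irq_set_affinity_hint(irq, NULL);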
208 | static void irq_affinity_notify(struct work_struct *work) | ||
209 | { | ||
210 | struct irq_affinity_notify *notify = | ||
211 | container_of(work, struct irq_affinity_notify, work); | ||
212 | struct irq_desc *desc = irq_to_desc(notify->irq); | ||
213 | cpumask_var_t cpumask; | ||
214 | unsigned long flags; | ||
215 | |||
216 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | ||
217 | goto out; | ||
218 | |||
219 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
220 | if (irq_move_pending(&desc->irq_data)) | ||
221 | irq_get_pending(cpumask, desc); | ||
222 | else | ||
223 | cpumask_copy(cpumask, desc->irq_data.affinity); | ||
224 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
225 | |||
226 | notify->notify(notify, cpumask); | ||
227 | |||
228 | free_cpumask_var(cpumask); | ||
229 | out: | ||
230 | kref_put(¬ify->kref, notify->release); | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | ||
235 | * @irq: Interrupt for which to enable/disable notification | ||
236 | * @notify: Context for notification, or %NULL to disable | ||
237 | * notification. Function pointers must be initialised; | ||
238 | * the other fields will be initialised by this function. | ||
239 | * | ||
240 | * Must be called in process context. Notification may only be enabled | ||
241 | * after the IRQ is allocated and must be disabled before the IRQ is | ||
242 | * freed using free_irq(). | ||
243 | */ | ||
244 | int | ||
245 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | ||
246 | { | ||
144 | struct irq_desc *desc = irq_to_desc(irq); | 247 | struct irq_desc *desc = irq_to_desc(irq); |
248 | struct irq_affinity_notify *old_notify; | ||
145 | unsigned long flags; | 249 | unsigned long flags; |
146 | 250 | ||
251 | /* The release function is promised to run in process context */ ||
252 | might_sleep(); | ||
253 | |||
147 | if (!desc) | 254 | if (!desc) |
148 | return -EINVAL; | 255 | return -EINVAL; |
149 | 256 | ||
257 | /* Complete initialisation of *notify */ | ||
258 | if (notify) { | ||
259 | notify->irq = irq; | ||
260 | kref_init(¬ify->kref); | ||
261 | INIT_WORK(¬ify->work, irq_affinity_notify); | ||
262 | } | ||
263 | |||
150 | raw_spin_lock_irqsave(&desc->lock, flags); | 264 | raw_spin_lock_irqsave(&desc->lock, flags); |
151 | desc->affinity_hint = m; | 265 | old_notify = desc->affinity_notify; |
266 | desc->affinity_notify = notify; | ||
152 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 267 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
153 | 268 | ||
269 | if (old_notify) | ||
270 | kref_put(&old_notify->kref, old_notify->release); | ||
271 | |||
154 | return 0; | 272 | return 0; |
155 | } | 273 | } |
156 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 274 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
157 | 275 | ||
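A hedged usage sketch of the notifier interface (all my_* names are hypothetical, and notify is assumed to point at a kzalloc'ed struct): only .notify and .release are filled in by the caller, matching the kernel-doc above; the core initialises the rest.

        static void my_notify(struct irq_affinity_notify *notify,
                              const cpumask_t *mask)
        {
                /* rebalance per-CPU resources to follow the new mask */
        }

        static void my_release(struct kref *ref)
        {
                struct irq_affinity_notify *notify =
                        container_of(ref, struct irq_affinity_notify, kref);

                kfree(notify);  /* runs in process context, as promised */
        }

        notify->notify  = my_notify;
        notify->release = my_release;
        err = irq_set_affinity_notifier(irq, notify);

Passing NULL later disables notification again, which free_irq() now warns about if forgotten (see the CONFIG_SMP hunk further down).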
158 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 276 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
159 | /* | 277 | /* |
160 | * Generic version of the affinity autoselector. | 278 | * Generic version of the affinity autoselector. |
161 | */ | 279 | */ |
162 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) | 280 | static int |
281 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
163 | { | 282 | { |
283 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
284 | struct cpumask *set = irq_default_affinity; | ||
285 | int ret; | ||
286 | |||
287 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | ||
164 | if (!irq_can_set_affinity(irq)) | 288 | if (!irq_can_set_affinity(irq)) |
165 | return 0; | 289 | return 0; |
166 | 290 | ||
@@ -168,22 +292,27 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
168 | * Preserve a userspace affinity setup, but make sure that | 292 | * Preserve a userspace affinity setup, but make sure that |
169 | * one of the targets is online. | 293 | * one of the targets is online. |
170 | */ | 294 | */ |
171 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 295 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
172 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) | 296 | if (cpumask_intersects(desc->irq_data.affinity, |
173 | < nr_cpu_ids) | 297 | cpu_online_mask)) |
174 | goto set_affinity; | 298 | set = desc->irq_data.affinity; |
175 | else | 299 | else |
176 | desc->status &= ~IRQ_AFFINITY_SET; | 300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
177 | } | 301 | } |
178 | 302 | ||
179 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); | 303 | cpumask_and(mask, cpu_online_mask, set); |
180 | set_affinity: | 304 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
181 | desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false); | 305 | switch (ret) { |
182 | 306 | case IRQ_SET_MASK_OK: | |
307 | cpumask_copy(desc->irq_data.affinity, mask); | ||
308 | case IRQ_SET_MASK_OK_NOCOPY: | ||
309 | irq_set_thread_affinity(desc); | ||
310 | } | ||
183 | return 0; | 311 | return 0; |
184 | } | 312 | } |
185 | #else | 313 | #else |
186 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | 314 | static inline int |
315 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | ||
187 | { | 316 | { |
188 | return irq_select_affinity(irq); | 317 | return irq_select_affinity(irq); |
189 | } | 318 | } |
@@ -192,23 +321,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | |||
192 | /* | 321 | /* |
193 | * Called when affinity is set via /proc/irq | 322 | * Called when affinity is set via /proc/irq |
194 | */ | 323 | */ |
195 | int irq_select_affinity_usr(unsigned int irq) | 324 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
196 | { | 325 | { |
197 | struct irq_desc *desc = irq_to_desc(irq); | 326 | struct irq_desc *desc = irq_to_desc(irq); |
198 | unsigned long flags; | 327 | unsigned long flags; |
199 | int ret; | 328 | int ret; |
200 | 329 | ||
201 | raw_spin_lock_irqsave(&desc->lock, flags); | 330 | raw_spin_lock_irqsave(&desc->lock, flags); |
202 | ret = setup_affinity(irq, desc); | 331 | ret = setup_affinity(irq, desc, mask); |
203 | if (!ret) | ||
204 | irq_set_thread_affinity(desc); | ||
205 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 332 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
206 | |||
207 | return ret; | 333 | return ret; |
208 | } | 334 | } |
209 | 335 | ||
210 | #else | 336 | #else |
211 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) | 337 | static inline int |
338 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
212 | { | 339 | { |
213 | return 0; | 340 | return 0; |
214 | } | 341 | } |
@@ -219,13 +346,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
219 | if (suspend) { | 346 | if (suspend) { |
220 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 347 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
221 | return; | 348 | return; |
222 | desc->status |= IRQ_SUSPENDED; | 349 | desc->istate |= IRQS_SUSPENDED; |
223 | } | 350 | } |
224 | 351 | ||
225 | if (!desc->depth++) { | 352 | if (!desc->depth++) |
226 | desc->status |= IRQ_DISABLED; | 353 | irq_disable(desc); |
227 | desc->irq_data.chip->irq_disable(&desc->irq_data); | 354 | } |
228 | } | 355 | |
356 | static int __disable_irq_nosync(unsigned int irq) | ||
357 | { | ||
358 | unsigned long flags; | ||
359 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
360 | |||
361 | if (!desc) | ||
362 | return -EINVAL; | ||
363 | __disable_irq(desc, irq, false); | ||
364 | irq_put_desc_busunlock(desc, flags); | ||
365 | return 0; | ||
229 | } | 366 | } |
230 | 367 | ||
231 | /** | 368 | /** |
@@ -241,17 +378,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
241 | */ | 378 | */ |
242 | void disable_irq_nosync(unsigned int irq) | 379 | void disable_irq_nosync(unsigned int irq) |
243 | { | 380 | { |
244 | struct irq_desc *desc = irq_to_desc(irq); | 381 | __disable_irq_nosync(irq); |
245 | unsigned long flags; | ||
246 | |||
247 | if (!desc) | ||
248 | return; | ||
249 | |||
250 | chip_bus_lock(desc); | ||
251 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
252 | __disable_irq(desc, irq, false); | ||
253 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
254 | chip_bus_sync_unlock(desc); | ||
255 | } | 382 | } |
256 | EXPORT_SYMBOL(disable_irq_nosync); | 383 | EXPORT_SYMBOL(disable_irq_nosync); |
257 | 384 | ||
@@ -269,21 +396,24 @@ EXPORT_SYMBOL(disable_irq_nosync); | |||
269 | */ | 396 | */ |
270 | void disable_irq(unsigned int irq) | 397 | void disable_irq(unsigned int irq) |
271 | { | 398 | { |
272 | struct irq_desc *desc = irq_to_desc(irq); | 399 | if (!__disable_irq_nosync(irq)) |
273 | |||
274 | if (!desc) | ||
275 | return; | ||
276 | |||
277 | disable_irq_nosync(irq); | ||
278 | if (desc->action) | ||
279 | synchronize_irq(irq); | 400 | synchronize_irq(irq); |
280 | } | 401 | } |
281 | EXPORT_SYMBOL(disable_irq); | 402 | EXPORT_SYMBOL(disable_irq); |
282 | 403 | ||
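The usual pairing, for reference; the device reprogramming step is a hypothetical placeholder:

        disable_irq(irq);       /* also waits for running handlers */
        reprogram_device(dev);  /* hypothetical: no handler can run here */
        enable_irq(irq);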
283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 404 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
284 | { | 405 | { |
285 | if (resume) | 406 | if (resume) { |
286 | desc->status &= ~IRQ_SUSPENDED; | 407 | if (!(desc->istate & IRQS_SUSPENDED)) { |
408 | if (!desc->action) | ||
409 | return; | ||
410 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
411 | return; | ||
412 | /* Pretend that it got disabled! */ ||
413 | desc->depth++; | ||
414 | } | ||
415 | desc->istate &= ~IRQS_SUSPENDED; | ||
416 | } | ||
287 | 417 | ||
288 | switch (desc->depth) { | 418 | switch (desc->depth) { |
289 | case 0: | 419 | case 0: |
@@ -291,12 +421,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
291 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 421 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
292 | break; | 422 | break; |
293 | case 1: { | 423 | case 1: { |
294 | unsigned int status = desc->status & ~IRQ_DISABLED; | 424 | if (desc->istate & IRQS_SUSPENDED) |
295 | |||
296 | if (desc->status & IRQ_SUSPENDED) | ||
297 | goto err_out; | 425 | goto err_out; |
298 | /* Prevent probing on this irq: */ | 426 | /* Prevent probing on this irq: */ |
299 | desc->status = status | IRQ_NOPROBE; | 427 | irq_settings_set_noprobe(desc); |
428 | irq_enable(desc); | ||
300 | check_irq_resend(desc, irq); | 429 | check_irq_resend(desc, irq); |
301 | /* fall-through */ | 430 | /* fall-through */ |
302 | } | 431 | } |
@@ -318,21 +447,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
318 | */ | 447 | */ |
319 | void enable_irq(unsigned int irq) | 448 | void enable_irq(unsigned int irq) |
320 | { | 449 | { |
321 | struct irq_desc *desc = irq_to_desc(irq); | ||
322 | unsigned long flags; | 450 | unsigned long flags; |
451 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
323 | 452 | ||
324 | if (!desc) | 453 | if (!desc) |
325 | return; | 454 | return; |
455 | if (WARN(!desc->irq_data.chip, | ||
456 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
457 | goto out; | ||
326 | 458 | ||
327 | if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable, | ||
328 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
329 | return; | ||
330 | |||
331 | chip_bus_lock(desc); | ||
332 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
333 | __enable_irq(desc, irq, false); | 459 | __enable_irq(desc, irq, false); |
334 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 460 | out: |
335 | chip_bus_sync_unlock(desc); | 461 | irq_put_desc_busunlock(desc, flags); |
336 | } | 462 | } |
337 | EXPORT_SYMBOL(enable_irq); | 463 | EXPORT_SYMBOL(enable_irq); |
338 | 464 | ||
@@ -348,7 +474,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
348 | } | 474 | } |
349 | 475 | ||
350 | /** | 476 | /** |
351 | * set_irq_wake - control irq power management wakeup | 477 | * irq_set_irq_wake - control irq power management wakeup |
352 | * @irq: interrupt to control | 478 | * @irq: interrupt to control |
353 | * @on: enable/disable power management wakeup | 479 | * @on: enable/disable power management wakeup |
354 | * | 480 | * |
@@ -359,23 +485,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
359 | * Wakeup mode lets this IRQ wake the system from sleep | 485 | * Wakeup mode lets this IRQ wake the system from sleep |
360 | * states like "suspend to RAM". | 486 | * states like "suspend to RAM". |
361 | */ | 487 | */ |
362 | int set_irq_wake(unsigned int irq, unsigned int on) | 488 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
363 | { | 489 | { |
364 | struct irq_desc *desc = irq_to_desc(irq); | ||
365 | unsigned long flags; | 490 | unsigned long flags; |
491 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
366 | int ret = 0; | 492 | int ret = 0; |
367 | 493 | ||
368 | /* wakeup-capable irqs can be shared between drivers that | 494 | /* wakeup-capable irqs can be shared between drivers that |
369 | * don't need to have the same sleep mode behaviors. | 495 | * don't need to have the same sleep mode behaviors. |
370 | */ | 496 | */ |
371 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
372 | if (on) { | 497 | if (on) { |
373 | if (desc->wake_depth++ == 0) { | 498 | if (desc->wake_depth++ == 0) { |
374 | ret = set_irq_wake_real(irq, on); | 499 | ret = set_irq_wake_real(irq, on); |
375 | if (ret) | 500 | if (ret) |
376 | desc->wake_depth = 0; | 501 | desc->wake_depth = 0; |
377 | else | 502 | else |
378 | desc->status |= IRQ_WAKEUP; | 503 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
379 | } | 504 | } |
380 | } else { | 505 | } else { |
381 | if (desc->wake_depth == 0) { | 506 | if (desc->wake_depth == 0) { |
@@ -385,14 +510,13 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
385 | if (ret) | 510 | if (ret) |
386 | desc->wake_depth = 1; | 511 | desc->wake_depth = 1; |
387 | else | 512 | else |
388 | desc->status &= ~IRQ_WAKEUP; | 513 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
389 | } | 514 | } |
390 | } | 515 | } |
391 | 516 | irq_put_desc_busunlock(desc, flags); | |
392 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
393 | return ret; | 517 | return ret; |
394 | } | 518 | } |
395 | EXPORT_SYMBOL(set_irq_wake); | 519 | EXPORT_SYMBOL(irq_set_irq_wake); |
396 | 520 | ||
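A minimal sketch of the renamed call in a driver's suspend/resume path (my_irq and the pm callbacks are hypothetical):

        static int my_suspend(struct device *dev)
        {
                if (device_may_wakeup(dev))
                        irq_set_irq_wake(my_irq, 1);    /* arm for wakeup */
                return 0;
        }

        static int my_resume(struct device *dev)
        {
                if (device_may_wakeup(dev))
                        irq_set_irq_wake(my_irq, 0);    /* balanced disable */
                return 0;
        }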
397 | /* | 521 | /* |
398 | * Internal function that tells the architecture code whether a | 522 | * Internal function that tells the architecture code whether a |
@@ -401,43 +525,27 @@ EXPORT_SYMBOL(set_irq_wake); | |||
401 | */ | 525 | */ |
402 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 526 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
403 | { | 527 | { |
404 | struct irq_desc *desc = irq_to_desc(irq); | ||
405 | struct irqaction *action; | ||
406 | unsigned long flags; | 528 | unsigned long flags; |
529 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
530 | int canrequest = 0; | ||
407 | 531 | ||
408 | if (!desc) | 532 | if (!desc) |
409 | return 0; | 533 | return 0; |
410 | 534 | ||
411 | if (desc->status & IRQ_NOREQUEST) | 535 | if (irq_settings_can_request(desc)) { |
412 | return 0; | 536 | if (desc->action) |
413 | 537 | if (irqflags & desc->action->flags & IRQF_SHARED) | |
414 | raw_spin_lock_irqsave(&desc->lock, flags); | 538 | canrequest = 1; |
415 | action = desc->action; | 539 | } |
416 | if (action) | 540 | irq_put_desc_unlock(desc, flags); |
417 | if (irqflags & action->flags & IRQF_SHARED) | 541 | return canrequest; |
418 | action = NULL; | ||
419 | |||
420 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
421 | |||
422 | return !action; | ||
423 | } | ||
424 | |||
425 | void compat_irq_chip_set_default_handler(struct irq_desc *desc) | ||
426 | { | ||
427 | /* | ||
428 | * If the architecture still has not overriden | ||
429 | * the flow handler then zap the default. This | ||
430 | * should catch incorrect flow-type setting. | ||
431 | */ | ||
432 | if (desc->handle_irq == &handle_bad_irq) | ||
433 | desc->handle_irq = NULL; | ||
434 | } | 542 | } |
435 | 543 | ||
436 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 544 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
437 | unsigned long flags) | 545 | unsigned long flags) |
438 | { | 546 | { |
439 | int ret; | ||
440 | struct irq_chip *chip = desc->irq_data.chip; | 547 | struct irq_chip *chip = desc->irq_data.chip; |
548 | int ret, unmask = 0; | ||
441 | 549 | ||
442 | if (!chip || !chip->irq_set_type) { | 550 | if (!chip || !chip->irq_set_type) { |
443 | /* | 551 | /* |
@@ -449,23 +557,41 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
449 | return 0; | 557 | return 0; |
450 | } | 558 | } |
451 | 559 | ||
560 | flags &= IRQ_TYPE_SENSE_MASK; | ||
561 | |||
562 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | ||
563 | if (!irqd_irq_masked(&desc->irq_data)) | ||
564 | mask_irq(desc); | ||
565 | if (!irqd_irq_disabled(&desc->irq_data)) | ||
566 | unmask = 1; | ||
567 | } | ||
568 | |||
452 | /* caller masked out all except trigger mode flags */ | 569 | /* caller masked out all except trigger mode flags */ |
453 | ret = chip->irq_set_type(&desc->irq_data, flags); | 570 | ret = chip->irq_set_type(&desc->irq_data, flags); |
454 | 571 | ||
455 | if (ret) | 572 | switch (ret) { |
573 | case IRQ_SET_MASK_OK: | ||
574 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | ||
575 | irqd_set(&desc->irq_data, flags); | ||
576 | |||
577 | case IRQ_SET_MASK_OK_NOCOPY: | ||
578 | flags = irqd_get_trigger_type(&desc->irq_data); | ||
579 | irq_settings_set_trigger_mask(desc, flags); | ||
580 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | ||
581 | irq_settings_clr_level(desc); | ||
582 | if (flags & IRQ_TYPE_LEVEL_MASK) { | ||
583 | irq_settings_set_level(desc); | ||
584 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
585 | } | ||
586 | |||
587 | ret = 0; | ||
588 | break; | ||
589 | default: | ||
456 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | 590 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", |
457 | flags, irq, chip->irq_set_type); | 591 | flags, irq, chip->irq_set_type); |
458 | else { | ||
459 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | ||
460 | flags |= IRQ_LEVEL; | ||
461 | /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ | ||
462 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | ||
463 | desc->status |= flags; | ||
464 | |||
465 | if (chip != desc->irq_data.chip) | ||
466 | irq_chip_set_defaults(desc->irq_data.chip); | ||
467 | } | 592 | } |
468 | 593 | if (unmask) | |
594 | unmask_irq(desc); | ||
469 | return ret; | 595 | return ret; |
470 | } | 596 | } |
471 | 597 | ||
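The new mask/unmask dance is driven by the chip flag checked above; an irqchip that needs its line quiesced while the trigger is reprogrammed would declare it like this (all my_* callbacks are hypothetical):

        static struct irq_chip my_chip = {
                .name           = "my-chip",
                .irq_mask       = my_mask,
                .irq_unmask     = my_unmask,
                .irq_set_type   = my_set_type,  /* called with the line masked */
                .flags          = IRQCHIP_SET_TYPE_MASKED,
        };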
@@ -509,8 +635,11 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
509 | * handler finished. unmask if the interrupt has not been disabled and | 635 | * handler finished. unmask if the interrupt has not been disabled and |
510 | * is marked MASKED. | 636 | * is marked MASKED. |
511 | */ | 637 | */ |
512 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 638 | static void irq_finalize_oneshot(struct irq_desc *desc, |
639 | struct irqaction *action, bool force) | ||
513 | { | 640 | { |
641 | if (!(desc->istate & IRQS_ONESHOT)) | ||
642 | return; | ||
514 | again: | 643 | again: |
515 | chip_bus_lock(desc); | 644 | chip_bus_lock(desc); |
516 | raw_spin_lock_irq(&desc->lock); | 645 | raw_spin_lock_irq(&desc->lock); |
@@ -522,26 +651,42 @@ again: | |||
522 | * The thread is faster done than the hard interrupt handler | 651 | * The thread is faster done than the hard interrupt handler |
523 | * on the other CPU. If we unmask the irq line then the | 652 | * on the other CPU. If we unmask the irq line then the |
524 | * interrupt can come in again and masks the line, leaves due | 653 | * interrupt can come in again and masks the line, leaves due |
525 | * to IRQ_INPROGRESS and the irq line is masked forever. | 654 | * to IRQS_INPROGRESS and the irq line is masked forever. |
655 | * | ||
656 | * This also serializes the state of shared oneshot handlers | ||
657 | * versus "desc->threads_oneshot |= action->thread_mask;" in ||
658 | * irq_wake_thread(). See the comment there which explains the | ||
659 | * serialization. | ||
526 | */ | 660 | */ |
527 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | 661 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
528 | raw_spin_unlock_irq(&desc->lock); | 662 | raw_spin_unlock_irq(&desc->lock); |
529 | chip_bus_sync_unlock(desc); | 663 | chip_bus_sync_unlock(desc); |
530 | cpu_relax(); | 664 | cpu_relax(); |
531 | goto again; | 665 | goto again; |
532 | } | 666 | } |
533 | 667 | ||
534 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 668 | /* |
535 | desc->status &= ~IRQ_MASKED; | 669 | * Now check again whether the thread should run. Otherwise |
536 | desc->irq_data.chip->irq_unmask(&desc->irq_data); | 670 | * we would clear the threads_oneshot bit of this thread which |
537 | } | 671 | * was just set. |
672 | */ | ||
673 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
674 | goto out_unlock; | ||
675 | |||
676 | desc->threads_oneshot &= ~action->thread_mask; | ||
677 | |||
678 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | ||
679 | irqd_irq_masked(&desc->irq_data)) | ||
680 | unmask_irq(desc); | ||
681 | |||
682 | out_unlock: | ||
538 | raw_spin_unlock_irq(&desc->lock); | 683 | raw_spin_unlock_irq(&desc->lock); |
539 | chip_bus_sync_unlock(desc); | 684 | chip_bus_sync_unlock(desc); |
540 | } | 685 | } |
541 | 686 | ||
542 | #ifdef CONFIG_SMP | 687 | #ifdef CONFIG_SMP |
543 | /* | 688 | /* |
544 | * Check whether we need to change the affinity of the interrupt thread. | 689 | * Check whether we need to change the affinity of the interrupt thread. |
545 | */ | 690 | */ |
546 | static void | 691 | static void |
547 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 692 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
@@ -573,6 +718,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |||
573 | #endif | 718 | #endif |
574 | 719 | ||
575 | /* | 720 | /* |
721 | * Interrupts which are not explicitly requested as threaded ||
722 | * interrupts rely on the implicit bh/preempt disable of the hard irq | ||
723 | * context. So we need to disable bh here to avoid deadlocks and other | ||
724 | * side effects. | ||
725 | */ | ||
726 | static void | ||
727 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
728 | { | ||
729 | local_bh_disable(); | ||
730 | action->thread_fn(action->irq, action->dev_id); | ||
731 | irq_finalize_oneshot(desc, action, false); | ||
732 | local_bh_enable(); | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * Interrupts explicitly requested as threaded interrupts want to be ||
737 | * preemptible - many of them need to sleep and wait for slow buses to ||
738 | * complete. | ||
739 | */ | ||
740 | static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
741 | { | ||
742 | action->thread_fn(action->irq, action->dev_id); | ||
743 | irq_finalize_oneshot(desc, action, false); | ||
744 | } | ||
745 | |||
746 | /* | ||
576 | * Interrupt handler thread | 747 | * Interrupt handler thread |
577 | */ | 748 | */ |
578 | static int irq_thread(void *data) | 749 | static int irq_thread(void *data) |
@@ -582,7 +753,14 @@ static int irq_thread(void *data) | |||
582 | }; | 753 | }; |
583 | struct irqaction *action = data; | 754 | struct irqaction *action = data; |
584 | struct irq_desc *desc = irq_to_desc(action->irq); | 755 | struct irq_desc *desc = irq_to_desc(action->irq); |
585 | int wake, oneshot = desc->status & IRQ_ONESHOT; | 756 | void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); |
757 | int wake; | ||
758 | |||
759 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, ||
760 | &action->thread_flags)) | ||
761 | handler_fn = irq_forced_thread_fn; | ||
762 | else | ||
763 | handler_fn = irq_thread_fn; | ||
586 | 764 | ||
587 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 765 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
588 | current->irqaction = action; | 766 | current->irqaction = action; |
@@ -594,23 +772,19 @@ static int irq_thread(void *data) | |||
594 | atomic_inc(&desc->threads_active); | 772 | atomic_inc(&desc->threads_active); |
595 | 773 | ||
596 | raw_spin_lock_irq(&desc->lock); | 774 | raw_spin_lock_irq(&desc->lock); |
597 | if (unlikely(desc->status & IRQ_DISABLED)) { | 775 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { |
598 | /* | 776 | /* |
599 | * CHECKME: We might need a dedicated | 777 | * CHECKME: We might need a dedicated |
600 | * IRQ_THREAD_PENDING flag here, which | 778 | * IRQ_THREAD_PENDING flag here, which |
601 | * retriggers the thread in check_irq_resend() | 779 | * retriggers the thread in check_irq_resend() |
602 | * but AFAICT IRQ_PENDING should be fine as it | 780 | * but AFAICT IRQS_PENDING should be fine as it |
603 | * retriggers the interrupt itself --- tglx | 781 | * retriggers the interrupt itself --- tglx |
604 | */ | 782 | */ |
605 | desc->status |= IRQ_PENDING; | 783 | desc->istate |= IRQS_PENDING; |
606 | raw_spin_unlock_irq(&desc->lock); | 784 | raw_spin_unlock_irq(&desc->lock); |
607 | } else { | 785 | } else { |
608 | raw_spin_unlock_irq(&desc->lock); | 786 | raw_spin_unlock_irq(&desc->lock); |
609 | 787 | handler_fn(desc, action); | |
610 | action->thread_fn(action->irq, action->dev_id); | ||
611 | |||
612 | if (oneshot) | ||
613 | irq_finalize_oneshot(action->irq, desc); | ||
614 | } | 788 | } |
615 | 789 | ||
616 | wake = atomic_dec_and_test(&desc->threads_active); | 790 | wake = atomic_dec_and_test(&desc->threads_active); |
@@ -619,6 +793,9 @@ static int irq_thread(void *data) | |||
619 | wake_up(&desc->wait_for_threads); | 793 | wake_up(&desc->wait_for_threads); |
620 | } | 794 | } |
621 | 795 | ||
796 | /* Prevent a stale desc->threads_oneshot */ | ||
797 | irq_finalize_oneshot(desc, action, true); | ||
798 | |||
622 | /* | 799 | /* |
623 | * Clear irqaction. Otherwise exit_irq_thread() would make | 800 | * Clear irqaction. Otherwise exit_irq_thread() would make |
624 | * a fuss about an active irq thread going into nirvana. | 801 | * a fuss about an active irq thread going into nirvana. |
@@ -633,6 +810,7 @@ static int irq_thread(void *data) | |||
633 | void exit_irq_thread(void) | 810 | void exit_irq_thread(void) |
634 | { | 811 | { |
635 | struct task_struct *tsk = current; | 812 | struct task_struct *tsk = current; |
813 | struct irq_desc *desc; | ||
636 | 814 | ||
637 | if (!tsk->irqaction) | 815 | if (!tsk->irqaction) |
638 | return; | 816 | return; |
@@ -641,6 +819,14 @@ void exit_irq_thread(void) | |||
641 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 819 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
642 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | 820 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); |
643 | 821 | ||
822 | desc = irq_to_desc(tsk->irqaction->irq); | ||
823 | |||
824 | /* | ||
825 | * Prevent a stale desc->threads_oneshot. Must be called | ||
826 | * before setting the IRQTF_DIED flag. | ||
827 | */ | ||
828 | irq_finalize_oneshot(desc, tsk->irqaction, true); | ||
829 | |||
644 | /* | 830 | /* |
645 | * Set the THREAD DIED flag to prevent further wakeups of the | 831 | * Set the THREAD DIED flag to prevent further wakeups of the |
646 | * soon to be gone threaded handler. | 832 | * soon to be gone threaded handler. |
@@ -648,6 +834,22 @@ void exit_irq_thread(void) | |||
648 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | 834 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); |
649 | } | 835 | } |
650 | 836 | ||
837 | static void irq_setup_forced_threading(struct irqaction *new) | ||
838 | { | ||
839 | if (!force_irqthreads) | ||
840 | return; | ||
841 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | ||
842 | return; | ||
843 | |||
844 | new->flags |= IRQF_ONESHOT; | ||
845 | |||
846 | if (!new->thread_fn) { | ||
847 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
848 | new->thread_fn = new->handler; | ||
849 | new->handler = irq_default_primary_handler; | ||
850 | } | ||
851 | } | ||
852 | |||
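Handlers that must stay in hard interrupt context even when "threadirqs" is set can opt out with IRQF_NO_THREAD, which the helper above checks first; a hypothetical example:

        err = request_irq(irq, my_fast_handler, IRQF_NO_THREAD,
                          "my-dev", dev);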
651 | /* | 853 | /* |
652 | * Internal function to register an irqaction - typically used to | 854 | * Internal function to register an irqaction - typically used to |
653 | * allocate special interrupts that are part of the architecture. | 855 | * allocate special interrupts that are part of the architecture. |
@@ -657,9 +859,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
657 | { | 859 | { |
658 | struct irqaction *old, **old_ptr; | 860 | struct irqaction *old, **old_ptr; |
659 | const char *old_name = NULL; | 861 | const char *old_name = NULL; |
660 | unsigned long flags; | 862 | unsigned long flags, thread_mask = 0; |
661 | int nested, shared = 0; | 863 | int ret, nested, shared = 0; |
662 | int ret; | 864 | cpumask_var_t mask; |
663 | 865 | ||
664 | if (!desc) | 866 | if (!desc) |
665 | return -EINVAL; | 867 | return -EINVAL; |
@@ -683,15 +885,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
683 | rand_initialize_irq(irq); | 885 | rand_initialize_irq(irq); |
684 | } | 886 | } |
685 | 887 | ||
686 | /* Oneshot interrupts are not allowed with shared */ | ||
687 | if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) | ||
688 | return -EINVAL; | ||
689 | |||
690 | /* | 888 | /* |
691 | * Check whether the interrupt nests into another interrupt | 889 | * Check whether the interrupt nests into another interrupt |
692 | * thread. | 890 | * thread. |
693 | */ | 891 | */ |
694 | nested = desc->status & IRQ_NESTED_THREAD; | 892 | nested = irq_settings_is_nested_thread(desc); |
695 | if (nested) { | 893 | if (nested) { |
696 | if (!new->thread_fn) | 894 | if (!new->thread_fn) |
697 | return -EINVAL; | 895 | return -EINVAL; |
@@ -701,6 +899,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
701 | * dummy function which warns when called. | 899 | * dummy function which warns when called. |
702 | */ | 900 | */ |
703 | new->handler = irq_nested_primary_handler; | 901 | new->handler = irq_nested_primary_handler; |
902 | } else { | ||
903 | if (irq_settings_can_thread(desc)) | ||
904 | irq_setup_forced_threading(new); | ||
704 | } | 905 | } |
705 | 906 | ||
706 | /* | 907 | /* |
@@ -724,6 +925,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
724 | new->thread = t; | 925 | new->thread = t; |
725 | } | 926 | } |
726 | 927 | ||
928 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
929 | ret = -ENOMEM; | ||
930 | goto out_thread; | ||
931 | } | ||
932 | |||
727 | /* | 933 | /* |
728 | * The following block of code has to be executed atomically | 934 | * The following block of code has to be executed atomically |
729 | */ | 935 | */ |
@@ -735,32 +941,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
735 | * Can't share interrupts unless both agree to and are | 941 | * Can't share interrupts unless both agree to and are |
736 | * the same type (level, edge, polarity). So both flag | 942 | * the same type (level, edge, polarity). So both flag |
737 | * fields must have IRQF_SHARED set and the bits which | 943 | * fields must have IRQF_SHARED set and the bits which |
738 | * set the trigger type must match. | 944 | * set the trigger type must match. Also all must |
945 | * agree on ONESHOT. | ||
739 | */ | 946 | */ |
740 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 947 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
741 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { | 948 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
949 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { | ||
742 | old_name = old->name; | 950 | old_name = old->name; |
743 | goto mismatch; | 951 | goto mismatch; |
744 | } | 952 | } |
745 | 953 | ||
746 | #if defined(CONFIG_IRQ_PER_CPU) | ||
747 | /* All handlers must agree on per-cpuness */ | 954 | /* All handlers must agree on per-cpuness */ |
748 | if ((old->flags & IRQF_PERCPU) != | 955 | if ((old->flags & IRQF_PERCPU) != |
749 | (new->flags & IRQF_PERCPU)) | 956 | (new->flags & IRQF_PERCPU)) |
750 | goto mismatch; | 957 | goto mismatch; |
751 | #endif | ||
752 | 958 | ||
753 | /* add new interrupt at end of irq queue */ | 959 | /* add new interrupt at end of irq queue */ |
754 | do { | 960 | do { |
961 | thread_mask |= old->thread_mask; | ||
755 | old_ptr = &old->next; | 962 | old_ptr = &old->next; |
756 | old = *old_ptr; | 963 | old = *old_ptr; |
757 | } while (old); | 964 | } while (old); |
758 | shared = 1; | 965 | shared = 1; |
759 | } | 966 | } |
760 | 967 | ||
761 | if (!shared) { | 968 | /* |
762 | irq_chip_set_defaults(desc->irq_data.chip); | 969 | * Set up the thread mask for this irqaction. Unlikely to have |
970 | * 32 or 64 irqs sharing one line, but who knows. ||
971 | */ | ||
972 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { | ||
973 | ret = -EBUSY; | ||
974 | goto out_mask; | ||
975 | } | ||
976 | new->thread_mask = 1 << ffz(thread_mask); | ||
763 | 977 | ||
978 | if (!shared) { | ||
764 | init_waitqueue_head(&desc->wait_for_threads); | 979 | init_waitqueue_head(&desc->wait_for_threads); |
765 | 980 | ||
766 | /* Setup the type (level, edge polarity) if configured: */ | 981 | /* Setup the type (level, edge polarity) if configured: */ |
@@ -769,42 +984,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
769 | new->flags & IRQF_TRIGGER_MASK); | 984 | new->flags & IRQF_TRIGGER_MASK); |
770 | 985 | ||
771 | if (ret) | 986 | if (ret) |
772 | goto out_thread; | 987 | goto out_mask; |
773 | } else | 988 | } |
774 | compat_irq_chip_set_default_handler(desc); | 989 | |
775 | #if defined(CONFIG_IRQ_PER_CPU) | 990 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
776 | if (new->flags & IRQF_PERCPU) | 991 | IRQS_ONESHOT | IRQS_WAITING); |
777 | desc->status |= IRQ_PER_CPU; | 992 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
778 | #endif | ||
779 | 993 | ||
780 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | | 994 | if (new->flags & IRQF_PERCPU) { |
781 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 995 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
996 | irq_settings_set_per_cpu(desc); | ||
997 | } | ||
782 | 998 | ||
783 | if (new->flags & IRQF_ONESHOT) | 999 | if (new->flags & IRQF_ONESHOT) |
784 | desc->status |= IRQ_ONESHOT; | 1000 | desc->istate |= IRQS_ONESHOT; |
785 | 1001 | ||
786 | if (!(desc->status & IRQ_NOAUTOEN)) { | 1002 | if (irq_settings_can_autoenable(desc)) |
787 | desc->depth = 0; | 1003 | irq_startup(desc); |
788 | desc->status &= ~IRQ_DISABLED; | 1004 | else |
789 | desc->irq_data.chip->irq_startup(&desc->irq_data); | ||
790 | } else | ||
791 | /* Undo nested disables: */ | 1005 | /* Undo nested disables: */ |
792 | desc->depth = 1; | 1006 | desc->depth = 1; |
793 | 1007 | ||
794 | /* Exclude IRQ from balancing if requested */ | 1008 | /* Exclude IRQ from balancing if requested */ |
795 | if (new->flags & IRQF_NOBALANCING) | 1009 | if (new->flags & IRQF_NOBALANCING) { |
796 | desc->status |= IRQ_NO_BALANCING; | 1010 | irq_settings_set_no_balancing(desc); |
1011 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
1012 | } | ||
797 | 1013 | ||
798 | /* Set default affinity mask once everything is setup */ | 1014 | /* Set default affinity mask once everything is setup */ |
799 | setup_affinity(irq, desc); | 1015 | setup_affinity(irq, desc, mask); |
800 | 1016 | ||
801 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 1017 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
802 | && (new->flags & IRQF_TRIGGER_MASK) | 1018 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
803 | != (desc->status & IRQ_TYPE_SENSE_MASK)) { | 1019 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
804 | /* hope the handler works with the actual trigger mode... */ | 1020 | |
805 | pr_warning("IRQ %d uses trigger mode %d; requested %d\n", | 1021 | if (nmsk != omsk) |
806 | irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), | 1022 | /* hope the handler works with current trigger mode */ |
807 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 1023 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", |
1024 | irq, nmsk, omsk); | ||
808 | } | 1025 | } |
809 | 1026 | ||
810 | new->irq = irq; | 1027 | new->irq = irq; |
@@ -818,8 +1035,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
818 | * Check whether we disabled the irq via the spurious handler | 1035 | * Check whether we disabled the irq via the spurious handler |
819 | * before. Reenable it and give it another chance. | 1036 | * before. Reenable it and give it another chance. |
820 | */ | 1037 | */ |
821 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 1038 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
822 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 1039 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
823 | __enable_irq(desc, irq, false); | 1040 | __enable_irq(desc, irq, false); |
824 | } | 1041 | } |
825 | 1042 | ||
@@ -835,6 +1052,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
835 | register_irq_proc(irq, desc); | 1052 | register_irq_proc(irq, desc); |
836 | new->dir = NULL; | 1053 | new->dir = NULL; |
837 | register_handler_proc(irq, new); | 1054 | register_handler_proc(irq, new); |
1055 | free_cpumask_var(mask); | ||
838 | 1056 | ||
839 | return 0; | 1057 | return 0; |
840 | 1058 | ||
@@ -849,8 +1067,11 @@ mismatch: | |||
849 | #endif | 1067 | #endif |
850 | ret = -EBUSY; | 1068 | ret = -EBUSY; |
851 | 1069 | ||
852 | out_thread: | 1070 | out_mask: |
853 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1071 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1072 | free_cpumask_var(mask); | ||
1073 | |||
1074 | out_thread: | ||
854 | if (new->thread) { | 1075 | if (new->thread) { |
855 | struct task_struct *t = new->thread; | 1076 | struct task_struct *t = new->thread; |
856 | 1077 | ||
@@ -871,9 +1092,14 @@ out_thread: | |||
871 | */ | 1092 | */ |
872 | int setup_irq(unsigned int irq, struct irqaction *act) | 1093 | int setup_irq(unsigned int irq, struct irqaction *act) |
873 | { | 1094 | { |
1095 | int retval; | ||
874 | struct irq_desc *desc = irq_to_desc(irq); | 1096 | struct irq_desc *desc = irq_to_desc(irq); |
875 | 1097 | ||
876 | return __setup_irq(irq, desc, act); | 1098 | chip_bus_lock(desc); |
1099 | retval = __setup_irq(irq, desc, act); | ||
1100 | chip_bus_sync_unlock(desc); | ||
1101 | |||
1102 | return retval; | ||
877 | } | 1103 | } |
878 | EXPORT_SYMBOL_GPL(setup_irq); | 1104 | EXPORT_SYMBOL_GPL(setup_irq); |
879 | 1105 | ||
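For reference, the classic setup_irq() caller is arch code registering a static irqaction before the allocators are up (TIMER_IRQ and timer_interrupt are hypothetical names):

        static struct irqaction timer_irqaction = {
                .handler = timer_interrupt,
                .flags   = IRQF_TIMER,
                .name    = "timer",
        };

        setup_irq(TIMER_IRQ, &timer_irqaction);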
@@ -924,13 +1150,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
924 | #endif | 1150 | #endif |
925 | 1151 | ||
926 | /* If this was the last handler, shut down the IRQ line: */ | 1152 | /* If this was the last handler, shut down the IRQ line: */ |
927 | if (!desc->action) { | 1153 | if (!desc->action) |
928 | desc->status |= IRQ_DISABLED; | 1154 | irq_shutdown(desc); |
929 | if (desc->irq_data.chip->irq_shutdown) | ||
930 | desc->irq_data.chip->irq_shutdown(&desc->irq_data); | ||
931 | else | ||
932 | desc->irq_data.chip->irq_disable(&desc->irq_data); | ||
933 | } | ||
934 | 1155 | ||
935 | #ifdef CONFIG_SMP | 1156 | #ifdef CONFIG_SMP |
936 | /* make sure affinity_hint is cleaned up */ | 1157 | /* make sure affinity_hint is cleaned up */ |
@@ -1004,6 +1225,11 @@ void free_irq(unsigned int irq, void *dev_id) | |||
1004 | if (!desc) | 1225 | if (!desc) |
1005 | return; | 1226 | return; |
1006 | 1227 | ||
1228 | #ifdef CONFIG_SMP | ||
1229 | if (WARN_ON(desc->affinity_notify)) | ||
1230 | desc->affinity_notify = NULL; | ||
1231 | #endif | ||
1232 | |||
1007 | chip_bus_lock(desc); | 1233 | chip_bus_lock(desc); |
1008 | kfree(__free_irq(irq, dev_id)); | 1234 | kfree(__free_irq(irq, dev_id)); |
1009 | chip_bus_sync_unlock(desc); | 1235 | chip_bus_sync_unlock(desc); |
@@ -1074,7 +1300,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1074 | if (!desc) | 1300 | if (!desc) |
1075 | return -EINVAL; | 1301 | return -EINVAL; |
1076 | 1302 | ||
1077 | if (desc->status & IRQ_NOREQUEST) | 1303 | if (!irq_settings_can_request(desc)) |
1078 | return -EINVAL; | 1304 | return -EINVAL; |
1079 | 1305 | ||
1080 | if (!handler) { | 1306 | if (!handler) { |
@@ -1100,7 +1326,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1100 | if (retval) | 1326 | if (retval) |
1101 | kfree(action); | 1327 | kfree(action); |
1102 | 1328 | ||
1103 | #ifdef CONFIG_DEBUG_SHIRQ | 1329 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1104 | if (!retval && (irqflags & IRQF_SHARED)) { | 1330 | if (!retval && (irqflags & IRQF_SHARED)) { |
1105 | /* | 1331 | /* |
1106 | * It's a shared IRQ -- the driver ought to be prepared for it | 1332 | * It's a shared IRQ -- the driver ought to be prepared for it |
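A hedged sketch of the split-handler pattern this function serves (my_* names are hypothetical): a quick hard-irq check that defers the sleepable work to the thread:

        static irqreturn_t my_quick_check(int irq, void *dev_id)
        {
                if (!my_device_raised_irq(dev_id))      /* hypothetical test */
                        return IRQ_NONE;
                return IRQ_WAKE_THREAD;                 /* run my_thread_fn */
        }

        static irqreturn_t my_thread_fn(int irq, void *dev_id)
        {
                /* may sleep: talk to the slow bus here */
                return IRQ_HANDLED;
        }

        err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
                                   IRQF_ONESHOT, "my-dev", dev);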
@@ -1149,7 +1375,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |||
1149 | if (!desc) | 1375 | if (!desc) |
1150 | return -EINVAL; | 1376 | return -EINVAL; |
1151 | 1377 | ||
1152 | if (desc->status & IRQ_NESTED_THREAD) { | 1378 | if (irq_settings_is_nested_thread(desc)) { |
1153 | ret = request_threaded_irq(irq, NULL, handler, | 1379 | ret = request_threaded_irq(irq, NULL, handler, |
1154 | flags, name, dev_id); | 1380 | flags, name, dev_id); |
1155 | return !ret ? IRQC_IS_NESTED : ret; | 1381 | return !ret ? IRQC_IS_NESTED : ret; |