Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r-- | kernel/irq/manage.c | 690 |
1 file changed, 469 insertions, 221 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c3003e9d91a3..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@ | |||
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | #ifdef CONFIG_IRQ_FORCED_THREADING | ||
21 | __read_mostly bool force_irqthreads; | ||
22 | |||
23 | static int __init setup_forced_irqthreads(char *arg) | ||
24 | { | ||
25 | force_irqthreads = true; | ||
26 | return 0; | ||
27 | } | ||
28 | early_param("threadirqs", setup_forced_irqthreads); | ||
29 | #endif | ||
30 | |||
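The hunk above only adds the boot-time switch; the promotion itself is done by irq_setup_forced_threading() later in this patch. A hedged driver-side sketch (hypothetical "foo" names, not part of the patch) of what happens when the kernel is booted with "threadirqs":

#include <linux/interrupt.h>

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	/*
	 * With "threadirqs" this handler is run from the irq kernel
	 * thread via irq_forced_thread_fn(), wrapped in
	 * local_bh_disable(), so it must keep behaving like a hardirq
	 * handler (no sleeping).
	 */
	return IRQ_HANDLED;
}

static int foo_setup(unsigned int irq, void *dev)
{
	/* No driver change is needed; the promotion is transparent. */
	return request_irq(irq, foo_irq, 0, "foo", dev);
}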
20 | /** | 31 | /** |
21 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
22 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
@@ -30,7 +41,7 @@ | |||
30 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
31 | { | 42 | { |
32 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
33 | unsigned int status; | 44 | bool inprogress; |
34 | 45 | ||
35 | if (!desc) | 46 | if (!desc) |
36 | return; | 47 | return; |
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq) | |||
42 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
43 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
44 | */ | 55 | */ |
45 | while (desc->status & IRQ_INPROGRESS) | 56 | while (irqd_irq_inprogress(&desc->irq_data)) |
46 | cpu_relax(); | 57 | cpu_relax(); |
47 | 58 | ||
48 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
49 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
50 | status = desc->status; | 61 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
51 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
52 | 63 | ||
53 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
54 | } while (status & IRQ_INPROGRESS); | 65 | } while (inprogress); |
55 | 66 | ||
56 | /* | 67 | /* |
57 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
73 | { | 84 | { |
74 | struct irq_desc *desc = irq_to_desc(irq); | 85 | struct irq_desc *desc = irq_to_desc(irq); |
75 | 86 | ||
76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || | 87 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
77 | !desc->chip->set_affinity) | 88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
78 | return 0; | 89 | return 0; |
79 | 90 | ||
80 | return 1; | 91 | return 1; |
@@ -100,66 +111,180 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
100 | } | 111 | } |
101 | } | 112 | } |
102 | 113 | ||
114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | ||
116 | { | ||
117 | return irqd_can_move_in_process_context(data); | ||
118 | } | ||
119 | static inline bool irq_move_pending(struct irq_data *data) | ||
120 | { | ||
121 | return irqd_is_setaffinity_pending(data); | ||
122 | } | ||
123 | static inline void | ||
124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | ||
125 | { | ||
126 | cpumask_copy(desc->pending_mask, mask); | ||
127 | } | ||
128 | static inline void | ||
129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | ||
130 | { | ||
131 | cpumask_copy(mask, desc->pending_mask); | ||
132 | } | ||
133 | #else | ||
134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } | ||
135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } | ||
136 | static inline void | ||
137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | ||
138 | static inline void | ||
139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | ||
140 | #endif | ||
141 | |||
142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | ||
143 | { | ||
144 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
145 | struct irq_desc *desc = irq_data_to_desc(data); | ||
146 | int ret = 0; | ||
147 | |||
148 | if (!chip || !chip->irq_set_affinity) | ||
149 | return -EINVAL; | ||
150 | |||
151 | if (irq_can_move_pcntxt(data)) { | ||
152 | ret = chip->irq_set_affinity(data, mask, false); | ||
153 | switch (ret) { | ||
154 | case IRQ_SET_MASK_OK: | ||
155 | cpumask_copy(data->affinity, mask); | ||
156 | case IRQ_SET_MASK_OK_NOCOPY: | ||
157 | irq_set_thread_affinity(desc); | ||
158 | ret = 0; | ||
159 | } | ||
160 | } else { | ||
161 | irqd_set_move_pending(data); | ||
162 | irq_copy_pending(desc, mask); | ||
163 | } | ||
164 | |||
165 | if (desc->affinity_notify) { | ||
166 | kref_get(&desc->affinity_notify->kref); | ||
167 | schedule_work(&desc->affinity_notify->work); | ||
168 | } | ||
169 | irqd_set(data, IRQD_AFFINITY_SET); | ||
170 | |||
171 | return ret; | ||
172 | } | ||
173 | |||
103 | /** | 174 | /** |
104 | * irq_set_affinity - Set the irq affinity of a given irq | 175 | * irq_set_affinity - Set the irq affinity of a given irq |
105 | * @irq: Interrupt to set affinity | 176 | * @irq: Interrupt to set affinity |
106 | * @cpumask: cpumask | 177 | * @mask: cpumask |
107 | * | 178 | * |
108 | */ | 179 | */ |
109 | int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
110 | { | 181 | { |
111 | struct irq_desc *desc = irq_to_desc(irq); | 182 | struct irq_desc *desc = irq_to_desc(irq); |
112 | unsigned long flags; | 183 | unsigned long flags; |
184 | int ret; | ||
113 | 185 | ||
114 | if (!desc->chip->set_affinity) | 186 | if (!desc) |
115 | return -EINVAL; | 187 | return -EINVAL; |
116 | 188 | ||
117 | raw_spin_lock_irqsave(&desc->lock, flags); | 189 | raw_spin_lock_irqsave(&desc->lock, flags); |
118 | 190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | |
119 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
120 | if (desc->status & IRQ_MOVE_PCNTXT) { | ||
121 | if (!desc->chip->set_affinity(irq, cpumask)) { | ||
122 | cpumask_copy(desc->affinity, cpumask); | ||
123 | irq_set_thread_affinity(desc); | ||
124 | } | ||
125 | } | ||
126 | else { | ||
127 | desc->status |= IRQ_MOVE_PENDING; | ||
128 | cpumask_copy(desc->pending_mask, cpumask); | ||
129 | } | ||
130 | #else | ||
131 | if (!desc->chip->set_affinity(irq, cpumask)) { | ||
132 | cpumask_copy(desc->affinity, cpumask); | ||
133 | irq_set_thread_affinity(desc); | ||
134 | } | ||
135 | #endif | ||
136 | desc->status |= IRQ_AFFINITY_SET; | ||
137 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
138 | return 0; | 192 | return ret; |
139 | } | 193 | } |
140 | 194 | ||
141 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 195 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
142 | { | 196 | { |
197 | unsigned long flags; | ||
198 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
199 | |||
200 | if (!desc) | ||
201 | return -EINVAL; | ||
202 | desc->affinity_hint = m; | ||
203 | irq_put_desc_unlock(desc, flags); | ||
204 | return 0; | ||
205 | } | ||
206 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | ||
207 | |||
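irq_set_affinity_hint() now takes the descriptor through irq_get_desc_lock()/irq_put_desc_unlock(). A hedged usage sketch (hypothetical "foo" names) of a multi-queue driver publishing its preferred CPU, which userspace reads back via /proc/irq/<n>/affinity_hint:

#include <linux/interrupt.h>
#include <linux/cpumask.h>

static int foo_hint_queue(unsigned int irq, int queue)
{
	/* one CPU per queue; the mask must stay valid until cleared */
	return irq_set_affinity_hint(irq,
			get_cpu_mask(queue % num_online_cpus()));
}

static void foo_unhint_queue(unsigned int irq)
{
	/* clear the hint before free_irq() */
	irq_set_affinity_hint(irq, NULL);
}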
208 | static void irq_affinity_notify(struct work_struct *work) | ||
209 | { | ||
210 | struct irq_affinity_notify *notify = | ||
211 | container_of(work, struct irq_affinity_notify, work); | ||
212 | struct irq_desc *desc = irq_to_desc(notify->irq); | ||
213 | cpumask_var_t cpumask; | ||
214 | unsigned long flags; | ||
215 | |||
216 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | ||
217 | goto out; | ||
218 | |||
219 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
220 | if (irq_move_pending(&desc->irq_data)) | ||
221 | irq_get_pending(cpumask, desc); | ||
222 | else | ||
223 | cpumask_copy(cpumask, desc->irq_data.affinity); | ||
224 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
225 | |||
226 | notify->notify(notify, cpumask); | ||
227 | |||
228 | free_cpumask_var(cpumask); | ||
229 | out: | ||
230 | kref_put(¬ify->kref, notify->release); | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | ||
235 | * @irq: Interrupt for which to enable/disable notification | ||
236 | * @notify: Context for notification, or %NULL to disable | ||
237 | * notification. Function pointers must be initialised; | ||
238 | * the other fields will be initialised by this function. | ||
239 | * | ||
240 | * Must be called in process context. Notification may only be enabled | ||
241 | * after the IRQ is allocated and must be disabled before the IRQ is | ||
242 | * freed using free_irq(). | ||
243 | */ | ||
244 | int | ||
245 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | ||
246 | { | ||
143 | struct irq_desc *desc = irq_to_desc(irq); | 247 | struct irq_desc *desc = irq_to_desc(irq); |
248 | struct irq_affinity_notify *old_notify; | ||
144 | unsigned long flags; | 249 | unsigned long flags; |
145 | 250 | ||
251 | /* The release function is promised process context */ | ||
252 | might_sleep(); | ||
253 | |||
146 | if (!desc) | 254 | if (!desc) |
147 | return -EINVAL; | 255 | return -EINVAL; |
148 | 256 | ||
257 | /* Complete initialisation of *notify */ | ||
258 | if (notify) { | ||
259 | notify->irq = irq; | ||
260 | kref_init(¬ify->kref); | ||
261 | INIT_WORK(¬ify->work, irq_affinity_notify); | ||
262 | } | ||
263 | |||
149 | raw_spin_lock_irqsave(&desc->lock, flags); | 264 | raw_spin_lock_irqsave(&desc->lock, flags); |
150 | desc->affinity_hint = m; | 265 | old_notify = desc->affinity_notify; |
266 | desc->affinity_notify = notify; | ||
151 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 267 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
152 | 268 | ||
269 | if (old_notify) | ||
270 | kref_put(&old_notify->kref, old_notify->release); | ||
271 | |||
153 | return 0; | 272 | return 0; |
154 | } | 273 | } |
155 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 274 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
156 | 275 | ||
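A hedged sketch (hypothetical "foo" driver) of how the notifier registered above is used: only .notify and .release have to be filled in, the remaining fields are initialised by irq_set_affinity_notifier(), and the notifier must be torn down before free_irq():

#include <linux/interrupt.h>
#include <linux/kref.h>

struct foo_ctx {
	struct irq_affinity_notify notify;
	/* ... driver state ... */
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	/* re-steer per-CPU resources towards the new target CPUs */
}

static void foo_affinity_release(struct kref *ref)
{
	/* last reference dropped; called from process context */
}

static int foo_register_notifier(struct foo_ctx *ctx, unsigned int irq)
{
	ctx->notify.notify = foo_affinity_changed;
	ctx->notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}

/* and before free_irq(): irq_set_affinity_notifier(irq, NULL); */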
157 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 276 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
158 | /* | 277 | /* |
159 | * Generic version of the affinity autoselector. | 278 | * Generic version of the affinity autoselector. |
160 | */ | 279 | */ |
161 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) | 280 | static int |
281 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
162 | { | 282 | { |
283 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
284 | struct cpumask *set = irq_default_affinity; | ||
285 | int ret; | ||
286 | |||
287 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | ||
163 | if (!irq_can_set_affinity(irq)) | 288 | if (!irq_can_set_affinity(irq)) |
164 | return 0; | 289 | return 0; |
165 | 290 | ||
@@ -167,22 +292,27 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
167 | * Preserve an userspace affinity setup, but make sure that | 292 | * Preserve an userspace affinity setup, but make sure that |
168 | * one of the targets is online. | 293 | * one of the targets is online. |
169 | */ | 294 | */ |
170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 295 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
171 | if (cpumask_any_and(desc->affinity, cpu_online_mask) | 296 | if (cpumask_intersects(desc->irq_data.affinity, |
172 | < nr_cpu_ids) | 297 | cpu_online_mask)) |
173 | goto set_affinity; | 298 | set = desc->irq_data.affinity; |
174 | else | 299 | else |
175 | desc->status &= ~IRQ_AFFINITY_SET; | 300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
176 | } | 301 | } |
177 | 302 | ||
178 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); | 303 | cpumask_and(mask, cpu_online_mask, set); |
179 | set_affinity: | 304 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
180 | desc->chip->set_affinity(irq, desc->affinity); | 305 | switch (ret) { |
181 | 306 | case IRQ_SET_MASK_OK: | |
307 | cpumask_copy(desc->irq_data.affinity, mask); | ||
308 | case IRQ_SET_MASK_OK_NOCOPY: | ||
309 | irq_set_thread_affinity(desc); | ||
310 | } | ||
182 | return 0; | 311 | return 0; |
183 | } | 312 | } |
184 | #else | 313 | #else |
185 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | 314 | static inline int |
315 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | ||
186 | { | 316 | { |
187 | return irq_select_affinity(irq); | 317 | return irq_select_affinity(irq); |
188 | } | 318 | } |
@@ -191,23 +321,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d) | |||
191 | /* | 321 | /* |
192 | * Called when affinity is set via /proc/irq | 322 | * Called when affinity is set via /proc/irq |
193 | */ | 323 | */ |
194 | int irq_select_affinity_usr(unsigned int irq) | 324 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
195 | { | 325 | { |
196 | struct irq_desc *desc = irq_to_desc(irq); | 326 | struct irq_desc *desc = irq_to_desc(irq); |
197 | unsigned long flags; | 327 | unsigned long flags; |
198 | int ret; | 328 | int ret; |
199 | 329 | ||
200 | raw_spin_lock_irqsave(&desc->lock, flags); | 330 | raw_spin_lock_irqsave(&desc->lock, flags); |
201 | ret = setup_affinity(irq, desc); | 331 | ret = setup_affinity(irq, desc, mask); |
202 | if (!ret) | ||
203 | irq_set_thread_affinity(desc); | ||
204 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 332 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
205 | |||
206 | return ret; | 333 | return ret; |
207 | } | 334 | } |
208 | 335 | ||
209 | #else | 336 | #else |
210 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) | 337 | static inline int |
338 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | ||
211 | { | 339 | { |
212 | return 0; | 340 | return 0; |
213 | } | 341 | } |
@@ -218,13 +346,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
218 | if (suspend) { | 346 | if (suspend) { |
219 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 347 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
220 | return; | 348 | return; |
221 | desc->status |= IRQ_SUSPENDED; | 349 | desc->istate |= IRQS_SUSPENDED; |
222 | } | 350 | } |
223 | 351 | ||
224 | if (!desc->depth++) { | 352 | if (!desc->depth++) |
225 | desc->status |= IRQ_DISABLED; | 353 | irq_disable(desc); |
226 | desc->chip->disable(irq); | 354 | } |
227 | } | 355 | |
356 | static int __disable_irq_nosync(unsigned int irq) | ||
357 | { | ||
358 | unsigned long flags; | ||
359 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
360 | |||
361 | if (!desc) | ||
362 | return -EINVAL; | ||
363 | __disable_irq(desc, irq, false); | ||
364 | irq_put_desc_busunlock(desc, flags); | ||
365 | return 0; | ||
228 | } | 366 | } |
229 | 367 | ||
230 | /** | 368 | /** |
@@ -240,17 +378,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
240 | */ | 378 | */ |
241 | void disable_irq_nosync(unsigned int irq) | 379 | void disable_irq_nosync(unsigned int irq) |
242 | { | 380 | { |
243 | struct irq_desc *desc = irq_to_desc(irq); | 381 | __disable_irq_nosync(irq); |
244 | unsigned long flags; | ||
245 | |||
246 | if (!desc) | ||
247 | return; | ||
248 | |||
249 | chip_bus_lock(irq, desc); | ||
250 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
251 | __disable_irq(desc, irq, false); | ||
252 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
253 | chip_bus_sync_unlock(irq, desc); | ||
254 | } | 382 | } |
255 | EXPORT_SYMBOL(disable_irq_nosync); | 383 | EXPORT_SYMBOL(disable_irq_nosync); |
256 | 384 | ||
@@ -268,21 +396,24 @@ EXPORT_SYMBOL(disable_irq_nosync); | |||
268 | */ | 396 | */ |
269 | void disable_irq(unsigned int irq) | 397 | void disable_irq(unsigned int irq) |
270 | { | 398 | { |
271 | struct irq_desc *desc = irq_to_desc(irq); | 399 | if (!__disable_irq_nosync(irq)) |
272 | |||
273 | if (!desc) | ||
274 | return; | ||
275 | |||
276 | disable_irq_nosync(irq); | ||
277 | if (desc->action) | ||
278 | synchronize_irq(irq); | 400 | synchronize_irq(irq); |
279 | } | 401 | } |
280 | EXPORT_SYMBOL(disable_irq); | 402 | EXPORT_SYMBOL(disable_irq); |
281 | 403 | ||
282 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 404 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
283 | { | 405 | { |
284 | if (resume) | 406 | if (resume) { |
285 | desc->status &= ~IRQ_SUSPENDED; | 407 | if (!(desc->istate & IRQS_SUSPENDED)) { |
408 | if (!desc->action) | ||
409 | return; | ||
410 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
411 | return; | ||
412 | /* Pretend that it got disabled ! */ | ||
413 | desc->depth++; | ||
414 | } | ||
415 | desc->istate &= ~IRQS_SUSPENDED; | ||
416 | } | ||
286 | 417 | ||
287 | switch (desc->depth) { | 418 | switch (desc->depth) { |
288 | case 0: | 419 | case 0: |
@@ -290,12 +421,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
290 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 421 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
291 | break; | 422 | break; |
292 | case 1: { | 423 | case 1: { |
293 | unsigned int status = desc->status & ~IRQ_DISABLED; | 424 | if (desc->istate & IRQS_SUSPENDED) |
294 | |||
295 | if (desc->status & IRQ_SUSPENDED) | ||
296 | goto err_out; | 425 | goto err_out; |
297 | /* Prevent probing on this irq: */ | 426 | /* Prevent probing on this irq: */ |
298 | desc->status = status | IRQ_NOPROBE; | 427 | irq_settings_set_noprobe(desc); |
428 | irq_enable(desc); | ||
299 | check_irq_resend(desc, irq); | 429 | check_irq_resend(desc, irq); |
300 | /* fall-through */ | 430 | /* fall-through */ |
301 | } | 431 | } |
@@ -313,21 +443,22 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
313 | * IRQ line is re-enabled. | 443 | * IRQ line is re-enabled. |
314 | * | 444 | * |
315 | * This function may be called from IRQ context only when | 445 | * This function may be called from IRQ context only when |
316 | * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 446 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
317 | */ | 447 | */ |
318 | void enable_irq(unsigned int irq) | 448 | void enable_irq(unsigned int irq) |
319 | { | 449 | { |
320 | struct irq_desc *desc = irq_to_desc(irq); | ||
321 | unsigned long flags; | 450 | unsigned long flags; |
451 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
322 | 452 | ||
323 | if (!desc) | 453 | if (!desc) |
324 | return; | 454 | return; |
455 | if (WARN(!desc->irq_data.chip, | ||
456 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | ||
457 | goto out; | ||
325 | 458 | ||
326 | chip_bus_lock(irq, desc); | ||
327 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
328 | __enable_irq(desc, irq, false); | 459 | __enable_irq(desc, irq, false); |
329 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 460 | out: |
330 | chip_bus_sync_unlock(irq, desc); | 461 | irq_put_desc_busunlock(desc, flags); |
331 | } | 462 | } |
332 | EXPORT_SYMBOL(enable_irq); | 463 | EXPORT_SYMBOL(enable_irq); |
333 | 464 | ||
@@ -336,14 +467,14 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
336 | struct irq_desc *desc = irq_to_desc(irq); | 467 | struct irq_desc *desc = irq_to_desc(irq); |
337 | int ret = -ENXIO; | 468 | int ret = -ENXIO; |
338 | 469 | ||
339 | if (desc->chip->set_wake) | 470 | if (desc->irq_data.chip->irq_set_wake) |
340 | ret = desc->chip->set_wake(irq, on); | 471 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); |
341 | 472 | ||
342 | return ret; | 473 | return ret; |
343 | } | 474 | } |
344 | 475 | ||
345 | /** | 476 | /** |
346 | * set_irq_wake - control irq power management wakeup | 477 | * irq_set_irq_wake - control irq power management wakeup |
347 | * @irq: interrupt to control | 478 | * @irq: interrupt to control |
348 | * @on: enable/disable power management wakeup | 479 | * @on: enable/disable power management wakeup |
349 | * | 480 | * |
@@ -354,23 +485,25 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
354 | * Wakeup mode lets this IRQ wake the system from sleep | 485 | * Wakeup mode lets this IRQ wake the system from sleep |
355 | * states like "suspend to RAM". | 486 | * states like "suspend to RAM". |
356 | */ | 487 | */ |
357 | int set_irq_wake(unsigned int irq, unsigned int on) | 488 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
358 | { | 489 | { |
359 | struct irq_desc *desc = irq_to_desc(irq); | ||
360 | unsigned long flags; | 490 | unsigned long flags; |
491 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | ||
361 | int ret = 0; | 492 | int ret = 0; |
362 | 493 | ||
494 | if (!desc) | ||
495 | return -EINVAL; | ||
496 | |||
363 | /* wakeup-capable irqs can be shared between drivers that | 497 | /* wakeup-capable irqs can be shared between drivers that |
364 | * don't need to have the same sleep mode behaviors. | 498 | * don't need to have the same sleep mode behaviors. |
365 | */ | 499 | */ |
366 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
367 | if (on) { | 500 | if (on) { |
368 | if (desc->wake_depth++ == 0) { | 501 | if (desc->wake_depth++ == 0) { |
369 | ret = set_irq_wake_real(irq, on); | 502 | ret = set_irq_wake_real(irq, on); |
370 | if (ret) | 503 | if (ret) |
371 | desc->wake_depth = 0; | 504 | desc->wake_depth = 0; |
372 | else | 505 | else |
373 | desc->status |= IRQ_WAKEUP; | 506 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
374 | } | 507 | } |
375 | } else { | 508 | } else { |
376 | if (desc->wake_depth == 0) { | 509 | if (desc->wake_depth == 0) { |
@@ -380,14 +513,13 @@ int set_irq_wake(unsigned int irq, unsigned int on) | |||
380 | if (ret) | 513 | if (ret) |
381 | desc->wake_depth = 1; | 514 | desc->wake_depth = 1; |
382 | else | 515 | else |
383 | desc->status &= ~IRQ_WAKEUP; | 516 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
384 | } | 517 | } |
385 | } | 518 | } |
386 | 519 | irq_put_desc_busunlock(desc, flags); | |
387 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
388 | return ret; | 520 | return ret; |
389 | } | 521 | } |
390 | EXPORT_SYMBOL(set_irq_wake); | 522 | EXPORT_SYMBOL(irq_set_irq_wake); |
391 | 523 | ||
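set_irq_wake() is renamed to irq_set_irq_wake() above; drivers normally reach it through the enable_irq_wake()/disable_irq_wake() wrappers. A hedged suspend/resume sketch with a hypothetical "foo" device context:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

struct foo_ctx { int irq; };

static int foo_suspend(struct device *dev)
{
	struct foo_ctx *ctx = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(ctx->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_ctx *ctx = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(ctx->irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}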
392 | /* | 524 | /* |
393 | * Internal function that tells the architecture code whether a | 525 | * Internal function that tells the architecture code whether a |
@@ -396,45 +528,29 @@ EXPORT_SYMBOL(set_irq_wake); | |||
396 | */ | 528 | */ |
397 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 529 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
398 | { | 530 | { |
399 | struct irq_desc *desc = irq_to_desc(irq); | ||
400 | struct irqaction *action; | ||
401 | unsigned long flags; | 531 | unsigned long flags; |
532 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | ||
533 | int canrequest = 0; | ||
402 | 534 | ||
403 | if (!desc) | 535 | if (!desc) |
404 | return 0; | 536 | return 0; |
405 | 537 | ||
406 | if (desc->status & IRQ_NOREQUEST) | 538 | if (irq_settings_can_request(desc)) { |
407 | return 0; | 539 | if (desc->action) |
408 | 540 | if (irqflags & desc->action->flags & IRQF_SHARED) | |
409 | raw_spin_lock_irqsave(&desc->lock, flags); | 541 | canrequest = 1; |
410 | action = desc->action; | 542 | } |
411 | if (action) | 543 | irq_put_desc_unlock(desc, flags); |
412 | if (irqflags & action->flags & IRQF_SHARED) | 544 | return canrequest; |
413 | action = NULL; | ||
414 | |||
415 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
416 | |||
417 | return !action; | ||
418 | } | ||
419 | |||
420 | void compat_irq_chip_set_default_handler(struct irq_desc *desc) | ||
421 | { | ||
422 | /* | ||
423 | * If the architecture still has not overriden | ||
424 | * the flow handler then zap the default. This | ||
425 | * should catch incorrect flow-type setting. | ||
426 | */ | ||
427 | if (desc->handle_irq == &handle_bad_irq) | ||
428 | desc->handle_irq = NULL; | ||
429 | } | 545 | } |
430 | 546 | ||
431 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 547 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
432 | unsigned long flags) | 548 | unsigned long flags) |
433 | { | 549 | { |
434 | int ret; | 550 | struct irq_chip *chip = desc->irq_data.chip; |
435 | struct irq_chip *chip = desc->chip; | 551 | int ret, unmask = 0; |
436 | 552 | ||
437 | if (!chip || !chip->set_type) { | 553 | if (!chip || !chip->irq_set_type) { |
438 | /* | 554 | /* |
439 | * IRQF_TRIGGER_* but the PIC does not support multiple | 555 | * IRQF_TRIGGER_* but the PIC does not support multiple |
440 | * flow-types? | 556 | * flow-types? |
@@ -444,23 +560,41 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
444 | return 0; | 560 | return 0; |
445 | } | 561 | } |
446 | 562 | ||
447 | /* caller masked out all except trigger mode flags */ | 563 | flags &= IRQ_TYPE_SENSE_MASK; |
448 | ret = chip->set_type(irq, flags); | 564 | |
449 | 565 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | |
450 | if (ret) | 566 | if (!irqd_irq_masked(&desc->irq_data)) |
451 | pr_err("setting trigger mode %d for irq %u failed (%pF)\n", | 567 | mask_irq(desc); |
452 | (int)flags, irq, chip->set_type); | 568 | if (!irqd_irq_disabled(&desc->irq_data)) |
453 | else { | 569 | unmask = 1; |
454 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | ||
455 | flags |= IRQ_LEVEL; | ||
456 | /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ | ||
457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | ||
458 | desc->status |= flags; | ||
459 | |||
460 | if (chip != desc->chip) | ||
461 | irq_chip_set_defaults(desc->chip); | ||
462 | } | 570 | } |
463 | 571 | ||
572 | /* caller masked out all except trigger mode flags */ | ||
573 | ret = chip->irq_set_type(&desc->irq_data, flags); | ||
574 | |||
575 | switch (ret) { | ||
576 | case IRQ_SET_MASK_OK: | ||
577 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | ||
578 | irqd_set(&desc->irq_data, flags); | ||
579 | |||
580 | case IRQ_SET_MASK_OK_NOCOPY: | ||
581 | flags = irqd_get_trigger_type(&desc->irq_data); | ||
582 | irq_settings_set_trigger_mask(desc, flags); | ||
583 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | ||
584 | irq_settings_clr_level(desc); | ||
585 | if (flags & IRQ_TYPE_LEVEL_MASK) { | ||
586 | irq_settings_set_level(desc); | ||
587 | irqd_set(&desc->irq_data, IRQD_LEVEL); | ||
588 | } | ||
589 | |||
590 | ret = 0; | ||
591 | break; | ||
592 | default: | ||
593 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | ||
594 | flags, irq, chip->irq_set_type); | ||
595 | } | ||
596 | if (unmask) | ||
597 | unmask_irq(desc); | ||
464 | return ret; | 598 | return ret; |
465 | } | 599 | } |
466 | 600 | ||
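The reworked __irq_set_trigger() expects the chip's irq_set_type() callback to return one of the IRQ_SET_MASK_* codes, and masks the line around the call when the chip advertises IRQCHIP_SET_TYPE_MASKED. A hedged chip-side sketch (hypothetical "foo" controller, register programming elided):

#include <linux/irq.h>

static int foo_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	switch (flow_type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		/* program the hardware trigger registers here */
		return IRQ_SET_MASK_OK;	/* core copies the trigger bits */
	default:
		return -EINVAL;
	}
}

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_set_type	= foo_irq_set_type,
	/* ask the core to mask the line while the trigger changes */
	.flags		= IRQCHIP_SET_TYPE_MASKED,
};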
@@ -504,10 +638,13 @@ static int irq_wait_for_interrupt(struct irqaction *action) | |||
504 | * handler finished. unmask if the interrupt has not been disabled and | 638 | * handler finished. unmask if the interrupt has not been disabled and |
505 | * is marked MASKED. | 639 | * is marked MASKED. |
506 | */ | 640 | */ |
507 | static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) | 641 | static void irq_finalize_oneshot(struct irq_desc *desc, |
642 | struct irqaction *action, bool force) | ||
508 | { | 643 | { |
644 | if (!(desc->istate & IRQS_ONESHOT)) | ||
645 | return; | ||
509 | again: | 646 | again: |
510 | chip_bus_lock(irq, desc); | 647 | chip_bus_lock(desc); |
511 | raw_spin_lock_irq(&desc->lock); | 648 | raw_spin_lock_irq(&desc->lock); |
512 | 649 | ||
513 | /* | 650 | /* |
@@ -517,26 +654,42 @@ again: | |||
517 | * The thread is faster done than the hard interrupt handler | 654 | * The thread is faster done than the hard interrupt handler |
518 | * on the other CPU. If we unmask the irq line then the | 655 | * on the other CPU. If we unmask the irq line then the |
519 | * interrupt can come in again and masks the line, leaves due | 656 | * interrupt can come in again and masks the line, leaves due |
520 | * to IRQ_INPROGRESS and the irq line is masked forever. | 657 | * to IRQS_INPROGRESS and the irq line is masked forever. |
658 | * | ||
659 | * This also serializes the state of shared oneshot handlers | ||
660 | * versus "desc->threads_oneshot |= action->thread_mask;" in | ||
661 | * irq_wake_thread(). See the comment there which explains the | ||
662 | * serialization. | ||
521 | */ | 663 | */ |
522 | if (unlikely(desc->status & IRQ_INPROGRESS)) { | 664 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
523 | raw_spin_unlock_irq(&desc->lock); | 665 | raw_spin_unlock_irq(&desc->lock); |
524 | chip_bus_sync_unlock(irq, desc); | 666 | chip_bus_sync_unlock(desc); |
525 | cpu_relax(); | 667 | cpu_relax(); |
526 | goto again; | 668 | goto again; |
527 | } | 669 | } |
528 | 670 | ||
529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 671 | /* |
530 | desc->status &= ~IRQ_MASKED; | 672 | * Now check again, whether the thread should run. Otherwise |
531 | desc->chip->unmask(irq); | 673 | * we would clear the threads_oneshot bit of this thread which |
532 | } | 674 | * was just set. |
675 | */ | ||
676 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | ||
677 | goto out_unlock; | ||
678 | |||
679 | desc->threads_oneshot &= ~action->thread_mask; | ||
680 | |||
681 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | ||
682 | irqd_irq_masked(&desc->irq_data)) | ||
683 | unmask_irq(desc); | ||
684 | |||
685 | out_unlock: | ||
533 | raw_spin_unlock_irq(&desc->lock); | 686 | raw_spin_unlock_irq(&desc->lock); |
534 | chip_bus_sync_unlock(irq, desc); | 687 | chip_bus_sync_unlock(desc); |
535 | } | 688 | } |
536 | 689 | ||
537 | #ifdef CONFIG_SMP | 690 | #ifdef CONFIG_SMP |
538 | /* | 691 | /* |
539 | * Check whether we need to change the affinity of the interrupt thread. | 692 | * Check whether we need to change the affinity of the interrupt thread. |
540 | */ | 693 | */ |
541 | static void | 694 | static void |
542 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 695 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
@@ -556,7 +709,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |||
556 | } | 709 | } |
557 | 710 | ||
558 | raw_spin_lock_irq(&desc->lock); | 711 | raw_spin_lock_irq(&desc->lock); |
559 | cpumask_copy(mask, desc->affinity); | 712 | cpumask_copy(mask, desc->irq_data.affinity); |
560 | raw_spin_unlock_irq(&desc->lock); | 713 | raw_spin_unlock_irq(&desc->lock); |
561 | 714 | ||
562 | set_cpus_allowed_ptr(current, mask); | 715 | set_cpus_allowed_ptr(current, mask); |
@@ -568,14 +721,57 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | |||
568 | #endif | 721 | #endif |
569 | 722 | ||
570 | /* | 723 | /* |
724 | * Interrupts which are not explicitly requested as threaded | ||
725 | * interrupts rely on the implicit bh/preempt disable of the hard irq | ||
726 | * context. So we need to disable bh here to avoid deadlocks and other | ||
727 | * side effects. | ||
728 | */ | ||
729 | static irqreturn_t | ||
730 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | ||
731 | { | ||
732 | irqreturn_t ret; | ||
733 | |||
734 | local_bh_disable(); | ||
735 | ret = action->thread_fn(action->irq, action->dev_id); | ||
736 | irq_finalize_oneshot(desc, action, false); | ||
737 | local_bh_enable(); | ||
738 | return ret; | ||
739 | } | ||
740 | |||
741 | /* | ||
742 | * Interrupts explicitly requested as threaded interrupts want to be | ||
743 | * preemptible - many of them need to sleep and wait for slow busses to | ||
744 | * complete. | ||
745 | */ | ||
746 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, | ||
747 | struct irqaction *action) | ||
748 | { | ||
749 | irqreturn_t ret; | ||
750 | |||
751 | ret = action->thread_fn(action->irq, action->dev_id); | ||
752 | irq_finalize_oneshot(desc, action, false); | ||
753 | return ret; | ||
754 | } | ||
755 | |||
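irq_forced_thread_fn() and irq_thread_fn() above are the two thread-side entry points; an explicitly threaded request that ends up in irq_thread_fn() looks roughly like this (hedged sketch, hypothetical slow-bus device):

#include <linux/interrupt.h>

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	/* hardirq context: just acknowledge and defer to the thread */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* may sleep: talk to the device over the slow bus, then finish */
	return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, foo_quick_check, foo_thread_fn,
				    IRQF_ONESHOT, "foo", dev);
}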
756 | /* | ||
571 | * Interrupt handler thread | 757 | * Interrupt handler thread |
572 | */ | 758 | */ |
573 | static int irq_thread(void *data) | 759 | static int irq_thread(void *data) |
574 | { | 760 | { |
575 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; | 761 | static const struct sched_param param = { |
762 | .sched_priority = MAX_USER_RT_PRIO/2, | ||
763 | }; | ||
576 | struct irqaction *action = data; | 764 | struct irqaction *action = data; |
577 | struct irq_desc *desc = irq_to_desc(action->irq); | 765 | struct irq_desc *desc = irq_to_desc(action->irq); |
578 | int wake, oneshot = desc->status & IRQ_ONESHOT; | 766 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
767 | struct irqaction *action); | ||
768 | int wake; | ||
769 | |||
770 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | ||
771 | &action->thread_flags)) | ||
772 | handler_fn = irq_forced_thread_fn; | ||
773 | else | ||
774 | handler_fn = irq_thread_fn; | ||
579 | 775 | ||
580 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 776 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
581 | current->irqaction = action; | 777 | current->irqaction = action; |
@@ -587,23 +783,23 @@ static int irq_thread(void *data) | |||
587 | atomic_inc(&desc->threads_active); | 783 | atomic_inc(&desc->threads_active); |
588 | 784 | ||
589 | raw_spin_lock_irq(&desc->lock); | 785 | raw_spin_lock_irq(&desc->lock); |
590 | if (unlikely(desc->status & IRQ_DISABLED)) { | 786 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { |
591 | /* | 787 | /* |
592 | * CHECKME: We might need a dedicated | 788 | * CHECKME: We might need a dedicated |
593 | * IRQ_THREAD_PENDING flag here, which | 789 | * IRQ_THREAD_PENDING flag here, which |
594 | * retriggers the thread in check_irq_resend() | 790 | * retriggers the thread in check_irq_resend() |
595 | * but AFAICT IRQ_PENDING should be fine as it | 791 | * but AFAICT IRQS_PENDING should be fine as it |
596 | * retriggers the interrupt itself --- tglx | 792 | * retriggers the interrupt itself --- tglx |
597 | */ | 793 | */ |
598 | desc->status |= IRQ_PENDING; | 794 | desc->istate |= IRQS_PENDING; |
599 | raw_spin_unlock_irq(&desc->lock); | 795 | raw_spin_unlock_irq(&desc->lock); |
600 | } else { | 796 | } else { |
601 | raw_spin_unlock_irq(&desc->lock); | 797 | irqreturn_t action_ret; |
602 | |||
603 | action->thread_fn(action->irq, action->dev_id); | ||
604 | 798 | ||
605 | if (oneshot) | 799 | raw_spin_unlock_irq(&desc->lock); |
606 | irq_finalize_oneshot(action->irq, desc); | 800 | action_ret = handler_fn(desc, action); |
801 | if (!noirqdebug) | ||
802 | note_interrupt(action->irq, desc, action_ret); | ||
607 | } | 803 | } |
608 | 804 | ||
609 | wake = atomic_dec_and_test(&desc->threads_active); | 805 | wake = atomic_dec_and_test(&desc->threads_active); |
@@ -612,6 +808,9 @@ static int irq_thread(void *data) | |||
612 | wake_up(&desc->wait_for_threads); | 808 | wake_up(&desc->wait_for_threads); |
613 | } | 809 | } |
614 | 810 | ||
811 | /* Prevent a stale desc->threads_oneshot */ | ||
812 | irq_finalize_oneshot(desc, action, true); | ||
813 | |||
615 | /* | 814 | /* |
616 | * Clear irqaction. Otherwise exit_irq_thread() would make | 815 | * Clear irqaction. Otherwise exit_irq_thread() would make |
617 | * fuzz about an active irq thread going into nirvana. | 816 | * fuzz about an active irq thread going into nirvana. |
@@ -626,6 +825,7 @@ static int irq_thread(void *data) | |||
626 | void exit_irq_thread(void) | 825 | void exit_irq_thread(void) |
627 | { | 826 | { |
628 | struct task_struct *tsk = current; | 827 | struct task_struct *tsk = current; |
828 | struct irq_desc *desc; | ||
629 | 829 | ||
630 | if (!tsk->irqaction) | 830 | if (!tsk->irqaction) |
631 | return; | 831 | return; |
@@ -634,6 +834,14 @@ void exit_irq_thread(void) | |||
634 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 834 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
635 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | 835 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); |
636 | 836 | ||
837 | desc = irq_to_desc(tsk->irqaction->irq); | ||
838 | |||
839 | /* | ||
840 | * Prevent a stale desc->threads_oneshot. Must be called | ||
841 | * before setting the IRQTF_DIED flag. | ||
842 | */ | ||
843 | irq_finalize_oneshot(desc, tsk->irqaction, true); | ||
844 | |||
637 | /* | 845 | /* |
638 | * Set the THREAD DIED flag to prevent further wakeups of the | 846 | * Set the THREAD DIED flag to prevent further wakeups of the |
639 | * soon to be gone threaded handler. | 847 | * soon to be gone threaded handler. |
@@ -641,6 +849,22 @@ void exit_irq_thread(void) | |||
641 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | 849 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); |
642 | } | 850 | } |
643 | 851 | ||
852 | static void irq_setup_forced_threading(struct irqaction *new) | ||
853 | { | ||
854 | if (!force_irqthreads) | ||
855 | return; | ||
856 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | ||
857 | return; | ||
858 | |||
859 | new->flags |= IRQF_ONESHOT; | ||
860 | |||
861 | if (!new->thread_fn) { | ||
862 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | ||
863 | new->thread_fn = new->handler; | ||
864 | new->handler = irq_default_primary_handler; | ||
865 | } | ||
866 | } | ||
867 | |||
644 | /* | 868 | /* |
645 | * Internal function to register an irqaction - typically used to | 869 | * Internal function to register an irqaction - typically used to |
646 | * allocate special interrupts that are part of the architecture. | 870 | * allocate special interrupts that are part of the architecture. |
@@ -650,14 +874,14 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
650 | { | 874 | { |
651 | struct irqaction *old, **old_ptr; | 875 | struct irqaction *old, **old_ptr; |
652 | const char *old_name = NULL; | 876 | const char *old_name = NULL; |
653 | unsigned long flags; | 877 | unsigned long flags, thread_mask = 0; |
654 | int nested, shared = 0; | 878 | int ret, nested, shared = 0; |
655 | int ret; | 879 | cpumask_var_t mask; |
656 | 880 | ||
657 | if (!desc) | 881 | if (!desc) |
658 | return -EINVAL; | 882 | return -EINVAL; |
659 | 883 | ||
660 | if (desc->chip == &no_irq_chip) | 884 | if (desc->irq_data.chip == &no_irq_chip) |
661 | return -ENOSYS; | 885 | return -ENOSYS; |
662 | /* | 886 | /* |
663 | * Some drivers like serial.c use request_irq() heavily, | 887 | * Some drivers like serial.c use request_irq() heavily, |
@@ -676,15 +900,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
676 | rand_initialize_irq(irq); | 900 | rand_initialize_irq(irq); |
677 | } | 901 | } |
678 | 902 | ||
679 | /* Oneshot interrupts are not allowed with shared */ | ||
680 | if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED)) | ||
681 | return -EINVAL; | ||
682 | |||
683 | /* | 903 | /* |
684 | * Check whether the interrupt nests into another interrupt | 904 | * Check whether the interrupt nests into another interrupt |
685 | * thread. | 905 | * thread. |
686 | */ | 906 | */ |
687 | nested = desc->status & IRQ_NESTED_THREAD; | 907 | nested = irq_settings_is_nested_thread(desc); |
688 | if (nested) { | 908 | if (nested) { |
689 | if (!new->thread_fn) | 909 | if (!new->thread_fn) |
690 | return -EINVAL; | 910 | return -EINVAL; |
@@ -694,6 +914,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
694 | * dummy function which warns when called. | 914 | * dummy function which warns when called. |
695 | */ | 915 | */ |
696 | new->handler = irq_nested_primary_handler; | 916 | new->handler = irq_nested_primary_handler; |
917 | } else { | ||
918 | if (irq_settings_can_thread(desc)) | ||
919 | irq_setup_forced_threading(new); | ||
697 | } | 920 | } |
698 | 921 | ||
699 | /* | 922 | /* |
@@ -717,6 +940,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
717 | new->thread = t; | 940 | new->thread = t; |
718 | } | 941 | } |
719 | 942 | ||
943 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
944 | ret = -ENOMEM; | ||
945 | goto out_thread; | ||
946 | } | ||
947 | |||
720 | /* | 948 | /* |
721 | * The following block of code has to be executed atomically | 949 | * The following block of code has to be executed atomically |
722 | */ | 950 | */ |
@@ -728,32 +956,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
728 | * Can't share interrupts unless both agree to and are | 956 | * Can't share interrupts unless both agree to and are |
729 | * the same type (level, edge, polarity). So both flag | 957 | * the same type (level, edge, polarity). So both flag |
730 | * fields must have IRQF_SHARED set and the bits which | 958 | * fields must have IRQF_SHARED set and the bits which |
731 | * set the trigger type must match. | 959 | * set the trigger type must match. Also all must |
960 | * agree on ONESHOT. | ||
732 | */ | 961 | */ |
733 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 962 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
734 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { | 963 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
964 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { | ||
735 | old_name = old->name; | 965 | old_name = old->name; |
736 | goto mismatch; | 966 | goto mismatch; |
737 | } | 967 | } |
738 | 968 | ||
739 | #if defined(CONFIG_IRQ_PER_CPU) | ||
740 | /* All handlers must agree on per-cpuness */ | 969 | /* All handlers must agree on per-cpuness */ |
741 | if ((old->flags & IRQF_PERCPU) != | 970 | if ((old->flags & IRQF_PERCPU) != |
742 | (new->flags & IRQF_PERCPU)) | 971 | (new->flags & IRQF_PERCPU)) |
743 | goto mismatch; | 972 | goto mismatch; |
744 | #endif | ||
745 | 973 | ||
746 | /* add new interrupt at end of irq queue */ | 974 | /* add new interrupt at end of irq queue */ |
747 | do { | 975 | do { |
976 | thread_mask |= old->thread_mask; | ||
748 | old_ptr = &old->next; | 977 | old_ptr = &old->next; |
749 | old = *old_ptr; | 978 | old = *old_ptr; |
750 | } while (old); | 979 | } while (old); |
751 | shared = 1; | 980 | shared = 1; |
752 | } | 981 | } |
753 | 982 | ||
754 | if (!shared) { | 983 | /* |
755 | irq_chip_set_defaults(desc->chip); | 984 | * Setup the thread mask for this irqaction. Unlikely to have |
985 | * 32 resp 64 irqs sharing one line, but who knows. | ||
986 | */ | ||
987 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { | ||
988 | ret = -EBUSY; | ||
989 | goto out_mask; | ||
990 | } | ||
991 | new->thread_mask = 1 << ffz(thread_mask); | ||
756 | 992 | ||
993 | if (!shared) { | ||
757 | init_waitqueue_head(&desc->wait_for_threads); | 994 | init_waitqueue_head(&desc->wait_for_threads); |
758 | 995 | ||
759 | /* Setup the type (level, edge polarity) if configured: */ | 996 | /* Setup the type (level, edge polarity) if configured: */ |
@@ -762,42 +999,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
762 | new->flags & IRQF_TRIGGER_MASK); | 999 | new->flags & IRQF_TRIGGER_MASK); |
763 | 1000 | ||
764 | if (ret) | 1001 | if (ret) |
765 | goto out_thread; | 1002 | goto out_mask; |
766 | } else | 1003 | } |
767 | compat_irq_chip_set_default_handler(desc); | 1004 | |
768 | #if defined(CONFIG_IRQ_PER_CPU) | 1005 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
769 | if (new->flags & IRQF_PERCPU) | 1006 | IRQS_ONESHOT | IRQS_WAITING); |
770 | desc->status |= IRQ_PER_CPU; | 1007 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
771 | #endif | ||
772 | 1008 | ||
773 | desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT | | 1009 | if (new->flags & IRQF_PERCPU) { |
774 | IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED); | 1010 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
1011 | irq_settings_set_per_cpu(desc); | ||
1012 | } | ||
775 | 1013 | ||
776 | if (new->flags & IRQF_ONESHOT) | 1014 | if (new->flags & IRQF_ONESHOT) |
777 | desc->status |= IRQ_ONESHOT; | 1015 | desc->istate |= IRQS_ONESHOT; |
778 | 1016 | ||
779 | if (!(desc->status & IRQ_NOAUTOEN)) { | 1017 | if (irq_settings_can_autoenable(desc)) |
780 | desc->depth = 0; | 1018 | irq_startup(desc); |
781 | desc->status &= ~IRQ_DISABLED; | 1019 | else |
782 | desc->chip->startup(irq); | ||
783 | } else | ||
784 | /* Undo nested disables: */ | 1020 | /* Undo nested disables: */ |
785 | desc->depth = 1; | 1021 | desc->depth = 1; |
786 | 1022 | ||
787 | /* Exclude IRQ from balancing if requested */ | 1023 | /* Exclude IRQ from balancing if requested */ |
788 | if (new->flags & IRQF_NOBALANCING) | 1024 | if (new->flags & IRQF_NOBALANCING) { |
789 | desc->status |= IRQ_NO_BALANCING; | 1025 | irq_settings_set_no_balancing(desc); |
1026 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | ||
1027 | } | ||
790 | 1028 | ||
791 | /* Set default affinity mask once everything is setup */ | 1029 | /* Set default affinity mask once everything is setup */ |
792 | setup_affinity(irq, desc); | 1030 | setup_affinity(irq, desc, mask); |
793 | 1031 | ||
794 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 1032 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
795 | && (new->flags & IRQF_TRIGGER_MASK) | 1033 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
796 | != (desc->status & IRQ_TYPE_SENSE_MASK)) { | 1034 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
797 | /* hope the handler works with the actual trigger mode... */ | 1035 | |
798 | pr_warning("IRQ %d uses trigger mode %d; requested %d\n", | 1036 | if (nmsk != omsk) |
799 | irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), | 1037 | /* hope the handler works with current trigger mode */ |
800 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 1038 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", |
1039 | irq, nmsk, omsk); | ||
801 | } | 1040 | } |
802 | 1041 | ||
803 | new->irq = irq; | 1042 | new->irq = irq; |
@@ -811,8 +1050,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
811 | * Check whether we disabled the irq via the spurious handler | 1050 | * Check whether we disabled the irq via the spurious handler |
812 | * before. Reenable it and give it another chance. | 1051 | * before. Reenable it and give it another chance. |
813 | */ | 1052 | */ |
814 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 1053 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
815 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 1054 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
816 | __enable_irq(desc, irq, false); | 1055 | __enable_irq(desc, irq, false); |
817 | } | 1056 | } |
818 | 1057 | ||
@@ -828,6 +1067,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
828 | register_irq_proc(irq, desc); | 1067 | register_irq_proc(irq, desc); |
829 | new->dir = NULL; | 1068 | new->dir = NULL; |
830 | register_handler_proc(irq, new); | 1069 | register_handler_proc(irq, new); |
1070 | free_cpumask_var(mask); | ||
831 | 1071 | ||
832 | return 0; | 1072 | return 0; |
833 | 1073 | ||
@@ -842,8 +1082,11 @@ mismatch: | |||
842 | #endif | 1082 | #endif |
843 | ret = -EBUSY; | 1083 | ret = -EBUSY; |
844 | 1084 | ||
845 | out_thread: | 1085 | out_mask: |
846 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1086 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1087 | free_cpumask_var(mask); | ||
1088 | |||
1089 | out_thread: | ||
847 | if (new->thread) { | 1090 | if (new->thread) { |
848 | struct task_struct *t = new->thread; | 1091 | struct task_struct *t = new->thread; |
849 | 1092 | ||
@@ -864,9 +1107,14 @@ out_thread: | |||
864 | */ | 1107 | */ |
865 | int setup_irq(unsigned int irq, struct irqaction *act) | 1108 | int setup_irq(unsigned int irq, struct irqaction *act) |
866 | { | 1109 | { |
1110 | int retval; | ||
867 | struct irq_desc *desc = irq_to_desc(irq); | 1111 | struct irq_desc *desc = irq_to_desc(irq); |
868 | 1112 | ||
869 | return __setup_irq(irq, desc, act); | 1113 | chip_bus_lock(desc); |
1114 | retval = __setup_irq(irq, desc, act); | ||
1115 | chip_bus_sync_unlock(desc); | ||
1116 | |||
1117 | return retval; | ||
870 | } | 1118 | } |
871 | EXPORT_SYMBOL_GPL(setup_irq); | 1119 | EXPORT_SYMBOL_GPL(setup_irq); |
872 | 1120 | ||
@@ -912,18 +1160,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
912 | 1160 | ||
913 | /* Currently used only by UML, might disappear one day: */ | 1161 | /* Currently used only by UML, might disappear one day: */ |
914 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 1162 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
915 | if (desc->chip->release) | 1163 | if (desc->irq_data.chip->release) |
916 | desc->chip->release(irq, dev_id); | 1164 | desc->irq_data.chip->release(irq, dev_id); |
917 | #endif | 1165 | #endif |
918 | 1166 | ||
919 | /* If this was the last handler, shut down the IRQ line: */ | 1167 | /* If this was the last handler, shut down the IRQ line: */ |
920 | if (!desc->action) { | 1168 | if (!desc->action) |
921 | desc->status |= IRQ_DISABLED; | 1169 | irq_shutdown(desc); |
922 | if (desc->chip->shutdown) | ||
923 | desc->chip->shutdown(irq); | ||
924 | else | ||
925 | desc->chip->disable(irq); | ||
926 | } | ||
927 | 1170 | ||
928 | #ifdef CONFIG_SMP | 1171 | #ifdef CONFIG_SMP |
929 | /* make sure affinity_hint is cleaned up */ | 1172 | /* make sure affinity_hint is cleaned up */ |
@@ -997,9 +1240,14 @@ void free_irq(unsigned int irq, void *dev_id) | |||
997 | if (!desc) | 1240 | if (!desc) |
998 | return; | 1241 | return; |
999 | 1242 | ||
1000 | chip_bus_lock(irq, desc); | 1243 | #ifdef CONFIG_SMP |
1244 | if (WARN_ON(desc->affinity_notify)) | ||
1245 | desc->affinity_notify = NULL; | ||
1246 | #endif | ||
1247 | |||
1248 | chip_bus_lock(desc); | ||
1001 | kfree(__free_irq(irq, dev_id)); | 1249 | kfree(__free_irq(irq, dev_id)); |
1002 | chip_bus_sync_unlock(irq, desc); | 1250 | chip_bus_sync_unlock(desc); |
1003 | } | 1251 | } |
1004 | EXPORT_SYMBOL(free_irq); | 1252 | EXPORT_SYMBOL(free_irq); |
1005 | 1253 | ||
@@ -1067,7 +1315,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1067 | if (!desc) | 1315 | if (!desc) |
1068 | return -EINVAL; | 1316 | return -EINVAL; |
1069 | 1317 | ||
1070 | if (desc->status & IRQ_NOREQUEST) | 1318 | if (!irq_settings_can_request(desc)) |
1071 | return -EINVAL; | 1319 | return -EINVAL; |
1072 | 1320 | ||
1073 | if (!handler) { | 1321 | if (!handler) { |
@@ -1086,14 +1334,14 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1086 | action->name = devname; | 1334 | action->name = devname; |
1087 | action->dev_id = dev_id; | 1335 | action->dev_id = dev_id; |
1088 | 1336 | ||
1089 | chip_bus_lock(irq, desc); | 1337 | chip_bus_lock(desc); |
1090 | retval = __setup_irq(irq, desc, action); | 1338 | retval = __setup_irq(irq, desc, action); |
1091 | chip_bus_sync_unlock(irq, desc); | 1339 | chip_bus_sync_unlock(desc); |
1092 | 1340 | ||
1093 | if (retval) | 1341 | if (retval) |
1094 | kfree(action); | 1342 | kfree(action); |
1095 | 1343 | ||
1096 | #ifdef CONFIG_DEBUG_SHIRQ | 1344 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1097 | if (!retval && (irqflags & IRQF_SHARED)) { | 1345 | if (!retval && (irqflags & IRQF_SHARED)) { |
1098 | /* | 1346 | /* |
1099 | * It's a shared IRQ -- the driver ought to be prepared for it | 1347 | * It's a shared IRQ -- the driver ought to be prepared for it |
@@ -1142,7 +1390,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |||
1142 | if (!desc) | 1390 | if (!desc) |
1143 | return -EINVAL; | 1391 | return -EINVAL; |
1144 | 1392 | ||
1145 | if (desc->status & IRQ_NESTED_THREAD) { | 1393 | if (irq_settings_is_nested_thread(desc)) { |
1146 | ret = request_threaded_irq(irq, NULL, handler, | 1394 | ret = request_threaded_irq(irq, NULL, handler, |
1147 | flags, name, dev_id); | 1395 | flags, name, dev_id); |
1148 | return !ret ? IRQC_IS_NESTED : ret; | 1396 | return !ret ? IRQC_IS_NESTED : ret; |