author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /kernel/irq
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/autoprobe.c    |  20
-rw-r--r--  kernel/irq/chip.c         | 181
-rw-r--r--  kernel/irq/devres.c       |   4
-rw-r--r--  kernel/irq/handle.c       |  80
-rw-r--r--  kernel/irq/internals.h    |   8
-rw-r--r--  kernel/irq/manage.c       |  84
-rw-r--r--  kernel/irq/migration.c    |   2
-rw-r--r--  kernel/irq/numa_migrate.c |  13
-rw-r--r--  kernel/irq/pm.c           |   8
-rw-r--r--  kernel/irq/proc.c         |  45
-rw-r--r--  kernel/irq/spurious.c     |  30
11 files changed, 273 insertions, 202 deletions
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 1de9700f416e..2295a31ef110 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
			/*
			 * An old-style architecture might still have
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
			desc->chip->set_type(i, IRQ_TYPE_PROBE);
			desc->chip->startup(i);
		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
	 * happened in the previous stage, it may have masked itself)
	 */
	for_each_irq_desc_reverse(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->chip->startup(i))
				desc->status |= IRQ_PENDING;
		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
	}

	/*
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
	 * Now filter out any obviously spurious interrupts
	 */
	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
			if (i < 32)
				mask |= 1 << i;
		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
	}

	return mask;
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
	int i;

	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
			desc->status = status & ~IRQ_AUTODETECT;
			desc->chip->shutdown(i);
		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
	}
	mutex_unlock(&probing_active);

@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
	unsigned int status;

	for_each_irq_desc(i, desc) {
-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
			desc->status = status & ~IRQ_AUTODETECT;
			desc->chip->shutdown(i);
		}
-		spin_unlock_irq(&desc->lock);
+		raw_spin_unlock_irq(&desc->lock);
	}
	mutex_unlock(&probing_active);

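Note on the recurring change throughout this merge: every lock site on an irq descriptor switches from the spin_lock_* API to the raw_spin_lock_* API because the descriptor lock's type changes from spinlock_t to raw_spinlock_t (see the handle.c and internals.h hunks below). A minimal standalone sketch of the resulting pattern, with hypothetical demo_* names, assuming a kernel where spinlock_t may become a sleeping lock (as on preempt-rt) while raw_spinlock_t always spins:

	/*
	 * Illustrative sketch only (not part of the commit): the lock
	 * pattern the hunks above adopt. demo_desc is a hypothetical
	 * stand-in for struct irq_desc.
	 */
	#include <linux/spinlock.h>

	struct demo_desc {
		raw_spinlock_t lock;		/* was: spinlock_t */
		unsigned int status;
	};

	static void demo_set_status(struct demo_desc *d, unsigned int bits)
	{
		unsigned long flags;

		/* was: spin_lock_irqsave(&d->lock, flags); */
		raw_spin_lock_irqsave(&d->lock, flags);
		d->status |= bits;
		raw_spin_unlock_irqrestore(&d->lock, flags);
	}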
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c1660194d115..b7091d5ca2f8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@

 #include "internals.h"

-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 {
	struct irq_desc *desc;
	unsigned long flags;
@@ -34,14 +30,15 @@ void dynamic_irq_init(unsigned int irq)
	}

	/* Ensure we don't have left over values from a previous use of this irq */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status = IRQ_DISABLED;
	desc->chip = &no_irq_chip;
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
	desc->action = NULL;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
@@ -51,14 +48,30 @@ void dynamic_irq_init(unsigned int irq)
	cpumask_clear(desc->pending_mask);
 #endif
 #endif
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

 /**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * dynamic_irq_init - initialize a dynamically allocated irq
  * @irq: irq number to initialize
  */
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, false);
+}
+
+/**
+ * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
@@ -68,21 +81,42 @@ void dynamic_irq_cleanup(unsigned int irq)
		return;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->action) {
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
			irq);
		return;
	}
	desc->msi_desc = NULL;
	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
	desc->handle_irq = handle_bad_irq;
	desc->chip = &no_irq_chip;
	desc->name = NULL;
	clear_kstat_irqs(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, true);
 }


@@ -104,10 +138,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
	if (!chip)
		chip = &no_irq_chip;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	irq_chip_set_defaults(chip);
	desc->chip = chip;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
 }
@@ -133,9 +167,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
	if (type == IRQ_TYPE_NONE)
		return 0;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_trigger(desc, irq, type);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -158,19 +192,19 @@ int set_irq_data(unsigned int irq, void *data)
		return -EINVAL;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->handler_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
 }
 EXPORT_SYMBOL(set_irq_data);

 /**
- * set_irq_data - set irq type data for an irq
+ * set_irq_msi - set MSI descriptor data for an irq
  * @irq: Interrupt number
  * @entry: Pointer to MSI descriptor data
  *
- * Set the hardware irq controller data for an irq
+ * Set the MSI descriptor entry for an irq
  */
 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 {
@@ -183,11 +217,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
		return -EINVAL;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->msi_desc = entry;
	if (entry)
		entry->irq = irq;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
 }

@@ -214,9 +248,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
		return -EINVAL;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->chip_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
 }
@@ -241,12 +275,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
	if (!desc)
		return;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	if (nest)
		desc->status |= IRQ_NESTED_THREAD;
	else
		desc->status &= ~IRQ_NESTED_THREAD;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL_GPL(set_irq_nested_thread);

@@ -325,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
		if (desc->chip->ack)
			desc->chip->ack(irq);
	}
+	desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->mask) {
+		desc->chip->mask(irq);
+		desc->status |= IRQ_MASKED;
+	}
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->unmask) {
+		desc->chip->unmask(irq);
+		desc->status &= ~IRQ_MASKED;
+	}
 }

 /*
@@ -343,7 +394,7 @@ void handle_nested_irq(unsigned int irq)

	might_sleep();

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);

	kstat_incr_irqs_this_cpu(irq, desc);

@@ -352,17 +403,17 @@ void handle_nested_irq(unsigned int irq)
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);

	action_ret = action->thread_fn(action->irq, action->dev_id);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

 out_unlock:
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);

@@ -384,7 +435,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
	struct irqaction *action;
	irqreturn_t action_ret;

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
@@ -396,16 +447,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }

 /**
@@ -424,7 +475,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	struct irqaction *action;
	irqreturn_t action_ret;

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,21 +492,19 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
		goto out_unlock;

	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;

-	if (unlikely(desc->status & IRQ_ONESHOT))
-		desc->status |= IRQ_MASKED;
-	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-		desc->chip->unmask(irq);
+	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+		unmask_irq(desc, irq);
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);

@@ -475,7 +524,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	struct irqaction *action;
	irqreturn_t action_ret;

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;
@@ -490,25 +539,24 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
+		mask_irq(desc, irq);
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret);

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
 out:
	desc->chip->eoi(irq);

-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }

 /**
@@ -520,7 +568,7 @@ out:
  * signal. The occurence is latched into the irq controller hardware
  * and must be acked in order to be reenabled. After the ack another
  * interrupt can happen on the same source even before the first one
- * is handled by the assosiacted event handler. If this happens it
+ * is handled by the associated event handler. If this happens it
  * might be necessary to disable (mask) the interrupt depending on the
  * controller hardware. This requires to reenable the interrupt inside
  * of the loop which handles the interrupts which have arrived while
@@ -530,7 +578,7 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

@@ -559,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		irqreturn_t action_ret;

		if (unlikely(!action)) {
-			desc->chip->mask(irq);
+			mask_irq(desc, irq);
			goto out_unlock;
		}

@@ -571,26 +619,25 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
-			desc->chip->unmask(irq);
-			desc->status &= ~IRQ_MASKED;
+			unmask_irq(desc, irq);
		}

		desc->status &= ~IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);

	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }

 /**
- * handle_percpu_IRQ - Per CPU local irq handler
+ * handle_percpu_irq - Per CPU local irq handler
  * @irq: the interrupt number
  * @desc: the interrupt description structure for this irq
  *
@@ -643,7 +690,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
	}

	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
@@ -661,7 +708,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		desc->depth = 0;
		desc->chip->startup(irq);
	}
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -682,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
	__set_irq_handler(irq, handle, 0, name);
 }

-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
@@ -692,12 +739,12 @@ void __init set_irq_noprobe(unsigned int irq)
		return;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status |= IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }

-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
@@ -707,7 +754,7 @@ void __init set_irq_probe(unsigned int irq)
		return;
	}

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->status &= ~IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
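The new dynamic_irq_*_keep_chip_data() entry points exist so a controller that parks per-interrupt state in desc->chip_data can recycle a descriptor without losing that pointer. A hedged usage sketch (demo_recycle_irq() is hypothetical; the two *_keep_chip_data calls are the ones added above):

	/*
	 * Illustrative sketch only: recycling an irq descriptor while
	 * preserving chip_data across cleanup and re-init.
	 */
	#include <linux/irq.h>

	static void demo_recycle_irq(unsigned int irq)
	{
		/* Plain dynamic_irq_cleanup() would NULL desc->chip_data
		 * and drop the controller's only pointer to its state. */
		dynamic_irq_cleanup_keep_chip_data(irq);

		/* Re-initialize for reuse, again keeping chip_data. */
		dynamic_irq_init_keep_chip_data(irq);
	}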
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index d06df9c41cba..1ef4ffcdfa55 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -42,7 +42,7 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
  * automatically freed on driver detach.
  *
  * If an IRQ allocated with this function needs to be freed
- * separately, dev_free_irq() must be used.
+ * separately, devm_free_irq() must be used.
  */
 int devm_request_threaded_irq(struct device *dev, unsigned int irq,
			      irq_handler_t handler, irq_handler_t thread_fn,
@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_request_threaded_irq);
  * Except for the extra @dev argument, this function takes the
  * same arguments and performs the same function as free_irq().
  * This function instead of free_irq() should be used to manually
- * free IRQs allocated with dev_request_irq().
+ * free IRQs allocated with devm_request_irq().
  */
 void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
 {
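The two hunks above only correct function names in the devm_* docbook comments. For context, a minimal sketch of a (hypothetical) driver using the managed API those comments describe:

	/*
	 * Illustrative sketch only: the IRQ is freed automatically on
	 * driver detach; devm_free_irq() is needed only to free it
	 * earlier by hand. Names are hypothetical.
	 */
	#include <linux/device.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_probe(struct device *dev, unsigned int irq)
	{
		return devm_request_irq(dev, irq, demo_isr, IRQF_SHARED,
					"demo", dev);
	}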
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 17c71bb565c6..76d5a671bfe1 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/radix-tree.h>
 #include <trace/events/irq.h>

 #include "internals.h"
@@ -80,19 +80,15 @@ static struct irq_desc irq_desc_init = {
	.chip = &no_irq_chip,
	.handle_irq = handle_bad_irq,
	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };

 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
	void *ptr;

-	if (slab_is_available())
-		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-				   GFP_ATOMIC, node);
-	else
-		ptr = alloc_bootmem_node(NODE_DATA(node),
-				nr * sizeof(*desc->kstat_irqs));
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+			   GFP_ATOMIC, node);

	/*
	 * don't overwite if can not get new one
@@ -108,7 +104,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));

-	spin_lock_init(&desc->lock);
+	raw_spin_lock_init(&desc->lock);
	desc->irq = irq;
 #ifdef CONFIG_SMP
	desc->node = node;
@@ -130,9 +126,28 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 /*
  * Protect the sparse_irqs:
  */
-DEFINE_SPINLOCK(sparse_irq_lock);
+DEFINE_RAW_SPINLOCK(sparse_irq_lock);

-struct irq_desc **irq_desc_ptrs __read_mostly;
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	void **ptr;
+
+	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+	if (ptr)
+		radix_tree_replace_slot(ptr, desc);
+}

 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS_LEGACY-1] = {
@@ -141,7 +156,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
	}
 };

@@ -164,9 +179,6 @@ int __init early_irq_init(void)
	legacy_count = ARRAY_SIZE(irq_desc_legacy);
	node = first_online_node;

-	/* allocate irq_desc_ptrs array based on nr_irqs */
-	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
-
	/* allocate based on nr_cpu_ids */
	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
					  sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		alloc_desc_masks(&desc[i], node, true);
		init_desc_masks(&desc[i]);
-		irq_desc_ptrs[i] = desc + i;
+		set_irq_desc(i, &desc[i]);
	}

-	for (i = legacy_count; i < nr_irqs; i++)
-		irq_desc_ptrs[i] = NULL;
-
	return arch_early_irq_init();
 }

-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	if (irq_desc_ptrs && irq < nr_irqs)
-		return irq_desc_ptrs[irq];
-
-	return NULL;
-}
-
 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
	struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
		return NULL;
	}

-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
	if (desc)
		return desc;

-	spin_lock_irqsave(&sparse_irq_lock, flags);
+	raw_spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
	if (desc)
		goto out_unlock;

-	if (slab_is_available())
-		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	else
-		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);

	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
	if (!desc) {
@@ -231,10 +229,10 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
	}
	init_one_irq_desc(irq, desc, node);

-	irq_desc_ptrs[irq] = desc;
+	set_irq_desc(irq, desc);

 out_unlock:
-	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
 }
@@ -247,7 +245,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
		.chip = &no_irq_chip,
		.handle_irq = handle_bad_irq,
		.depth = 1,
-		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+		.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
 };

@@ -473,7 +471,7 @@ unsigned int __do_IRQ(unsigned int irq)
		return 1;
	}

-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
	if (desc->chip->ack)
		desc->chip->ack(irq);
	/*
@@ -517,13 +515,13 @@ unsigned int __do_IRQ(unsigned int irq)
	for (;;) {
		irqreturn_t action_ret;

-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);

		action_ret = handle_IRQ_event(irq, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);

-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
@@ -536,7 +534,7 @@ out:
	 * disabled while the handler was running.
	 */
	desc->chip->end(irq);
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);

	return 1;
 }
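The structural change in handle.c replaces the boot-time irq_desc_ptrs[] pointer array with a radix tree, so sparse IRQ numbers no longer need an nr_irqs-sized allocation and the bootmem fallback paths can go away (tree nodes come from slab). A standalone sketch of the same lookup scheme, with hypothetical demo_* names and the radix-tree calls used by set_irq_desc()/irq_to_desc() above:

	/*
	 * Illustrative sketch only: integer key -> descriptor pointer
	 * via a radix tree, as the hunk above does for irq numbers.
	 */
	#include <linux/radix-tree.h>

	static RADIX_TREE(demo_tree, GFP_ATOMIC);

	static int demo_publish(unsigned int irq, void *desc)
	{
		/* Can fail under memory pressure; the commit's
		 * set_irq_desc() ignores the return value. */
		return radix_tree_insert(&demo_tree, irq, desc);
	}

	static void *demo_find(unsigned int irq)
	{
		/* Returns NULL for unpopulated irq numbers. */
		return radix_tree_lookup(&demo_tree, irq);
	}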
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 1b5d742c6a77..c63f3bc88f0b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,14 +18,10 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
-extern spinlock_t sparse_irq_lock;
+extern raw_spinlock_t sparse_irq_lock;

 #ifdef CONFIG_SPARSE_IRQ
-/* irq_desc_ptrs allocated at boot time */
-extern struct irq_desc **irq_desc_ptrs;
-#else
-/* irq_desc_ptrs is a fixed size array */
-extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif

 #ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bde4c667d24d..704e488730a5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
-		spin_lock_irqsave(&desc->lock, flags);
+		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
	if (!desc->chip->set_affinity)
		return -EINVAL;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);

 #ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
	}
 #endif
	desc->status |= IRQ_AFFINITY_SET;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
 }

@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
	unsigned long flags;
	int ret;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
 }
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
		return;

	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
		return;

	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
		}
	}

-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
 }
 EXPORT_SYMBOL(set_irq_wake);
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
+	unsigned long flags;

	if (!desc)
		return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
	if (desc->status & IRQ_NOREQUEST)
		return 0;

+	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
	return !action;
 }

@@ -483,13 +487,31 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
	chip_bus_lock(irq, desc);
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->chip->unmask(irq);
	}
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(irq, desc);
 }

@@ -514,9 +536,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
		return;
	}

-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->affinity);
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
@@ -545,7 +567,7 @@ static int irq_thread(void *data)

		atomic_inc(&desc->threads_active);

-		spin_lock_irq(&desc->lock);
+		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
@@ -555,9 +577,9 @@ static int irq_thread(void *data)
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);
		} else {
-			spin_unlock_irq(&desc->lock);
+			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

@@ -679,7 +701,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	/*
	 * The following block of code has to be executed atomically
	 */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
@@ -735,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	if (new->flags & IRQF_ONESHOT)
		desc->status |= IRQ_ONESHOT;

+	/*
+	 * Force MSI interrupts to run with interrupts
+	 * disabled. The multi vector cards can cause stack
+	 * overflows due to nested interrupts when enough of
+	 * them are directed to a core and fire at the same
+	 * time.
+	 */
+	if (desc->msi_desc)
+		new->flags |= IRQF_DISABLED;
+
	if (!(desc->status & IRQ_NOAUTOEN)) {
		desc->depth = 0;
		desc->status &= ~IRQ_DISABLED;
@@ -775,7 +807,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
		__enable_irq(desc, irq, false);
	}

-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
@@ -802,7 +834,7 @@ mismatch:
	ret = -EBUSY;

 out_thread:
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

@@ -844,7 +876,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
	if (!desc)
		return NULL;

-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
@@ -856,7 +888,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)

	if (!action) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);

		return NULL;
	}
@@ -884,7 +916,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
			desc->chip->disable(irq);
	}

-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

@@ -1067,7 +1099,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
	kfree(action);

 #ifdef CONFIG_DEBUG_SHIRQ
-	if (irqflags & IRQF_SHARED) {
+	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
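The irq_finalize_oneshot() change above adds a retry loop: if the hard interrupt handler still has IRQ_INPROGRESS set on another CPU, the thread drops both locks and retries instead of unmasking, since unmasking at that point could leave the line masked forever. Reduced to its control flow, with hypothetical demo_* names and flag values:

	/*
	 * Illustrative sketch only: the retry shape added to
	 * irq_finalize_oneshot(), with all names hypothetical.
	 */
	#include <linux/spinlock.h>

	#define DEMO_INPROGRESS	0x1	/* hard handler running elsewhere */
	#define DEMO_MASKED	0x2	/* line masked at the chip */

	struct demo_desc {
		raw_spinlock_t lock;
		unsigned int status;
	};

	static void demo_finalize_oneshot(struct demo_desc *d)
	{
	again:
		raw_spin_lock_irq(&d->lock);
		if (unlikely(d->status & DEMO_INPROGRESS)) {
			/* The thread finished before the hard handler
			 * on the other CPU; back off and retry rather
			 * than unmask under its feet. */
			raw_spin_unlock_irq(&d->lock);
			cpu_relax();
			goto again;
		}
		if (d->status & DEMO_MASKED) {
			d->status &= ~DEMO_MASKED;
			/* the real code calls desc->chip->unmask(irq) here */
		}
		raw_spin_unlock_irq(&d->lock);
	}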
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index fcb6c96f2627..241962280836 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
	if (!desc->chip->set_affinity)
		return;

-	assert_spin_locked(&desc->lock);
+	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 3fd30197da2e..65d3845665ac 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/irq.h> | 8 | #include <linux/irq.h> |
9 | #include <linux/slab.h> | ||
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/random.h> | 11 | #include <linux/random.h> |
11 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
@@ -42,7 +43,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | |||
42 | "for migration.\n", irq); | 43 | "for migration.\n", irq); |
43 | return false; | 44 | return false; |
44 | } | 45 | } |
45 | spin_lock_init(&desc->lock); | 46 | raw_spin_lock_init(&desc->lock); |
46 | desc->node = node; | 47 | desc->node = node; |
47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 48 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
48 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); | 49 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); |
@@ -67,10 +68,10 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
67 | 68 | ||
68 | irq = old_desc->irq; | 69 | irq = old_desc->irq; |
69 | 70 | ||
70 | spin_lock_irqsave(&sparse_irq_lock, flags); | 71 | raw_spin_lock_irqsave(&sparse_irq_lock, flags); |
71 | 72 | ||
72 | /* We have to check it to avoid races with another CPU */ | 73 | /* We have to check it to avoid races with another CPU */ |
73 | desc = irq_desc_ptrs[irq]; | 74 | desc = irq_to_desc(irq); |
74 | 75 | ||
75 | if (desc && old_desc != desc) | 76 | if (desc && old_desc != desc) |
76 | goto out_unlock; | 77 | goto out_unlock; |
@@ -90,8 +91,8 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
90 | goto out_unlock; | 91 | goto out_unlock; |
91 | } | 92 | } |
92 | 93 | ||
93 | irq_desc_ptrs[irq] = desc; | 94 | replace_irq_desc(irq, desc); |
94 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | 95 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); |
95 | 96 | ||
96 | /* free the old one */ | 97 | /* free the old one */ |
97 | free_one_irq_desc(old_desc, desc); | 98 | free_one_irq_desc(old_desc, desc); |
@@ -100,7 +101,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
100 | return desc; | 101 | return desc; |
101 | 102 | ||
102 | out_unlock: | 103 | out_unlock: |
103 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | 104 | raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); |
104 | 105 | ||
105 | return desc; | 106 | return desc; |
106 | } | 107 | } |
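Beyond the raw-lock conversion, these hunks stop dereferencing the irq_desc_ptrs[] array directly and go through the irq_to_desc()/replace_irq_desc() accessors instead, so __real_move_irq_desc() no longer depends on how the sparse-irq table is implemented. The lookup-publish-free sequence above, condensed (control flow only, not a standalone function):

    raw_spin_lock_irqsave(&sparse_irq_lock, flags);

    desc = irq_to_desc(irq);            /* re-check under the lock */
    if (desc && old_desc != desc)
        goto out_unlock;                /* another CPU already moved it */

    /* ... allocate and copy the node-local descriptor ... */

    replace_irq_desc(irq, desc);        /* publish through the accessor */
    raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
    free_one_irq_desc(old_desc, desc);  /* old copy is now unreachable */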
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index a0bb09e79867..0d4005d85b03 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -28,9 +28,9 @@ void suspend_device_irqs(void) | |||
28 | for_each_irq_desc(irq, desc) { | 28 | for_each_irq_desc(irq, desc) { |
29 | unsigned long flags; | 29 | unsigned long flags; |
30 | 30 | ||
31 | spin_lock_irqsave(&desc->lock, flags); | 31 | raw_spin_lock_irqsave(&desc->lock, flags); |
32 | __disable_irq(desc, irq, true); | 32 | __disable_irq(desc, irq, true); |
33 | spin_unlock_irqrestore(&desc->lock, flags); | 33 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
34 | } | 34 | } |
35 | 35 | ||
36 | for_each_irq_desc(irq, desc) | 36 | for_each_irq_desc(irq, desc) |
@@ -56,9 +56,9 @@ void resume_device_irqs(void) | |||
56 | if (!(desc->status & IRQ_SUSPENDED)) | 56 | if (!(desc->status & IRQ_SUSPENDED)) |
57 | continue; | 57 | continue; |
58 | 58 | ||
59 | spin_lock_irqsave(&desc->lock, flags); | 59 | raw_spin_lock_irqsave(&desc->lock, flags); |
60 | __enable_irq(desc, irq, true); | 60 | __enable_irq(desc, irq, true); |
61 | spin_unlock_irqrestore(&desc->lock, flags); | 61 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
62 | } | 62 | } |
63 | } | 63 | } |
64 | EXPORT_SYMBOL_GPL(resume_device_irqs); | 64 | EXPORT_SYMBOL_GPL(resume_device_irqs); |
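Both suspend_device_irqs() and resume_device_irqs() take each descriptor's lock with the irqsave variant because they may be entered with interrupts in either state; the saved flags word makes the pair nest safely. A self-contained sketch of the idiom (all names hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(state_lock);
    static unsigned int state_flags;

    /*
     * Safe from any context: local interrupts are disabled for the
     * critical section and restored to exactly their prior state.
     */
    static void set_state_flag(unsigned int flag)
    {
        unsigned long flags;

        raw_spin_lock_irqsave(&state_lock, flags);
        state_flags |= flag;
        raw_spin_unlock_irqrestore(&state_lock, flags);
    }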
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 692363dd591f..7a6eb04ef6b5 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/gfp.h> | ||
10 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
11 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
12 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
@@ -136,7 +137,7 @@ out: | |||
136 | 137 | ||
137 | static int default_affinity_open(struct inode *inode, struct file *file) | 138 | static int default_affinity_open(struct inode *inode, struct file *file) |
138 | { | 139 | { |
139 | return single_open(file, default_affinity_show, NULL); | 140 | return single_open(file, default_affinity_show, PDE(inode)->data); |
140 | } | 141 | } |
141 | 142 | ||
142 | static const struct file_operations default_affinity_proc_fops = { | 143 | static const struct file_operations default_affinity_proc_fops = { |
@@ -148,18 +149,28 @@ static const struct file_operations default_affinity_proc_fops = { | |||
148 | }; | 149 | }; |
149 | #endif | 150 | #endif |
150 | 151 | ||
151 | static int irq_spurious_read(char *page, char **start, off_t off, | 152 | static int irq_spurious_proc_show(struct seq_file *m, void *v) |
152 | int count, int *eof, void *data) | ||
153 | { | 153 | { |
154 | struct irq_desc *desc = irq_to_desc((long) data); | 154 | struct irq_desc *desc = irq_to_desc((long) m->private); |
155 | return sprintf(page, "count %u\n" | 155 | |
156 | "unhandled %u\n" | 156 | seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", |
157 | "last_unhandled %u ms\n", | 157 | desc->irq_count, desc->irqs_unhandled, |
158 | desc->irq_count, | 158 | jiffies_to_msecs(desc->last_unhandled)); |
159 | desc->irqs_unhandled, | 159 | return 0; |
160 | jiffies_to_msecs(desc->last_unhandled)); | 160 | } |
161 | |||
162 | static int irq_spurious_proc_open(struct inode *inode, struct file *file) | ||
163 | { | ||
164 | return single_open(file, irq_spurious_proc_show, NULL); | ||
161 | } | 165 | } |
162 | 166 | ||
167 | static const struct file_operations irq_spurious_proc_fops = { | ||
168 | .open = irq_spurious_proc_open, | ||
169 | .read = seq_read, | ||
170 | .llseek = seq_lseek, | ||
171 | .release = single_release, | ||
172 | }; | ||
173 | |||
163 | #define MAX_NAMELEN 128 | 174 | #define MAX_NAMELEN 128 |
164 | 175 | ||
165 | static int name_unique(unsigned int irq, struct irqaction *new_action) | 176 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
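This hunk converts the per-IRQ "spurious" file from the legacy read_proc interface, with its page/start/off/count/eof bookkeeping, to the seq_file single_open() pattern: the show callback just prints into the seq_file, and seq_read/seq_lseek/single_release supply the rest. One quirk worth noting: irq_spurious_proc_open() passes NULL to single_open(), so m->private is NULL and irq_to_desc((long) m->private) always resolves IRQ 0; the per-entry data attached further down via proc_create_data() is never consulted, and later mainline kernels fix this by passing the proc entry's data through, as default_affinity_open() in the earlier hunk already does. A sketch of the corrected idiom under that assumption (example names are hypothetical):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_proc_show(struct seq_file *m, void *v)
    {
        long irq = (long) m->private;   /* whatever was passed to single_open() */

        seq_printf(m, "irq %ld\n", irq);
        return 0;
    }

    static int example_proc_open(struct inode *inode, struct file *file)
    {
        /* hand the data from proc_create_data() to the show callback */
        return single_open(file, example_proc_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
        .open       = example_proc_open,
        .read       = seq_read,
        .llseek     = seq_lseek,
        .release    = single_release,
    };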
@@ -169,7 +180,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) | |||
169 | unsigned long flags; | 180 | unsigned long flags; |
170 | int ret = 1; | 181 | int ret = 1; |
171 | 182 | ||
172 | spin_lock_irqsave(&desc->lock, flags); | 183 | raw_spin_lock_irqsave(&desc->lock, flags); |
173 | for (action = desc->action ; action; action = action->next) { | 184 | for (action = desc->action ; action; action = action->next) { |
174 | if ((action != new_action) && action->name && | 185 | if ((action != new_action) && action->name && |
175 | !strcmp(new_action->name, action->name)) { | 186 | !strcmp(new_action->name, action->name)) { |
@@ -177,7 +188,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) | |||
177 | break; | 188 | break; |
178 | } | 189 | } |
179 | } | 190 | } |
180 | spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
181 | return ret; | 192 | return ret; |
182 | } | 193 | } |
183 | 194 | ||
@@ -204,7 +215,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
204 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 215 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
205 | { | 216 | { |
206 | char name [MAX_NAMELEN]; | 217 | char name [MAX_NAMELEN]; |
207 | struct proc_dir_entry *entry; | ||
208 | 218 | ||
209 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) | 219 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) |
210 | return; | 220 | return; |
@@ -214,6 +224,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
214 | 224 | ||
215 | /* create /proc/irq/1234 */ | 225 | /* create /proc/irq/1234 */ |
216 | desc->dir = proc_mkdir(name, root_irq_dir); | 226 | desc->dir = proc_mkdir(name, root_irq_dir); |
227 | if (!desc->dir) | ||
228 | return; | ||
217 | 229 | ||
218 | #ifdef CONFIG_SMP | 230 | #ifdef CONFIG_SMP |
219 | /* create /proc/irq/<irq>/smp_affinity */ | 231 | /* create /proc/irq/<irq>/smp_affinity */ |
@@ -221,11 +233,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
221 | &irq_affinity_proc_fops, (void *)(long)irq); | 233 | &irq_affinity_proc_fops, (void *)(long)irq); |
222 | #endif | 234 | #endif |
223 | 235 | ||
224 | entry = create_proc_entry("spurious", 0444, desc->dir); | 236 | proc_create_data("spurious", 0444, desc->dir, |
225 | if (entry) { | 237 | &irq_spurious_proc_fops, (void *)(long)irq); |
226 | entry->data = (void *)(long)irq; | ||
227 | entry->read_proc = irq_spurious_read; | ||
228 | } | ||
229 | } | 238 | } |
230 | 239 | ||
231 | #undef MAX_NAMELEN | 240 | #undef MAX_NAMELEN |
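On the registration side, the two-step create_proc_entry() followed by manual assignment of entry->data and entry->read_proc left a window where a concurrent open could observe a half-initialized entry; proc_create_data() closes it by publishing the entry only after the fops and data pointers are in place. The added NULL check on proc_mkdir() also stops the function from creating children under a directory that failed to appear. Condensed from the hunks above:

    desc->dir = proc_mkdir(name, root_irq_dir);
    if (!desc->dir)
        return;     /* no directory, no children */

    /* entry goes live with fops and data already set */
    proc_create_data("spurious", 0444, desc->dir,
                     &irq_spurious_proc_fops, (void *)(long) irq);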
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index bd7273e6282e..89fb90ae534f 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
28 | struct irqaction *action; | 28 | struct irqaction *action; |
29 | int ok = 0, work = 0; | 29 | int ok = 0, work = 0; |
30 | 30 | ||
31 | spin_lock(&desc->lock); | 31 | raw_spin_lock(&desc->lock); |
32 | /* Already running on another processor */ | 32 | /* Already running on another processor */ |
33 | if (desc->status & IRQ_INPROGRESS) { | 33 | if (desc->status & IRQ_INPROGRESS) { |
34 | /* | 34 | /* |
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
37 | */ | 37 | */ |
38 | if (desc->action && (desc->action->flags & IRQF_SHARED)) | 38 | if (desc->action && (desc->action->flags & IRQF_SHARED)) |
39 | desc->status |= IRQ_PENDING; | 39 | desc->status |= IRQ_PENDING; |
40 | spin_unlock(&desc->lock); | 40 | raw_spin_unlock(&desc->lock); |
41 | return ok; | 41 | return ok; |
42 | } | 42 | } |
43 | /* Honour the normal IRQ locking */ | 43 | /* Honour the normal IRQ locking */ |
44 | desc->status |= IRQ_INPROGRESS; | 44 | desc->status |= IRQ_INPROGRESS; |
45 | action = desc->action; | 45 | action = desc->action; |
46 | spin_unlock(&desc->lock); | 46 | raw_spin_unlock(&desc->lock); |
47 | 47 | ||
48 | while (action) { | 48 | while (action) { |
49 | /* Only shared IRQ handlers are safe to call */ | 49 | /* Only shared IRQ handlers are safe to call */ |
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
56 | } | 56 | } |
57 | local_irq_disable(); | 57 | local_irq_disable(); |
58 | /* Now clean up the flags */ | 58 | /* Now clean up the flags */ |
59 | spin_lock(&desc->lock); | 59 | raw_spin_lock(&desc->lock); |
60 | action = desc->action; | 60 | action = desc->action; |
61 | 61 | ||
62 | /* | 62 | /* |
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
68 | * Perform real IRQ processing for the IRQ we deferred | 68 | * Perform real IRQ processing for the IRQ we deferred |
69 | */ | 69 | */ |
70 | work = 1; | 70 | work = 1; |
71 | spin_unlock(&desc->lock); | 71 | raw_spin_unlock(&desc->lock); |
72 | handle_IRQ_event(irq, action); | 72 | handle_IRQ_event(irq, action); |
73 | spin_lock(&desc->lock); | 73 | raw_spin_lock(&desc->lock); |
74 | desc->status &= ~IRQ_PENDING; | 74 | desc->status &= ~IRQ_PENDING; |
75 | } | 75 | } |
76 | desc->status &= ~IRQ_INPROGRESS; | 76 | desc->status &= ~IRQ_INPROGRESS; |
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
80 | */ | 80 | */ |
81 | if (work && desc->chip && desc->chip->end) | 81 | if (work && desc->chip && desc->chip->end) |
82 | desc->chip->end(irq); | 82 | desc->chip->end(irq); |
83 | spin_unlock(&desc->lock); | 83 | raw_spin_unlock(&desc->lock); |
84 | 84 | ||
85 | return ok; | 85 | return ok; |
86 | } | 86 | } |
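Throughout try_one_irq() the descriptor lock is dropped around handle_IRQ_event(), since handlers can run for a long time and must not be called under a raw spinlock held by the poller; IRQ_INPROGRESS marks the descriptor claimed while unlocked, and IRQ_PENDING records a delivery that raced in meanwhile. The locking shape, condensed from the hunks above:

    raw_spin_lock(&desc->lock);
    desc->status |= IRQ_INPROGRESS;     /* claim the descriptor */
    action = desc->action;
    raw_spin_unlock(&desc->lock);

    /* ... invoke the IRQF_SHARED handlers with the lock dropped ... */

    raw_spin_lock(&desc->lock);
    if (desc->status & IRQ_PENDING) {   /* a real IRQ raced with the poll */
        raw_spin_unlock(&desc->lock);
        handle_IRQ_event(irq, action);  /* replay the deferred delivery */
        raw_spin_lock(&desc->lock);
        desc->status &= ~IRQ_PENDING;
    }
    desc->status &= ~IRQ_INPROGRESS;
    raw_spin_unlock(&desc->lock);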
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq) | |||
104 | return ok; | 104 | return ok; |
105 | } | 105 | } |
106 | 106 | ||
107 | static void poll_all_shared_irqs(void) | 107 | static void poll_spurious_irqs(unsigned long dummy) |
108 | { | 108 | { |
109 | struct irq_desc *desc; | 109 | struct irq_desc *desc; |
110 | int i; | 110 | int i; |
@@ -125,23 +125,11 @@ static void poll_all_shared_irqs(void) | |||
125 | try_one_irq(i, desc); | 125 | try_one_irq(i, desc); |
126 | local_irq_enable(); | 126 | local_irq_enable(); |
127 | } | 127 | } |
128 | } | ||
129 | |||
130 | static void poll_spurious_irqs(unsigned long dummy) | ||
131 | { | ||
132 | poll_all_shared_irqs(); | ||
133 | 128 | ||
134 | mod_timer(&poll_spurious_irq_timer, | 129 | mod_timer(&poll_spurious_irq_timer, |
135 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 130 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
136 | } | 131 | } |
137 | 132 | ||
138 | #ifdef CONFIG_DEBUG_SHIRQ | ||
139 | void debug_poll_all_shared_irqs(void) | ||
140 | { | ||
141 | poll_all_shared_irqs(); | ||
142 | } | ||
143 | #endif | ||
144 | |||
145 | /* | 133 | /* |
146 | * If 99,900 of the previous 100,000 interrupts have not been handled | 134 | * If 99,900 of the previous 100,000 interrupts have not been handled |
147 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic | 135 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic |
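This hunk removes the poll_all_shared_irqs() helper and its debug_poll_all_shared_irqs() wrapper, leaving the descriptor scan inline in the timer callback, which re-arms itself to run periodically. A sketch of the self-re-arming timer idiom of this kernel generation (names and the interval value are assumptions for the sketch; compare POLL_SPURIOUS_IRQ_INTERVAL in the source):

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    #define POLL_INTERVAL   (HZ / 10)   /* assumed interval */

    static void poll_fn(unsigned long dummy);
    static DEFINE_TIMER(poll_timer, poll_fn, 0, 0);

    static void poll_fn(unsigned long dummy)
    {
        /* ... scan the descriptors and service stuck lines ... */

        /* kernel timers are one-shot: re-queue to keep polling */
        mod_timer(&poll_timer, jiffies + POLL_INTERVAL);
    }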
@@ -232,7 +220,7 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
232 | /* | 220 | /* |
233 | * If we are seeing only the odd spurious IRQ caused by | 221 | * If we are seeing only the odd spurious IRQ caused by |
234 | * bus asynchronicity then don't eventually trigger an error, | 222 | * bus asynchronicity then don't eventually trigger an error, |
235 | * otherwise the couter becomes a doomsday timer for otherwise | 223 | * otherwise the counter becomes a doomsday timer for otherwise |
236 | * working systems | 224 | * working systems |
237 | */ | 225 | */ |
238 | if (time_after(jiffies, desc->last_unhandled + HZ/10)) | 226 | if (time_after(jiffies, desc->last_unhandled + HZ/10)) |