Diffstat (limited to 'kernel/irq')

 -rw-r--r--  kernel/irq/Makefile        |   1
 -rw-r--r--  kernel/irq/autoprobe.c     |  48
 -rw-r--r--  kernel/irq/chip.c          | 114
 -rw-r--r--  kernel/irq/handle.c        | 210
 -rw-r--r--  kernel/irq/internals.h     |  14
 -rw-r--r--  kernel/irq/manage.c        | 206
 -rw-r--r--  kernel/irq/migration.c     |  21
 -rw-r--r--  kernel/irq/numa_migrate.c  | 122
 -rw-r--r--  kernel/irq/proc.c          |  53
 -rw-r--r--  kernel/irq/resend.c        |   6
 -rw-r--r--  kernel/irq/spurious.c      | 167

 11 files changed, 703 insertions, 259 deletions
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 681c52dbfe22..4dd5b1edac98 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 533068cfb607..650ce4102a63 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -30,16 +30,18 @@ static DEFINE_MUTEX(probing_active);
 unsigned long probe_irq_on(void)
 {
 	struct irq_desc *desc;
-	unsigned long mask;
-	unsigned int i;
+	unsigned long mask = 0;
+	unsigned int status;
+	int i;
 
 	mutex_lock(&probing_active);
 	/*
 	 * something may have generated an irq long ago and we want to
 	 * flush such a longstanding irq before considering it as spurious.
 	 */
-	for (i = NR_IRQS-1; i > 0; i--) {
-		desc = irq_desc + i;
+	for_each_irq_desc_reverse(i, desc) {
+		if (!desc)
+			continue;
 
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
@@ -68,8 +70,9 @@ unsigned long probe_irq_on(void)
 	 * (we must startup again here because if a longstanding irq
 	 * happened in the previous stage, it may have masked itself)
 	 */
-	for (i = NR_IRQS-1; i > 0; i--) {
-		desc = irq_desc + i;
+	for_each_irq_desc_reverse(i, desc) {
+		if (!desc)
+			continue;
 
 		spin_lock_irq(&desc->lock);
 		if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
@@ -88,11 +91,10 @@ unsigned long probe_irq_on(void)
 	/*
 	 * Now filter out any obviously spurious interrupts
 	 */
-	mask = 0;
-	for (i = 0; i < NR_IRQS; i++) {
-		unsigned int status;
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
 
-		desc = irq_desc + i;
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
@@ -126,13 +128,13 @@ EXPORT_SYMBOL(probe_irq_on);
  */
 unsigned int probe_irq_mask(unsigned long val)
 {
-	unsigned int mask;
+	unsigned int status, mask = 0;
+	struct irq_desc *desc;
 	int i;
 
-	mask = 0;
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		unsigned int status;
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
 
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
@@ -171,20 +173,22 @@ EXPORT_SYMBOL(probe_irq_mask);
  */
 int probe_irq_off(unsigned long val)
 {
-	int i, irq_found = 0, nr_irqs = 0;
+	int i, irq_found = 0, nr_of_irqs = 0;
+	struct irq_desc *desc;
+	unsigned int status;
 
-	for (i = 0; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		unsigned int status;
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
 
 		spin_lock_irq(&desc->lock);
 		status = desc->status;
 
 		if (status & IRQ_AUTODETECT) {
 			if (!(status & IRQ_WAITING)) {
-				if (!nr_irqs)
+				if (!nr_of_irqs)
 					irq_found = i;
-				nr_irqs++;
+				nr_of_irqs++;
 			}
 			desc->status = status & ~IRQ_AUTODETECT;
 			desc->chip->shutdown(i);
@@ -193,7 +197,7 @@ int probe_irq_off(unsigned long val)
 	}
 	mutex_unlock(&probing_active);
 
-	if (nr_irqs > 1)
+	if (nr_of_irqs > 1)
 		irq_found = -irq_found;
 
 	return irq_found;
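The three probe loops above capture the core conversion of this series: open-coded "for (i = 0; i < NR_IRQS; i++)" walks over the static irq_desc[] array become for_each_irq_desc()/for_each_irq_desc_reverse() iterators that tolerate a sparse descriptor array, which is why every loop body now opens with an "if (!desc) continue;" guard. A minimal sketch of how such iterators can be expressed in terms of the irq_to_desc() lookup and the nr_irqs variable introduced later in this diff (illustrative only; the kernel's real macros live in include/linux/irq.h and differ in detail):

/* Illustrative sketch, not the kernel's actual definition. */
#define for_each_irq_desc(irq, desc)					\
	for ((irq) = 0, (desc) = irq_to_desc(irq); (irq) < nr_irqs;	\
	     (irq)++, (desc) = irq_to_desc(irq))

/* Reverse variant; the loop variable must be signed so that the
 * (irq) >= 0 test can terminate, which is why probe_irq_on() above
 * changes "unsigned int i" to "int i". */
#define for_each_irq_desc_reverse(irq, desc)				\
	for ((irq) = nr_irqs - 1, (desc) = irq_to_desc(irq); (irq) >= 0; \
	     (irq)--, (desc) = irq_to_desc(irq))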
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3cd441ebf5d2..6eb3c7952b64 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -27,13 +27,13 @@ void dynamic_irq_init(unsigned int irq)
 	struct irq_desc *desc;
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	desc = irq_to_desc(irq);
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
 		return;
 	}
 
 	/* Ensure we don't have left over values from a previous use of this irq */
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status = IRQ_DISABLED;
 	desc->chip = &no_irq_chip;
@@ -57,15 +57,14 @@ void dynamic_irq_init(unsigned int irq)
  */
 void dynamic_irq_cleanup(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -78,6 +77,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
+	desc->name = NULL;
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
@@ -89,10 +89,10 @@ void dynamic_irq_cleanup(unsigned int irq)
  */
 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
 		return -EINVAL;
 	}
@@ -100,7 +100,6 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	irq_chip_set_defaults(chip);
 	desc->chip = chip;
@@ -111,27 +110,28 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 EXPORT_SYMBOL(set_irq_chip);
 
 /**
- *	set_irq_type - set the irq type for an irq
+ *	set_irq_type - set the irq trigger type for an irq
  *	@irq:	irq number
- *	@type:	interrupt type - see include/linux/interrupt.h
+ *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  */
 int set_irq_type(unsigned int irq, unsigned int type)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret = -ENXIO;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
 		return -ENODEV;
 	}
 
-	desc = irq_desc + irq;
-	if (desc->chip->set_type) {
-		spin_lock_irqsave(&desc->lock, flags);
-		ret = desc->chip->set_type(irq, type);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	}
+	type &= IRQ_TYPE_SENSE_MASK;
+	if (type == IRQ_TYPE_NONE)
+		return 0;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = __irq_set_trigger(desc, irq, type);
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -145,16 +145,15 @@ EXPORT_SYMBOL(set_irq_type);
  */
 int set_irq_data(unsigned int irq, void *data)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install controller data for IRQ%d\n", irq);
 		return -EINVAL;
 	}
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->handler_data = data;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -171,15 +170,15 @@ EXPORT_SYMBOL(set_irq_data);
  */
 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install msi data for IRQ%d\n", irq);
 		return -EINVAL;
 	}
-	desc = irq_desc + irq;
+
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->msi_desc = entry;
 	if (entry)
@@ -197,10 +196,16 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
  */
 int set_irq_chip_data(unsigned int irq, void *data)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS || !desc->chip) {
+	if (!desc) {
+		printk(KERN_ERR
+		       "Trying to install chip data for IRQ%d\n", irq);
+		return -EINVAL;
+	}
+
+	if (!desc->chip) {
 		printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
 		return -EINVAL;
 	}
@@ -218,7 +223,7 @@ EXPORT_SYMBOL(set_irq_chip_data);
  */
 static void default_enable(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	desc->chip->unmask(irq);
 	desc->status &= ~IRQ_MASKED;
@@ -236,8 +241,9 @@ static void default_disable(unsigned int irq)
  */
 static unsigned int default_startup(unsigned int irq)
 {
-	irq_desc[irq].chip->enable(irq);
+	struct irq_desc *desc = irq_to_desc(irq);
 
+	desc->chip->enable(irq);
 	return 0;
 }
 
@@ -246,7 +252,7 @@ static unsigned int default_startup(unsigned int irq)
  */
 static void default_shutdown(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	desc->chip->mask(irq);
 	desc->status |= IRQ_MASKED;
@@ -305,14 +311,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irqaction *action;
 	irqreturn_t action_ret;
-	const unsigned int cpu = smp_processor_id();
 
 	spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -344,17 +349,17 @@ out_unlock:
 void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
 	spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -392,7 +397,6 @@ out_unlock:
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
@@ -402,7 +406,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		goto out;
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -428,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	spin_unlock(&desc->lock);
 }
@@ -451,8 +456,6 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	const unsigned int cpu = smp_processor_id();
-
 	spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -466,13 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		    !desc->action)) {
 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
 		mask_ack_irq(desc, irq);
+		desc = irq_remap_to_desc(irq, desc);
 		goto out_unlock;
 	}
-
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
 	desc->status |= IRQ_INPROGRESS;
@@ -524,7 +528,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
@@ -533,25 +537,25 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	if (desc->chip->eoi)
+	if (desc->chip->eoi) {
 		desc->chip->eoi(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 }
 
 void
 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		  const char *name)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR
 		       "Trying to install type control for IRQ%d\n", irq);
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	if (!handle)
 		handle = handle_bad_irq;
 	else if (desc->chip == &no_irq_chip) {
@@ -571,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
-		if (desc->chip != &no_irq_chip)
+		if (desc->chip != &no_irq_chip) {
 			mask_ack_irq(desc, irq);
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		desc->status |= IRQ_DISABLED;
 		desc->depth = 1;
 	}
@@ -583,7 +589,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		desc->status &= ~IRQ_DISABLED;
 		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 		desc->depth = 0;
-		desc->chip->unmask(irq);
+		desc->chip->startup(irq);
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -606,17 +612,14 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 
 void __init set_irq_noprobe(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
-
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOPROBE;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -624,17 +627,14 @@ void __init set_irq_noprobe(unsigned int irq)
 
 void __init set_irq_probe(unsigned int irq)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS) {
+	if (!desc) {
 		printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
-
 		return;
 	}
 
-	desc = irq_desc + irq;
-
 	spin_lock_irqsave(&desc->lock, flags);
 	desc->status &= ~IRQ_NOPROBE;
 	spin_unlock_irqrestore(&desc->lock, flags);
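Almost every hunk in chip.c applies one mechanical transformation: the bounds test "irq >= NR_IRQS" against the static array becomes a NULL test on the pointer returned by irq_to_desc(). A hedged sketch of the resulting idiom, where my_irq_op is a made-up name for illustration, not anything in this patch:

/* Sketch of the lookup idiom used throughout chip.c above;
 * my_irq_op is hypothetical, not a kernel symbol. */
int my_irq_op(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);	/* may now be NULL */
	unsigned long flags;

	if (!desc) {	/* replaces the old "irq >= NR_IRQS" bounds check */
		printk(KERN_ERR "Trying to operate on invalid IRQ%d\n", irq);
		return -EINVAL;
	}

	spin_lock_irqsave(&desc->lock, flags);
	/* ... modify *desc under its lock, as the setters above do ... */
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

The other recurring change, re-reading the descriptor with desc = irq_remap_to_desc(irq, desc) after each ack/eoi/mask_ack callback, appears to account for descriptors that may have been migrated to another node in the meantime (see numa_migrate.c in the diffstat): the flow handlers must not keep using a stale pointer.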
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5fa6198e9139..6492400cb50d 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -25,11 +32,10 @@
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -47,6 +53,158 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
  *
  * Controller mappings for all interrupt sources:
  */
+int nr_irqs = NR_IRQS;
+EXPORT_SYMBOL_GPL(nr_irqs);
+
+void __init __attribute__((weak)) arch_early_irq_init(void)
+{
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+	.irq	    = -1,
+	.status	    = IRQ_DISABLED,
+	.chip	    = &no_irq_chip,
+	.handle_irq = handle_bad_irq,
+	.depth      = 1,
+	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+	.affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+	unsigned long bytes;
+	char *ptr;
+	int node;
+
+	/* Compute how many bytes we need per irq and allocate them */
+	bytes = nr * sizeof(unsigned int);
+
+	node = cpu_to_node(cpu);
+	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+	if (ptr)
+		desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+	desc->irq = irq;
+#ifdef CONFIG_SMP
+	desc->cpu = cpu;
+#endif
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "can not alloc kstat_irqs\n");
+		BUG_ON(1);
+	}
+	arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS_LEGACY-1] = {
+		.irq	    = -1,
+		.status	    = IRQ_DISABLED,
+		.chip	    = &no_irq_chip,
+		.handle_irq = handle_bad_irq,
+		.depth	    = 1,
+		.lock	    = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+		.affinity   = CPU_MASK_ALL
+#endif
+	}
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+void __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int legacy_count;
+	int i;
+
+	desc = irq_desc_legacy;
+	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+	for (i = 0; i < legacy_count; i++) {
+		desc[i].irq = i;
+		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+
+		irq_desc_ptrs[i] = desc + i;
+	}
+
+	for (i = legacy_count; i < NR_IRQS; i++)
+		irq_desc_ptrs[i] = NULL;
+
+	arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int node;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+				irq, NR_IRQS);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		return desc;
+
+	spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		goto out_unlock;
+
+	node = cpu_to_node(cpu);
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
+		 irq, cpu, node);
+	if (!desc) {
+		printk(KERN_ERR "can not alloc irq_desc\n");
+		BUG_ON(1);
+	}
+	init_one_irq_desc(irq, desc, cpu);
+
+	irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
+#else
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -60,13 +218,17 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
+#endif
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
  */
 static void ack_bad(unsigned int irq)
 {
-	print_irq_desc(irq, irq_desc + irq);
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	print_irq_desc(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -131,8 +293,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	handle_dynamic_tick(action);
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -165,19 +325,23 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
  */
 unsigned int __do_IRQ(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 	unsigned int status;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_incr_irqs_this_cpu(irq, desc);
+
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack)
+		if (desc->chip->ack) {
 			desc->chip->ack(irq);
+			/* get new one */
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -188,8 +352,10 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack)
+	if (desc->chip->ack) {
 		desc->chip->ack(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested
@@ -256,19 +422,25 @@ out:
 }
 #endif
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
+	struct irq_desc *desc;
 	int i;
 
-	for (i = 0; i < NR_IRQS; i++)
-		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
+	for_each_irq_desc(i, desc) {
+		if (!desc)
+			continue;
+
+		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	}
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->kstat_irqs[cpu];
+}
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
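Under CONFIG_SPARSE_IRQ the descriptor table becomes an array of pointers, irq_desc_ptrs[], populated lazily: irq_to_desc() is a plain lookup that can return NULL, while irq_to_desc_alloc_cpu() allocates on first use and re-checks the slot under sparse_irq_lock so two CPUs racing on the same irq cannot both allocate. A hedged caller sketch; my_chip and the function name are placeholders, not part of this patch:

static struct irq_chip my_chip = {	/* placeholder chip */
	.name = "my-chip",
};

static int my_wire_up_irq(unsigned int irq, int cpu)
{
	struct irq_desc *desc;

	/* Allocated on cpu's NUMA node on first use; NULL is only
	 * returned for irq >= NR_IRQS (allocation failure BUGs). */
	desc = irq_to_desc_alloc_cpu(irq, cpu);
	if (!desc)
		return -EINVAL;

	set_irq_chip_and_handler_name(irq, &my_chip, handle_level_irq,
				      "level");
	return 0;
}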
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 08a849a22447..e6d0a43cc125 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -10,18 +10,28 @@ extern void irq_chip_set_defaults(struct irq_chip *chip);
 /* Set default handler: */
 extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
 
+extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
+		unsigned long flags);
+
+extern struct lock_class_key irq_desc_lock_class;
+extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern spinlock_t sparse_irq_lock;
+extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+
 #ifdef CONFIG_PROC_FS
-extern void register_irq_proc(unsigned int irq);
+extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
 extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
 #else
-static inline void register_irq_proc(unsigned int irq) { }
+static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
 static inline void register_handler_proc(unsigned int irq,
 					 struct irqaction *action) { }
 static inline void unregister_handler_proc(unsigned int irq,
 					   struct irqaction *action) { }
 #endif
 
+extern int irq_select_affinity_usr(unsigned int irq);
+
 /*
  * Debugging printout:
  */
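__irq_set_trigger() is promoted here from a static helper in manage.c to a shared internal API, so set_irq_type() in chip.c (above) can route through it. Its flag handling leans on the identity called out in the manage.c hunk below, "IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK"; an illustrative compile-time expression of that assumption, not part of the patch:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */
#include <linux/interrupt.h>	/* IRQF_TRIGGER_MASK */
#include <linux/irq.h>		/* IRQ_TYPE_SENSE_MASK */

/* Would fail the build if the request-time trigger bits ever stopped
 * matching the desc->status sense bits (illustrative only). */
static inline void check_trigger_mask_identity(void)
{
	BUILD_BUG_ON(IRQF_TRIGGER_MASK != IRQ_TYPE_SENSE_MASK);
}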
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 60c49e324390..540f6c49f3fa 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -31,10 +31,10 @@ cpumask_t irq_default_affinity = CPU_MASK_ALL; | |||
31 | */ | 31 | */ |
32 | void synchronize_irq(unsigned int irq) | 32 | void synchronize_irq(unsigned int irq) |
33 | { | 33 | { |
34 | struct irq_desc *desc = irq_desc + irq; | 34 | struct irq_desc *desc = irq_to_desc(irq); |
35 | unsigned int status; | 35 | unsigned int status; |
36 | 36 | ||
37 | if (irq >= NR_IRQS) | 37 | if (!desc) |
38 | return; | 38 | return; |
39 | 39 | ||
40 | do { | 40 | do { |
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(synchronize_irq); | |||
64 | */ | 64 | */ |
65 | int irq_can_set_affinity(unsigned int irq) | 65 | int irq_can_set_affinity(unsigned int irq) |
66 | { | 66 | { |
67 | struct irq_desc *desc = irq_desc + irq; | 67 | struct irq_desc *desc = irq_to_desc(irq); |
68 | 68 | ||
69 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || | 69 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || |
70 | !desc->chip->set_affinity) | 70 | !desc->chip->set_affinity) |
@@ -81,26 +81,28 @@ int irq_can_set_affinity(unsigned int irq) | |||
81 | */ | 81 | */ |
82 | int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | 82 | int irq_set_affinity(unsigned int irq, cpumask_t cpumask) |
83 | { | 83 | { |
84 | struct irq_desc *desc = irq_desc + irq; | 84 | struct irq_desc *desc = irq_to_desc(irq); |
85 | unsigned long flags; | ||
85 | 86 | ||
86 | if (!desc->chip->set_affinity) | 87 | if (!desc->chip->set_affinity) |
87 | return -EINVAL; | 88 | return -EINVAL; |
88 | 89 | ||
89 | set_balance_irq_affinity(irq, cpumask); | 90 | spin_lock_irqsave(&desc->lock, flags); |
90 | 91 | ||
91 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 92 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
92 | if (desc->status & IRQ_MOVE_PCNTXT) { | 93 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
93 | unsigned long flags; | 94 | desc->affinity = cpumask; |
94 | |||
95 | spin_lock_irqsave(&desc->lock, flags); | ||
96 | desc->chip->set_affinity(irq, cpumask); | 95 | desc->chip->set_affinity(irq, cpumask); |
97 | spin_unlock_irqrestore(&desc->lock, flags); | 96 | } else { |
98 | } else | 97 | desc->status |= IRQ_MOVE_PENDING; |
99 | set_pending_irq(irq, cpumask); | 98 | desc->pending_mask = cpumask; |
99 | } | ||
100 | #else | 100 | #else |
101 | desc->affinity = cpumask; | 101 | desc->affinity = cpumask; |
102 | desc->chip->set_affinity(irq, cpumask); | 102 | desc->chip->set_affinity(irq, cpumask); |
103 | #endif | 103 | #endif |
104 | desc->status |= IRQ_AFFINITY_SET; | ||
105 | spin_unlock_irqrestore(&desc->lock, flags); | ||
104 | return 0; | 106 | return 0; |
105 | } | 107 | } |
106 | 108 | ||
@@ -108,7 +110,7 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
108 | /* | 110 | /* |
109 | * Generic version of the affinity autoselector. | 111 | * Generic version of the affinity autoselector. |
110 | */ | 112 | */ |
111 | int irq_select_affinity(unsigned int irq) | 113 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) |
112 | { | 114 | { |
113 | cpumask_t mask; | 115 | cpumask_t mask; |
114 | 116 | ||
@@ -117,14 +119,50 @@ int irq_select_affinity(unsigned int irq) | |||
117 | 119 | ||
118 | cpus_and(mask, cpu_online_map, irq_default_affinity); | 120 | cpus_and(mask, cpu_online_map, irq_default_affinity); |
119 | 121 | ||
120 | irq_desc[irq].affinity = mask; | 122 | /* |
121 | irq_desc[irq].chip->set_affinity(irq, mask); | 123 | * Preserve an userspace affinity setup, but make sure that |
124 | * one of the targets is online. | ||
125 | */ | ||
126 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | ||
127 | if (cpus_intersects(desc->affinity, cpu_online_map)) | ||
128 | mask = desc->affinity; | ||
129 | else | ||
130 | desc->status &= ~IRQ_AFFINITY_SET; | ||
131 | } | ||
132 | |||
133 | desc->affinity = mask; | ||
134 | desc->chip->set_affinity(irq, mask); | ||
122 | 135 | ||
123 | set_balance_irq_affinity(irq, mask); | ||
124 | return 0; | 136 | return 0; |
125 | } | 137 | } |
138 | #else | ||
139 | static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) | ||
140 | { | ||
141 | return irq_select_affinity(irq); | ||
142 | } | ||
126 | #endif | 143 | #endif |
127 | 144 | ||
145 | /* | ||
146 | * Called when affinity is set via /proc/irq | ||
147 | */ | ||
148 | int irq_select_affinity_usr(unsigned int irq) | ||
149 | { | ||
150 | struct irq_desc *desc = irq_to_desc(irq); | ||
151 | unsigned long flags; | ||
152 | int ret; | ||
153 | |||
154 | spin_lock_irqsave(&desc->lock, flags); | ||
155 | ret = do_irq_select_affinity(irq, desc); | ||
156 | spin_unlock_irqrestore(&desc->lock, flags); | ||
157 | |||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | #else | ||
162 | static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) | ||
163 | { | ||
164 | return 0; | ||
165 | } | ||
128 | #endif | 166 | #endif |
129 | 167 | ||
130 | /** | 168 | /** |
@@ -140,10 +178,10 @@ int irq_select_affinity(unsigned int irq) | |||
140 | */ | 178 | */ |
141 | void disable_irq_nosync(unsigned int irq) | 179 | void disable_irq_nosync(unsigned int irq) |
142 | { | 180 | { |
143 | struct irq_desc *desc = irq_desc + irq; | 181 | struct irq_desc *desc = irq_to_desc(irq); |
144 | unsigned long flags; | 182 | unsigned long flags; |
145 | 183 | ||
146 | if (irq >= NR_IRQS) | 184 | if (!desc) |
147 | return; | 185 | return; |
148 | 186 | ||
149 | spin_lock_irqsave(&desc->lock, flags); | 187 | spin_lock_irqsave(&desc->lock, flags); |
@@ -169,9 +207,9 @@ EXPORT_SYMBOL(disable_irq_nosync); | |||
169 | */ | 207 | */ |
170 | void disable_irq(unsigned int irq) | 208 | void disable_irq(unsigned int irq) |
171 | { | 209 | { |
172 | struct irq_desc *desc = irq_desc + irq; | 210 | struct irq_desc *desc = irq_to_desc(irq); |
173 | 211 | ||
174 | if (irq >= NR_IRQS) | 212 | if (!desc) |
175 | return; | 213 | return; |
176 | 214 | ||
177 | disable_irq_nosync(irq); | 215 | disable_irq_nosync(irq); |
@@ -211,10 +249,10 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq) | |||
211 | */ | 249 | */ |
212 | void enable_irq(unsigned int irq) | 250 | void enable_irq(unsigned int irq) |
213 | { | 251 | { |
214 | struct irq_desc *desc = irq_desc + irq; | 252 | struct irq_desc *desc = irq_to_desc(irq); |
215 | unsigned long flags; | 253 | unsigned long flags; |
216 | 254 | ||
217 | if (irq >= NR_IRQS) | 255 | if (!desc) |
218 | return; | 256 | return; |
219 | 257 | ||
220 | spin_lock_irqsave(&desc->lock, flags); | 258 | spin_lock_irqsave(&desc->lock, flags); |
@@ -223,9 +261,9 @@ void enable_irq(unsigned int irq) | |||
223 | } | 261 | } |
224 | EXPORT_SYMBOL(enable_irq); | 262 | EXPORT_SYMBOL(enable_irq); |
225 | 263 | ||
226 | int set_irq_wake_real(unsigned int irq, unsigned int on) | 264 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
227 | { | 265 | { |
228 | struct irq_desc *desc = irq_desc + irq; | 266 | struct irq_desc *desc = irq_to_desc(irq); |
229 | int ret = -ENXIO; | 267 | int ret = -ENXIO; |
230 | 268 | ||
231 | if (desc->chip->set_wake) | 269 | if (desc->chip->set_wake) |
@@ -248,7 +286,7 @@ int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
248 | */ | 286 | */ |
249 | int set_irq_wake(unsigned int irq, unsigned int on) | 287 | int set_irq_wake(unsigned int irq, unsigned int on) |
250 | { | 288 | { |
251 | struct irq_desc *desc = irq_desc + irq; | 289 | struct irq_desc *desc = irq_to_desc(irq); |
252 | unsigned long flags; | 290 | unsigned long flags; |
253 | int ret = 0; | 291 | int ret = 0; |
254 | 292 | ||
@@ -288,12 +326,16 @@ EXPORT_SYMBOL(set_irq_wake); | |||
288 | */ | 326 | */ |
289 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 327 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
290 | { | 328 | { |
329 | struct irq_desc *desc = irq_to_desc(irq); | ||
291 | struct irqaction *action; | 330 | struct irqaction *action; |
292 | 331 | ||
293 | if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST) | 332 | if (!desc) |
333 | return 0; | ||
334 | |||
335 | if (desc->status & IRQ_NOREQUEST) | ||
294 | return 0; | 336 | return 0; |
295 | 337 | ||
296 | action = irq_desc[irq].action; | 338 | action = desc->action; |
297 | if (action) | 339 | if (action) |
298 | if (irqflags & action->flags & IRQF_SHARED) | 340 | if (irqflags & action->flags & IRQF_SHARED) |
299 | action = NULL; | 341 | action = NULL; |
@@ -312,27 +354,35 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc) | |||
312 | desc->handle_irq = NULL; | 354 | desc->handle_irq = NULL; |
313 | } | 355 | } |
314 | 356 | ||
315 | static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq, | 357 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
316 | unsigned long flags) | 358 | unsigned long flags) |
317 | { | 359 | { |
318 | int ret; | 360 | int ret; |
361 | struct irq_chip *chip = desc->chip; | ||
319 | 362 | ||
320 | if (!chip || !chip->set_type) { | 363 | if (!chip || !chip->set_type) { |
321 | /* | 364 | /* |
322 | * IRQF_TRIGGER_* but the PIC does not support multiple | 365 | * IRQF_TRIGGER_* but the PIC does not support multiple |
323 | * flow-types? | 366 | * flow-types? |
324 | */ | 367 | */ |
325 | pr_warning("No set_type function for IRQ %d (%s)\n", irq, | 368 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, |
326 | chip ? (chip->name ? : "unknown") : "unknown"); | 369 | chip ? (chip->name ? : "unknown") : "unknown"); |
327 | return 0; | 370 | return 0; |
328 | } | 371 | } |
329 | 372 | ||
330 | ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK); | 373 | /* caller masked out all except trigger mode flags */ |
374 | ret = chip->set_type(irq, flags); | ||
331 | 375 | ||
332 | if (ret) | 376 | if (ret) |
333 | pr_err("setting trigger mode %d for irq %u failed (%pF)\n", | 377 | pr_err("setting trigger mode %d for irq %u failed (%pF)\n", |
334 | (int)(flags & IRQF_TRIGGER_MASK), | 378 | (int)flags, irq, chip->set_type); |
335 | irq, chip->set_type); | 379 | else { |
380 | if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | ||
381 | flags |= IRQ_LEVEL; | ||
382 | /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */ | ||
383 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | ||
384 | desc->status |= flags; | ||
385 | } | ||
336 | 386 | ||
337 | return ret; | 387 | return ret; |
338 | } | 388 | } |
@@ -341,16 +391,16 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq, | |||
341 | * Internal function to register an irqaction - typically used to | 391 | * Internal function to register an irqaction - typically used to |
342 | * allocate special interrupts that are part of the architecture. | 392 | * allocate special interrupts that are part of the architecture. |
343 | */ | 393 | */ |
344 | int setup_irq(unsigned int irq, struct irqaction *new) | 394 | static int |
395 | __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | ||
345 | { | 396 | { |
346 | struct irq_desc *desc = irq_desc + irq; | ||
347 | struct irqaction *old, **p; | 397 | struct irqaction *old, **p; |
348 | const char *old_name = NULL; | 398 | const char *old_name = NULL; |
349 | unsigned long flags; | 399 | unsigned long flags; |
350 | int shared = 0; | 400 | int shared = 0; |
351 | int ret; | 401 | int ret; |
352 | 402 | ||
353 | if (irq >= NR_IRQS) | 403 | if (!desc) |
354 | return -EINVAL; | 404 | return -EINVAL; |
355 | 405 | ||
356 | if (desc->chip == &no_irq_chip) | 406 | if (desc->chip == &no_irq_chip) |
@@ -411,7 +461,8 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
411 | 461 | ||
412 | /* Setup the type (level, edge polarity) if configured: */ | 462 | /* Setup the type (level, edge polarity) if configured: */ |
413 | if (new->flags & IRQF_TRIGGER_MASK) { | 463 | if (new->flags & IRQF_TRIGGER_MASK) { |
414 | ret = __irq_set_trigger(desc->chip, irq, new->flags); | 464 | ret = __irq_set_trigger(desc, irq, |
465 | new->flags & IRQF_TRIGGER_MASK); | ||
415 | 466 | ||
416 | if (ret) { | 467 | if (ret) { |
417 | spin_unlock_irqrestore(&desc->lock, flags); | 468 | spin_unlock_irqrestore(&desc->lock, flags); |
@@ -430,24 +481,29 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
430 | if (!(desc->status & IRQ_NOAUTOEN)) { | 481 | if (!(desc->status & IRQ_NOAUTOEN)) { |
431 | desc->depth = 0; | 482 | desc->depth = 0; |
432 | desc->status &= ~IRQ_DISABLED; | 483 | desc->status &= ~IRQ_DISABLED; |
433 | if (desc->chip->startup) | 484 | desc->chip->startup(irq); |
434 | desc->chip->startup(irq); | ||
435 | else | ||
436 | desc->chip->enable(irq); | ||
437 | } else | 485 | } else |
438 | /* Undo nested disables: */ | 486 | /* Undo nested disables: */ |
439 | desc->depth = 1; | 487 | desc->depth = 1; |
440 | 488 | ||
489 | /* Exclude IRQ from balancing if requested */ | ||
490 | if (new->flags & IRQF_NOBALANCING) | ||
491 | desc->status |= IRQ_NO_BALANCING; | ||
492 | |||
441 | /* Set default affinity mask once everything is setup */ | 493 | /* Set default affinity mask once everything is setup */ |
442 | irq_select_affinity(irq); | 494 | do_irq_select_affinity(irq, desc); |
495 | |||
496 | } else if ((new->flags & IRQF_TRIGGER_MASK) | ||
497 | && (new->flags & IRQF_TRIGGER_MASK) | ||
498 | != (desc->status & IRQ_TYPE_SENSE_MASK)) { | ||
499 | /* hope the handler works with the actual trigger mode... */ | ||
500 | pr_warning("IRQ %d uses trigger mode %d; requested %d\n", | ||
501 | irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK), | ||
502 | (int)(new->flags & IRQF_TRIGGER_MASK)); | ||
443 | } | 503 | } |
444 | 504 | ||
445 | *p = new; | 505 | *p = new; |
446 | 506 | ||
447 | /* Exclude IRQ from balancing */ | ||
448 | if (new->flags & IRQF_NOBALANCING) | ||
449 | desc->status |= IRQ_NO_BALANCING; | ||
450 | |||
451 | /* Reset broken irq detection when installing new handler */ | 507 | /* Reset broken irq detection when installing new handler */ |
452 | desc->irq_count = 0; | 508 | desc->irq_count = 0; |
453 | desc->irqs_unhandled = 0; | 509 | desc->irqs_unhandled = 0; |
@@ -464,7 +520,7 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
464 | spin_unlock_irqrestore(&desc->lock, flags); | 520 | spin_unlock_irqrestore(&desc->lock, flags); |
465 | 521 | ||
466 | new->irq = irq; | 522 | new->irq = irq; |
467 | register_irq_proc(irq); | 523 | register_irq_proc(irq, desc); |
468 | new->dir = NULL; | 524 | new->dir = NULL; |
469 | register_handler_proc(irq, new); | 525 | register_handler_proc(irq, new); |
470 | 526 | ||
@@ -484,6 +540,20 @@ mismatch: | |||
484 | } | 540 | } |
485 | 541 | ||
486 | /** | 542 | /** |
543 | * setup_irq - setup an interrupt | ||
544 | * @irq: Interrupt line to setup | ||
545 | * @act: irqaction for the interrupt | ||
546 | * | ||
547 | * Used to statically setup interrupts in the early boot process. | ||
548 | */ | ||
549 | int setup_irq(unsigned int irq, struct irqaction *act) | ||
550 | { | ||
551 | struct irq_desc *desc = irq_to_desc(irq); | ||
552 | |||
553 | return __setup_irq(irq, desc, act); | ||
554 | } | ||
555 | |||
556 | /** | ||
487 | * free_irq - free an interrupt | 557 | * free_irq - free an interrupt |
488 | * @irq: Interrupt line to free | 558 | * @irq: Interrupt line to free |
489 | * @dev_id: Device identity to free | 559 | * @dev_id: Device identity to free |
@@ -499,15 +569,15 @@ mismatch: | |||
499 | */ | 569 | */ |
500 | void free_irq(unsigned int irq, void *dev_id) | 570 | void free_irq(unsigned int irq, void *dev_id) |
501 | { | 571 | { |
502 | struct irq_desc *desc; | 572 | struct irq_desc *desc = irq_to_desc(irq); |
503 | struct irqaction **p; | 573 | struct irqaction **p; |
504 | unsigned long flags; | 574 | unsigned long flags; |
505 | 575 | ||
506 | WARN_ON(in_interrupt()); | 576 | WARN_ON(in_interrupt()); |
507 | if (irq >= NR_IRQS) | 577 | |
578 | if (!desc) | ||
508 | return; | 579 | return; |
509 | 580 | ||
510 | desc = irq_desc + irq; | ||
511 | spin_lock_irqsave(&desc->lock, flags); | 581 | spin_lock_irqsave(&desc->lock, flags); |
512 | p = &desc->action; | 582 | p = &desc->action; |
513 | for (;;) { | 583 | for (;;) { |
@@ -596,14 +666,28 @@ EXPORT_SYMBOL(free_irq); | |||
596 | * IRQF_SHARED Interrupt is shared | 666 | * IRQF_SHARED Interrupt is shared |
597 | * IRQF_DISABLED Disable local interrupts while processing | 667 | * IRQF_DISABLED Disable local interrupts while processing |
598 | * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy | 668 | * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy |
669 | * IRQF_TRIGGER_* Specify active edge(s) or level | ||
599 | * | 670 | * |
600 | */ | 671 | */ |
601 | int request_irq(unsigned int irq, irq_handler_t handler, | 672 | int request_irq(unsigned int irq, irq_handler_t handler, |
602 | unsigned long irqflags, const char *devname, void *dev_id) | 673 | unsigned long irqflags, const char *devname, void *dev_id) |
603 | { | 674 | { |
604 | struct irqaction *action; | 675 | struct irqaction *action; |
676 | struct irq_desc *desc; | ||
605 | int retval; | 677 | int retval; |
606 | 678 | ||
679 | /* | ||
680 | * handle_IRQ_event() always ignores IRQF_DISABLED except for | ||
681 | * the _first_ irqaction (sigh). That can cause oopsing, but | ||
682 | * the behavior is classified as "will not fix" so we need to | ||
683 | * start nudging drivers away from using that idiom. | ||
684 | */ | ||
685 | if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) | ||
686 | == (IRQF_SHARED|IRQF_DISABLED)) | ||
687 | pr_warning("IRQ %d/%s: IRQF_DISABLED is not " | ||
688 | "guaranteed on shared IRQs\n", | ||
689 | irq, devname); | ||
690 | |||
607 | #ifdef CONFIG_LOCKDEP | 691 | #ifdef CONFIG_LOCKDEP |
608 | /* | 692 | /* |
609 | * Lockdep wants atomic interrupt handlers: | 693 | * Lockdep wants atomic interrupt handlers: |
@@ -618,9 +702,12 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
618 | */ | 702 | */ |
619 | if ((irqflags & IRQF_SHARED) && !dev_id) | 703 | if ((irqflags & IRQF_SHARED) && !dev_id) |
620 | return -EINVAL; | 704 | return -EINVAL; |
621 | if (irq >= NR_IRQS) | 705 | |
706 | desc = irq_to_desc(irq); | ||
707 | if (!desc) | ||
622 | return -EINVAL; | 708 | return -EINVAL; |
623 | if (irq_desc[irq].status & IRQ_NOREQUEST) | 709 | |
710 | if (desc->status & IRQ_NOREQUEST) | ||
624 | return -EINVAL; | 711 | return -EINVAL; |
625 | if (!handler) | 712 | if (!handler) |
626 | return -EINVAL; | 713 | return -EINVAL; |
@@ -636,26 +723,29 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
636 | action->next = NULL; | 723 | action->next = NULL; |
637 | action->dev_id = dev_id; | 724 | action->dev_id = dev_id; |
638 | 725 | ||
726 | retval = __setup_irq(irq, desc, action); | ||
727 | if (retval) | ||
728 | kfree(action); | ||
729 | |||
639 | #ifdef CONFIG_DEBUG_SHIRQ | 730 | #ifdef CONFIG_DEBUG_SHIRQ |
640 | if (irqflags & IRQF_SHARED) { | 731 | if (irqflags & IRQF_SHARED) { |
641 | /* | 732 | /* |
642 | * It's a shared IRQ -- the driver ought to be prepared for it | 733 | * It's a shared IRQ -- the driver ought to be prepared for it |
643 | * to happen immediately, so let's make sure.... | 734 | * to happen immediately, so let's make sure.... |
644 | * We do this before actually registering it, to make sure that | 735 | * We disable the irq to make sure that a 'real' IRQ doesn't |
645 | * a 'real' IRQ doesn't run in parallel with our fake | 736 | * run in parallel with our fake. |
646 | */ | 737 | */ |
647 | unsigned long flags; | 738 | unsigned long flags; |
648 | 739 | ||
740 | disable_irq(irq); | ||
649 | local_irq_save(flags); | 741 | local_irq_save(flags); |
742 | |||
650 | handler(irq, dev_id); | 743 | handler(irq, dev_id); |
744 | |||
651 | local_irq_restore(flags); | 745 | local_irq_restore(flags); |
746 | enable_irq(irq); | ||
652 | } | 747 | } |
653 | #endif | 748 | #endif |
654 | |||
655 | retval = setup_irq(irq, action); | ||
656 | if (retval) | ||
657 | kfree(action); | ||
658 | |||
659 | return retval; | 749 | return retval; |
660 | } | 750 | } |
661 | EXPORT_SYMBOL(request_irq); | 751 | EXPORT_SYMBOL(request_irq); |
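
The manage.c hunks above replace the open-coded NR_IRQS bounds checks with irq_to_desc() lookups and warn about the IRQF_SHARED|IRQF_DISABLED combination. A hypothetical driver-side sketch (the device name, irq number, and helper are invented for illustration, not part of this patch) of how callers see these changes:

#include <linux/interrupt.h>

/* hypothetical helper: checks whether our device raised the interrupt */
static bool demo_device_raised_irq(void *dev_id);

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* shared handlers must check whether their device raised the irq */
	if (!demo_device_raised_irq(dev_id))
		return IRQ_NONE;
	return IRQ_HANDLED;
}

static int demo_probe(int irq, void *dev)
{
	/*
	 * Passing IRQF_SHARED|IRQF_DISABLED here would now trigger the
	 * pr_warning() above, since IRQF_DISABLED is only honoured for
	 * the first irqaction on a shared line.
	 */
	return request_irq(irq, demo_handler, IRQF_SHARED, "demo", dev);
	/* -EINVAL is returned for a NULL desc or an IRQ_NOREQUEST line */
}

static void demo_remove(int irq, void *dev)
{
	free_irq(irq, dev);	/* silently returns if irq_to_desc(irq) is NULL */
}
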
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index 77b7acc875c5..9db681d95814 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c | |||
@@ -1,20 +1,9 @@ | |||
1 | 1 | ||
2 | #include <linux/irq.h> | 2 | #include <linux/irq.h> |
3 | 3 | ||
4 | void set_pending_irq(unsigned int irq, cpumask_t mask) | ||
5 | { | ||
6 | struct irq_desc *desc = irq_desc + irq; | ||
7 | unsigned long flags; | ||
8 | |||
9 | spin_lock_irqsave(&desc->lock, flags); | ||
10 | desc->status |= IRQ_MOVE_PENDING; | ||
11 | irq_desc[irq].pending_mask = mask; | ||
12 | spin_unlock_irqrestore(&desc->lock, flags); | ||
13 | } | ||
14 | |||
15 | void move_masked_irq(int irq) | 4 | void move_masked_irq(int irq) |
16 | { | 5 | { |
17 | struct irq_desc *desc = irq_desc + irq; | 6 | struct irq_desc *desc = irq_to_desc(irq); |
18 | cpumask_t tmp; | 7 | cpumask_t tmp; |
19 | 8 | ||
20 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 9 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) |
@@ -30,7 +19,7 @@ void move_masked_irq(int irq) | |||
30 | 19 | ||
31 | desc->status &= ~IRQ_MOVE_PENDING; | 20 | desc->status &= ~IRQ_MOVE_PENDING; |
32 | 21 | ||
33 | if (unlikely(cpus_empty(irq_desc[irq].pending_mask))) | 22 | if (unlikely(cpus_empty(desc->pending_mask))) |
34 | return; | 23 | return; |
35 | 24 | ||
36 | if (!desc->chip->set_affinity) | 25 | if (!desc->chip->set_affinity) |
@@ -38,7 +27,7 @@ void move_masked_irq(int irq) | |||
38 | 27 | ||
39 | assert_spin_locked(&desc->lock); | 28 | assert_spin_locked(&desc->lock); |
40 | 29 | ||
41 | cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map); | 30 | cpus_and(tmp, desc->pending_mask, cpu_online_map); |
42 | 31 | ||
43 | /* | 32 | /* |
44 | * If there was a valid mask to work with, please | 33 | * If there was a valid mask to work with, please |
@@ -55,12 +44,12 @@ void move_masked_irq(int irq) | |||
55 | if (likely(!cpus_empty(tmp))) { | 44 | if (likely(!cpus_empty(tmp))) { |
56 | desc->chip->set_affinity(irq,tmp); | 45 | desc->chip->set_affinity(irq,tmp); |
57 | } | 46 | } |
58 | cpus_clear(irq_desc[irq].pending_mask); | 47 | cpus_clear(desc->pending_mask); |
59 | } | 48 | } |
60 | 49 | ||
61 | void move_native_irq(int irq) | 50 | void move_native_irq(int irq) |
62 | { | 51 | { |
63 | struct irq_desc *desc = irq_desc + irq; | 52 | struct irq_desc *desc = irq_to_desc(irq); |
64 | 53 | ||
65 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) | 54 | if (likely(!(desc->status & IRQ_MOVE_PENDING))) |
66 | return; | 55 | return; |
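
The migration.c conversion resolves the descriptor once via irq_to_desc() and then uses desc->pending_mask directly instead of re-indexing irq_desc[]. The tail of move_native_irq() falls outside the hunk shown; a hedged reconstruction of the mask/move/unmask pattern it layers over move_masked_irq():

/* Reconstructed for illustration; the body shown in the diff ends at
 * the IRQ_MOVE_PENDING check, so the rest is an assumption. */
void move_native_irq_sketch(int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
		return;

	if (unlikely(desc->status & IRQ_DISABLED))
		return;

	desc->chip->mask(irq);		/* quiesce the line ... */
	move_masked_irq(irq);		/* ... retarget it safely ... */
	desc->chip->unmask(irq);	/* ... and let it fire again */
}
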
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c new file mode 100644 index 000000000000..089c3746358a --- /dev/null +++ b/kernel/irq/numa_migrate.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * NUMA irq-desc migration code | ||
3 | * | ||
4 | * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to | ||
5 | * the new "home node" of the IRQ. | ||
6 | */ | ||
7 | |||
8 | #include <linux/irq.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/random.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/kernel_stat.h> | ||
13 | |||
14 | #include "internals.h" | ||
15 | |||
16 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, | ||
17 | struct irq_desc *desc, | ||
18 | int cpu, int nr) | ||
19 | { | ||
20 | unsigned long bytes; | ||
21 | |||
22 | init_kstat_irqs(desc, cpu, nr); | ||
23 | |||
24 | if (desc->kstat_irqs != old_desc->kstat_irqs) { | ||
25 | /* Compute how many bytes we need per irq and copy the old stats */ | ||
26 | bytes = nr * sizeof(unsigned int); | ||
27 | |||
28 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes); | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | ||
33 | { | ||
34 | if (old_desc->kstat_irqs == desc->kstat_irqs) | ||
35 | return; | ||
36 | |||
37 | kfree(old_desc->kstat_irqs); | ||
38 | old_desc->kstat_irqs = NULL; | ||
39 | } | ||
40 | |||
41 | static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | ||
42 | struct irq_desc *desc, int cpu) | ||
43 | { | ||
44 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | ||
45 | desc->cpu = cpu; | ||
46 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | ||
47 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); | ||
48 | arch_init_copy_chip_data(old_desc, desc, cpu); | ||
49 | } | ||
50 | |||
51 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | ||
52 | { | ||
53 | free_kstat_irqs(old_desc, desc); | ||
54 | arch_free_chip_data(old_desc, desc); | ||
55 | } | ||
56 | |||
57 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | ||
58 | int cpu) | ||
59 | { | ||
60 | struct irq_desc *desc; | ||
61 | unsigned int irq; | ||
62 | unsigned long flags; | ||
63 | int node; | ||
64 | |||
65 | irq = old_desc->irq; | ||
66 | |||
67 | spin_lock_irqsave(&sparse_irq_lock, flags); | ||
68 | |||
69 | /* We have to check it to avoid races with another CPU */ | ||
70 | desc = irq_desc_ptrs[irq]; | ||
71 | |||
72 | if (desc && old_desc != desc) | ||
73 | goto out_unlock; | ||
74 | |||
75 | node = cpu_to_node(cpu); | ||
76 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | ||
77 | printk(KERN_DEBUG " move irq_desc for %d to cpu %d node %d\n", | ||
78 | irq, cpu, node); | ||
79 | if (!desc) { | ||
80 | printk(KERN_ERR "cannot get a new irq_desc for moving\n"); | ||
81 | /* still use old one */ | ||
82 | desc = old_desc; | ||
83 | goto out_unlock; | ||
84 | } | ||
85 | init_copy_one_irq_desc(irq, old_desc, desc, cpu); | ||
86 | |||
87 | irq_desc_ptrs[irq] = desc; | ||
88 | |||
89 | /* free the old one */ | ||
90 | free_one_irq_desc(old_desc, desc); | ||
91 | kfree(old_desc); | ||
92 | |||
93 | out_unlock: | ||
94 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
95 | |||
96 | return desc; | ||
97 | } | ||
98 | |||
99 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu) | ||
100 | { | ||
101 | int old_cpu; | ||
102 | int node, old_node; | ||
103 | |||
104 | /* legacy IRQs are statically allocated; do not move them */ | ||
105 | if (desc->irq < NR_IRQS_LEGACY) | ||
106 | return desc; | ||
107 | |||
108 | old_cpu = desc->cpu; | ||
109 | printk(KERN_DEBUG | ||
110 | "try to move irq_desc from cpu %d to %d\n", old_cpu, cpu); | ||
111 | if (old_cpu != cpu) { | ||
112 | node = cpu_to_node(cpu); | ||
113 | old_node = cpu_to_node(old_cpu); | ||
114 | if (old_node != node) | ||
115 | desc = __real_move_irq_desc(desc, cpu); | ||
116 | else | ||
117 | desc->cpu = cpu; | ||
118 | } | ||
119 | |||
120 | return desc; | ||
121 | } | ||
122 | |||
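
The new numa_migrate.c publishes a freshly allocated descriptor under sparse_irq_lock and only then frees the old one, re-checking the table pointer to catch a racing mover. A minimal userland analogue of that allocate-copy-publish-free pattern (pthread-based; names are illustrative, not kernel API):

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

struct obj { int id; int data[16]; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table[256];

static struct obj *move_obj(struct obj *old)
{
	struct obj *new;

	pthread_mutex_lock(&table_lock);
	/* re-check under the lock: another thread may have moved it */
	if (table[old->id] != old) {
		new = table[old->id];
		goto out;
	}
	new = malloc(sizeof(*new));
	if (!new) {
		new = old;		/* allocation failed: keep the old copy */
		goto out;
	}
	memcpy(new, old, sizeof(*new));
	table[old->id] = new;		/* publish, then free the old one */
	free(old);
out:
	pthread_mutex_unlock(&table_lock);
	return new;
}
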
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index a09dd29c2fd7..f6b3440f05bc 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir; | |||
19 | 19 | ||
20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
21 | { | 21 | { |
22 | struct irq_desc *desc = irq_desc + (long)m->private; | 22 | struct irq_desc *desc = irq_to_desc((long)m->private); |
23 | cpumask_t *mask = &desc->affinity; | 23 | cpumask_t *mask = &desc->affinity; |
24 | 24 | ||
25 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
@@ -43,7 +43,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
43 | cpumask_t new_value; | 43 | cpumask_t new_value; |
44 | int err; | 44 | int err; |
45 | 45 | ||
46 | if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || | 46 | if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity || |
47 | irq_balancing_disabled(irq)) | 47 | irq_balancing_disabled(irq)) |
48 | return -EIO; | 48 | return -EIO; |
49 | 49 | ||
@@ -62,7 +62,7 @@ static ssize_t irq_affinity_proc_write(struct file *file, | |||
62 | if (!cpus_intersects(new_value, cpu_online_map)) | 62 | if (!cpus_intersects(new_value, cpu_online_map)) |
63 | /* Special case for empty set - allow the architecture | 63 | /* Special case for empty set - allow the architecture |
64 | code to set default SMP affinity. */ | 64 | code to set default SMP affinity. */ |
65 | return irq_select_affinity(irq) ? -EINVAL : count; | 65 | return irq_select_affinity_usr(irq) ? -EINVAL : count; |
66 | 66 | ||
67 | irq_set_affinity(irq, new_value); | 67 | irq_set_affinity(irq, new_value); |
68 | 68 | ||
@@ -132,20 +132,20 @@ static const struct file_operations default_affinity_proc_fops = { | |||
132 | static int irq_spurious_read(char *page, char **start, off_t off, | 132 | static int irq_spurious_read(char *page, char **start, off_t off, |
133 | int count, int *eof, void *data) | 133 | int count, int *eof, void *data) |
134 | { | 134 | { |
135 | struct irq_desc *d = &irq_desc[(long) data]; | 135 | struct irq_desc *desc = irq_to_desc((long) data); |
136 | return sprintf(page, "count %u\n" | 136 | return sprintf(page, "count %u\n" |
137 | "unhandled %u\n" | 137 | "unhandled %u\n" |
138 | "last_unhandled %u ms\n", | 138 | "last_unhandled %u ms\n", |
139 | d->irq_count, | 139 | desc->irq_count, |
140 | d->irqs_unhandled, | 140 | desc->irqs_unhandled, |
141 | jiffies_to_msecs(d->last_unhandled)); | 141 | jiffies_to_msecs(desc->last_unhandled)); |
142 | } | 142 | } |
143 | 143 | ||
144 | #define MAX_NAMELEN 128 | 144 | #define MAX_NAMELEN 128 |
145 | 145 | ||
146 | static int name_unique(unsigned int irq, struct irqaction *new_action) | 146 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
147 | { | 147 | { |
148 | struct irq_desc *desc = irq_desc + irq; | 148 | struct irq_desc *desc = irq_to_desc(irq); |
149 | struct irqaction *action; | 149 | struct irqaction *action; |
150 | unsigned long flags; | 150 | unsigned long flags; |
151 | int ret = 1; | 151 | int ret = 1; |
@@ -165,8 +165,9 @@ static int name_unique(unsigned int irq, struct irqaction *new_action) | |||
165 | void register_handler_proc(unsigned int irq, struct irqaction *action) | 165 | void register_handler_proc(unsigned int irq, struct irqaction *action) |
166 | { | 166 | { |
167 | char name [MAX_NAMELEN]; | 167 | char name [MAX_NAMELEN]; |
168 | struct irq_desc *desc = irq_to_desc(irq); | ||
168 | 169 | ||
169 | if (!irq_desc[irq].dir || action->dir || !action->name || | 170 | if (!desc->dir || action->dir || !action->name || |
170 | !name_unique(irq, action)) | 171 | !name_unique(irq, action)) |
171 | return; | 172 | return; |
172 | 173 | ||
@@ -174,36 +175,34 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
174 | snprintf(name, MAX_NAMELEN, "%s", action->name); | 175 | snprintf(name, MAX_NAMELEN, "%s", action->name); |
175 | 176 | ||
176 | /* create /proc/irq/1234/handler/ */ | 177 | /* create /proc/irq/1234/handler/ */ |
177 | action->dir = proc_mkdir(name, irq_desc[irq].dir); | 178 | action->dir = proc_mkdir(name, desc->dir); |
178 | } | 179 | } |
179 | 180 | ||
180 | #undef MAX_NAMELEN | 181 | #undef MAX_NAMELEN |
181 | 182 | ||
182 | #define MAX_NAMELEN 10 | 183 | #define MAX_NAMELEN 10 |
183 | 184 | ||
184 | void register_irq_proc(unsigned int irq) | 185 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
185 | { | 186 | { |
186 | char name [MAX_NAMELEN]; | 187 | char name [MAX_NAMELEN]; |
187 | struct proc_dir_entry *entry; | 188 | struct proc_dir_entry *entry; |
188 | 189 | ||
189 | if (!root_irq_dir || | 190 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) |
190 | (irq_desc[irq].chip == &no_irq_chip) || | ||
191 | irq_desc[irq].dir) | ||
192 | return; | 191 | return; |
193 | 192 | ||
194 | memset(name, 0, MAX_NAMELEN); | 193 | memset(name, 0, MAX_NAMELEN); |
195 | sprintf(name, "%d", irq); | 194 | sprintf(name, "%d", irq); |
196 | 195 | ||
197 | /* create /proc/irq/1234 */ | 196 | /* create /proc/irq/1234 */ |
198 | irq_desc[irq].dir = proc_mkdir(name, root_irq_dir); | 197 | desc->dir = proc_mkdir(name, root_irq_dir); |
199 | 198 | ||
200 | #ifdef CONFIG_SMP | 199 | #ifdef CONFIG_SMP |
201 | /* create /proc/irq/<irq>/smp_affinity */ | 200 | /* create /proc/irq/<irq>/smp_affinity */ |
202 | proc_create_data("smp_affinity", 0600, irq_desc[irq].dir, | 201 | proc_create_data("smp_affinity", 0600, desc->dir, |
203 | &irq_affinity_proc_fops, (void *)(long)irq); | 202 | &irq_affinity_proc_fops, (void *)(long)irq); |
204 | #endif | 203 | #endif |
205 | 204 | ||
206 | entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir); | 205 | entry = create_proc_entry("spurious", 0444, desc->dir); |
207 | if (entry) { | 206 | if (entry) { |
208 | entry->data = (void *)(long)irq; | 207 | entry->data = (void *)(long)irq; |
209 | entry->read_proc = irq_spurious_read; | 208 | entry->read_proc = irq_spurious_read; |
@@ -214,11 +213,14 @@ void register_irq_proc(unsigned int irq) | |||
214 | 213 | ||
215 | void unregister_handler_proc(unsigned int irq, struct irqaction *action) | 214 | void unregister_handler_proc(unsigned int irq, struct irqaction *action) |
216 | { | 215 | { |
217 | if (action->dir) | 216 | if (action->dir) { |
218 | remove_proc_entry(action->dir->name, irq_desc[irq].dir); | 217 | struct irq_desc *desc = irq_to_desc(irq); |
218 | |||
219 | remove_proc_entry(action->dir->name, desc->dir); | ||
220 | } | ||
219 | } | 221 | } |
220 | 222 | ||
221 | void register_default_affinity_proc(void) | 223 | static void register_default_affinity_proc(void) |
222 | { | 224 | { |
223 | #ifdef CONFIG_SMP | 225 | #ifdef CONFIG_SMP |
224 | proc_create("irq/default_smp_affinity", 0600, NULL, | 226 | proc_create("irq/default_smp_affinity", 0600, NULL, |
@@ -228,7 +230,8 @@ void register_default_affinity_proc(void) | |||
228 | 230 | ||
229 | void init_irq_proc(void) | 231 | void init_irq_proc(void) |
230 | { | 232 | { |
231 | int i; | 233 | unsigned int irq; |
234 | struct irq_desc *desc; | ||
232 | 235 | ||
233 | /* create /proc/irq */ | 236 | /* create /proc/irq */ |
234 | root_irq_dir = proc_mkdir("irq", NULL); | 237 | root_irq_dir = proc_mkdir("irq", NULL); |
@@ -240,7 +243,11 @@ void init_irq_proc(void) | |||
240 | /* | 243 | /* |
241 | * Create entries for all existing IRQs. | 244 | * Create entries for all existing IRQs. |
242 | */ | 245 | */ |
243 | for (i = 0; i < NR_IRQS; i++) | 246 | for_each_irq_desc(irq, desc) { |
244 | register_irq_proc(i); | 247 | if (!desc) |
248 | continue; | ||
249 | |||
250 | register_irq_proc(irq, desc); | ||
251 | } | ||
245 | } | 252 | } |
246 | 253 | ||
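
The proc.c conversion threads struct irq_desc through register_irq_proc() instead of indexing irq_desc[] at each use. For context, a small userspace reader of the files this code creates (assumes /proc is mounted and that irq 19 exists; both are assumptions):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/irq/19/smp_affinity", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("irq 19 affinity mask: %s", buf);
	fclose(f);

	/* the "spurious" file exports the counters from irq_spurious_read() */
	f = fopen("/proc/irq/19/spurious", "r");
	if (!f)
		return 1;
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}
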
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index a8046791ba2d..89c7117acf2b 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -33,10 +33,10 @@ static void resend_irqs(unsigned long arg) | |||
33 | struct irq_desc *desc; | 33 | struct irq_desc *desc; |
34 | int irq; | 34 | int irq; |
35 | 35 | ||
36 | while (!bitmap_empty(irqs_resend, NR_IRQS)) { | 36 | while (!bitmap_empty(irqs_resend, nr_irqs)) { |
37 | irq = find_first_bit(irqs_resend, NR_IRQS); | 37 | irq = find_first_bit(irqs_resend, nr_irqs); |
38 | clear_bit(irq, irqs_resend); | 38 | clear_bit(irq, irqs_resend); |
39 | desc = irq_desc + irq; | 39 | desc = irq_to_desc(irq); |
40 | local_irq_disable(); | 40 | local_irq_disable(); |
41 | desc->handle_irq(irq, desc); | 41 | desc->handle_irq(irq, desc); |
42 | local_irq_enable(); | 42 | local_irq_enable(); |
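
resend.c now scans up to the runtime nr_irqs limit rather than the compile-time NR_IRQS. A self-contained sketch of the same drain-the-bitmap loop, with find_first_bit()/clear_bit() emulated by a GCC builtin and a mask update (single-word bitmap for brevity):

#include <stdio.h>

static unsigned long irqs_resend = (1UL << 3) | (1UL << 17) | (1UL << 27);

int main(void)
{
	while (irqs_resend) {
		int irq = __builtin_ctzl(irqs_resend);	/* find_first_bit() */
		irqs_resend &= ~(1UL << irq);		/* clear_bit() */
		printf("resending irq %d\n", irq);	/* desc->handle_irq() */
	}
	return 0;
}
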
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index c66d3f10e853..3738107531fd 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -12,83 +12,127 @@ | |||
12 | #include <linux/kallsyms.h> | 12 | #include <linux/kallsyms.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
15 | #include <linux/timer.h> | ||
15 | 16 | ||
16 | static int irqfixup __read_mostly; | 17 | static int irqfixup __read_mostly; |
17 | 18 | ||
19 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) | ||
20 | static void poll_spurious_irqs(unsigned long dummy); | ||
21 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); | ||
22 | |||
18 | /* | 23 | /* |
19 | * Recovery handler for misrouted interrupts. | 24 | * Recovery handler for misrouted interrupts. |
20 | */ | 25 | */ |
21 | static int misrouted_irq(int irq) | 26 | static int try_one_irq(int irq, struct irq_desc *desc) |
22 | { | 27 | { |
23 | int i; | 28 | struct irqaction *action; |
24 | int ok = 0; | 29 | int ok = 0, work = 0; |
25 | int work = 0; /* Did we do work for a real IRQ */ | ||
26 | |||
27 | for (i = 1; i < NR_IRQS; i++) { | ||
28 | struct irq_desc *desc = irq_desc + i; | ||
29 | struct irqaction *action; | ||
30 | |||
31 | if (i == irq) /* Already tried */ | ||
32 | continue; | ||
33 | 30 | ||
34 | spin_lock(&desc->lock); | 31 | spin_lock(&desc->lock); |
35 | /* Already running on another processor */ | 32 | /* Already running on another processor */ |
36 | if (desc->status & IRQ_INPROGRESS) { | 33 | if (desc->status & IRQ_INPROGRESS) { |
37 | /* | 34 | /* |
38 | * Already running: If it is shared get the other | 35 | * Already running: If it is shared get the other |
39 | * CPU to go looking for our mystery interrupt too | 36 | * CPU to go looking for our mystery interrupt too |
40 | */ | 37 | */ |
41 | if (desc->action && (desc->action->flags & IRQF_SHARED)) | 38 | if (desc->action && (desc->action->flags & IRQF_SHARED)) |
42 | desc->status |= IRQ_PENDING; | 39 | desc->status |= IRQ_PENDING; |
43 | spin_unlock(&desc->lock); | ||
44 | continue; | ||
45 | } | ||
46 | /* Honour the normal IRQ locking */ | ||
47 | desc->status |= IRQ_INPROGRESS; | ||
48 | action = desc->action; | ||
49 | spin_unlock(&desc->lock); | 40 | spin_unlock(&desc->lock); |
41 | return ok; | ||
42 | } | ||
43 | /* Honour the normal IRQ locking */ | ||
44 | desc->status |= IRQ_INPROGRESS; | ||
45 | action = desc->action; | ||
46 | spin_unlock(&desc->lock); | ||
50 | 47 | ||
51 | while (action) { | 48 | while (action) { |
52 | /* Only shared IRQ handlers are safe to call */ | 49 | /* Only shared IRQ handlers are safe to call */ |
53 | if (action->flags & IRQF_SHARED) { | 50 | if (action->flags & IRQF_SHARED) { |
54 | if (action->handler(i, action->dev_id) == | 51 | if (action->handler(irq, action->dev_id) == |
55 | IRQ_HANDLED) | 52 | IRQ_HANDLED) |
56 | ok = 1; | 53 | ok = 1; |
57 | } | ||
58 | action = action->next; | ||
59 | } | 54 | } |
60 | local_irq_disable(); | 55 | action = action->next; |
61 | /* Now clean up the flags */ | 56 | } |
62 | spin_lock(&desc->lock); | 57 | local_irq_disable(); |
63 | action = desc->action; | 58 | /* Now clean up the flags */ |
59 | spin_lock(&desc->lock); | ||
60 | action = desc->action; | ||
64 | 61 | ||
62 | /* | ||
63 | * While we were looking for a fixup someone queued a real | ||
64 | * IRQ clashing with our walk: | ||
65 | */ | ||
66 | while ((desc->status & IRQ_PENDING) && action) { | ||
65 | /* | 67 | /* |
66 | * While we were looking for a fixup someone queued a real | 68 | * Perform real IRQ processing for the IRQ we deferred |
67 | * IRQ clashing with our walk: | ||
68 | */ | ||
69 | while ((desc->status & IRQ_PENDING) && action) { | ||
70 | /* | ||
71 | * Perform real IRQ processing for the IRQ we deferred | ||
72 | */ | ||
73 | work = 1; | ||
74 | spin_unlock(&desc->lock); | ||
75 | handle_IRQ_event(i, action); | ||
76 | spin_lock(&desc->lock); | ||
77 | desc->status &= ~IRQ_PENDING; | ||
78 | } | ||
79 | desc->status &= ~IRQ_INPROGRESS; | ||
80 | /* | ||
81 | * If we did actual work for the real IRQ line we must let the | ||
82 | * IRQ controller clean up too | ||
83 | */ | 69 | */ |
84 | if (work && desc->chip && desc->chip->end) | 70 | work = 1; |
85 | desc->chip->end(i); | ||
86 | spin_unlock(&desc->lock); | 71 | spin_unlock(&desc->lock); |
72 | handle_IRQ_event(irq, action); | ||
73 | spin_lock(&desc->lock); | ||
74 | desc->status &= ~IRQ_PENDING; | ||
75 | } | ||
76 | desc->status &= ~IRQ_INPROGRESS; | ||
77 | /* | ||
78 | * If we did actual work for the real IRQ line we must let the | ||
79 | * IRQ controller clean up too | ||
80 | */ | ||
81 | if (work && desc->chip && desc->chip->end) | ||
82 | desc->chip->end(irq); | ||
83 | spin_unlock(&desc->lock); | ||
84 | |||
85 | return ok; | ||
86 | } | ||
87 | |||
88 | static int misrouted_irq(int irq) | ||
89 | { | ||
90 | struct irq_desc *desc; | ||
91 | int i, ok = 0; | ||
92 | |||
93 | for_each_irq_desc(i, desc) { | ||
94 | if (!desc) | ||
95 | continue; | ||
96 | |||
97 | if (!i) | ||
98 | continue; | ||
99 | |||
100 | if (i == irq) /* Already tried */ | ||
101 | continue; | ||
102 | |||
103 | if (try_one_irq(i, desc)) | ||
104 | ok = 1; | ||
87 | } | 105 | } |
88 | /* So the caller can adjust the irq error counts */ | 106 | /* So the caller can adjust the irq error counts */ |
89 | return ok; | 107 | return ok; |
90 | } | 108 | } |
91 | 109 | ||
110 | static void poll_spurious_irqs(unsigned long dummy) | ||
111 | { | ||
112 | struct irq_desc *desc; | ||
113 | int i; | ||
114 | |||
115 | for_each_irq_desc(i, desc) { | ||
116 | unsigned int status; | ||
117 | |||
118 | if (!desc) | ||
119 | continue; | ||
120 | if (!i) | ||
121 | continue; | ||
122 | |||
123 | /* Racy but it doesn't matter */ | ||
124 | status = desc->status; | ||
125 | barrier(); | ||
126 | if (!(status & IRQ_SPURIOUS_DISABLED)) | ||
127 | continue; | ||
128 | |||
129 | try_one_irq(i, desc); | ||
130 | } | ||
131 | |||
132 | mod_timer(&poll_spurious_irq_timer, | ||
133 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | ||
134 | } | ||
135 | |||
92 | /* | 136 | /* |
93 | * If 99,900 of the previous 100,000 interrupts have not been handled | 137 | * If 99,900 of the previous 100,000 interrupts have not been handled |
94 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic | 138 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic |
@@ -137,7 +181,9 @@ report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) | |||
137 | } | 181 | } |
138 | } | 182 | } |
139 | 183 | ||
140 | static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) | 184 | static inline int |
185 | try_misrouted_irq(unsigned int irq, struct irq_desc *desc, | ||
186 | irqreturn_t action_ret) | ||
141 | { | 187 | { |
142 | struct irqaction *action; | 188 | struct irqaction *action; |
143 | 189 | ||
@@ -212,6 +258,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
212 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; | 258 | desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; |
213 | desc->depth++; | 259 | desc->depth++; |
214 | desc->chip->disable(irq); | 260 | desc->chip->disable(irq); |
261 | |||
262 | mod_timer(&poll_spurious_irq_timer, | ||
263 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | ||
215 | } | 264 | } |
216 | desc->irqs_unhandled = 0; | 265 | desc->irqs_unhandled = 0; |
217 | } | 266 | } |
@@ -241,7 +290,7 @@ static int __init irqfixup_setup(char *str) | |||
241 | 290 | ||
242 | __setup("irqfixup", irqfixup_setup); | 291 | __setup("irqfixup", irqfixup_setup); |
243 | module_param(irqfixup, int, 0644); | 292 | module_param(irqfixup, int, 0644); |
244 | MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode 2: irqpoll mode"); | 293 | MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode"); |
245 | 294 | ||
246 | static int __init irqpoll_setup(char *str) | 295 | static int __init irqpoll_setup(char *str) |
247 | { | 296 | { |
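
For reference, a toy model of the unhandled-interrupt heuristic that note_interrupt() applies before setting IRQ_SPURIOUS_DISABLED and, with this patch, arming poll_spurious_irq_timer. The counters mirror desc->irq_count and desc->irqs_unhandled; the 99,900-of-100,000 threshold is taken from the comment in the spurious.c hunk above, and everything else is illustrative:

#include <stdio.h>

struct toy_desc { unsigned int irq_count, irqs_unhandled, disabled; };

static void toy_note_interrupt(struct toy_desc *d, int handled)
{
	if (!handled)
		d->irqs_unhandled++;
	if (++d->irq_count < 100000)
		return;
	if (d->irqs_unhandled > 99900) {
		/* the kernel also mod_timer()s the spurious-irq poller here */
		d->disabled = 1;
		printf("disabling stuck irq\n");
	}
	d->irq_count = 0;
	d->irqs_unhandled = 0;
}
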