Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Makefile       |   2
-rw-r--r--  kernel/irq/chip.c         |  92
-rw-r--r--  kernel/irq/devres.c       |  16
-rw-r--r--  kernel/irq/handle.c       | 128
-rw-r--r--  kernel/irq/internals.h    |  17
-rw-r--r--  kernel/irq/manage.c       | 349
-rw-r--r--  kernel/irq/migration.c    |  14
-rw-r--r--  kernel/irq/numa_migrate.c |  43
-rw-r--r--  kernel/irq/pm.c           |   8
-rw-r--r--  kernel/irq/proc.c         |  40
-rw-r--r--  kernel/irq/resend.c       |   3
-rw-r--r--  kernel/irq/spurious.c     |  17
12 files changed, 577 insertions(+), 152 deletions(-)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 3394f8f52964..7d047808419d 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,5 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c687ba4363f2..ba566c261adc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -166,11 +166,11 @@ int set_irq_data(unsigned int irq, void *data)
 EXPORT_SYMBOL(set_irq_data);
 
 /**
- * set_irq_data - set irq type data for an irq
+ * set_irq_msi - set MSI descriptor data for an irq
  * @irq:   Interrupt number
  * @entry: Pointer to MSI descriptor data
  *
- * Set the hardware irq controller data for an irq
+ * Set the MSI descriptor entry for an irq
  */
 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 {
@@ -222,6 +222,34 @@ int set_irq_chip_data(unsigned int irq, void *data)
 }
 EXPORT_SYMBOL(set_irq_chip_data);
 
+/**
+ * set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
+ * @irq:  Interrupt number
+ * @nest: 0 to clear / 1 to set the IRQ_NESTED_THREAD flag
+ *
+ * The IRQ_NESTED_THREAD flag indicates that on
+ * request_threaded_irq() no separate interrupt thread should be
+ * created for the irq as the handlers are called nested in the
+ * context of a demultiplexing interrupt handler thread.
+ */
+void set_irq_nested_thread(unsigned int irq, int nest)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+
+	if (!desc)
+		return;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	if (nest)
+		desc->status |= IRQ_NESTED_THREAD;
+	else
+		desc->status &= ~IRQ_NESTED_THREAD;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(set_irq_nested_thread);
+
 /*
  * default enable function
  */
@@ -299,6 +327,45 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 	}
 }
 
+/*
+ * handle_nested_irq - Handle a nested irq from an irq thread
+ * @irq: the interrupt number
+ *
+ * Handle interrupts which are nested into a threaded interrupt
+ * handler. The handler function is called inside the calling
+ * thread's context.
+ */
+void handle_nested_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	irqreturn_t action_ret;
+
+	might_sleep();
+
+	spin_lock_irq(&desc->lock);
+
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	action = desc->action;
+	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
+		goto out_unlock;
+
+	desc->status |= IRQ_INPROGRESS;
+	spin_unlock_irq(&desc->lock);
+
+	action_ret = action->thread_fn(action->irq, action->dev_id);
+	if (!noirqdebug)
+		note_interrupt(irq, desc, action_ret);
+
+	spin_lock_irq(&desc->lock);
+	desc->status &= ~IRQ_INPROGRESS;
+
+out_unlock:
+	spin_unlock_irq(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_nested_irq);
+
 /**
  * handle_simple_irq - Simple and software-decoded IRQs.
  * @irq: the interrupt number
@@ -359,7 +426,6 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -383,7 +449,10 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
-	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+
+	if (unlikely(desc->status & IRQ_ONESHOT))
+		desc->status |= IRQ_MASKED;
+	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
 		desc->chip->unmask(irq);
 out_unlock:
 	spin_unlock(&desc->lock);
@@ -438,7 +507,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	spin_unlock(&desc->lock);
 }
@@ -475,7 +543,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		     !desc->action)) {
 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
 		mask_ack_irq(desc, irq);
-		desc = irq_remap_to_desc(irq, desc);
 		goto out_unlock;
 	}
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -483,7 +550,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	/* Start handling the irq */
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
-	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
 	desc->status |= IRQ_INPROGRESS;
@@ -524,7 +590,7 @@ out_unlock:
 }
 
 /**
- * handle_percpu_IRQ - Per CPU local irq handler
+ * handle_percpu_irq - Per CPU local irq handler
  * @irq:  the interrupt number
  * @desc: the interrupt description structure for this irq
  *
@@ -544,10 +610,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	if (desc->chip->eoi) {
+	if (desc->chip->eoi)
 		desc->chip->eoi(irq);
-		desc = irq_remap_to_desc(irq, desc);
-	}
 }
 
 void
@@ -578,14 +642,13 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		desc->chip = &dummy_irq_chip;
 	}
 
+	chip_bus_lock(irq, desc);
 	spin_lock_irqsave(&desc->lock, flags);
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
-		if (desc->chip != &no_irq_chip) {
+		if (desc->chip != &no_irq_chip)
 			mask_ack_irq(desc, irq);
-			desc = irq_remap_to_desc(irq, desc);
-		}
 		desc->status |= IRQ_DISABLED;
 		desc->depth = 1;
 	}
@@ -599,6 +662,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 			desc->chip->startup(irq);
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
 
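The two entry points added above, set_irq_nested_thread() and handle_nested_irq(), are consumed by demultiplexing drivers. A minimal sketch of such a consumer follows; the expander device, expander_read_status(), expander_irq_chip and the EXP_IRQ_* numbers are hypothetical, only the calls into the new API are from this patch:

/*
 * Sketch: a GPIO expander behind a slow bus. Its parent interrupt is
 * requested with request_threaded_irq(); the thread reads the pending
 * bitmap and dispatches the child interrupts nested in its own context.
 */
#include <linux/irq.h>
#include <linux/interrupt.h>

#define EXP_IRQ_BASE	256	/* hypothetical child irq range */
#define EXP_NR_IRQS	8

static irqreturn_t expander_irq_thread(int irq, void *data)
{
	/* hypothetical accessor; may sleep on the bus */
	unsigned long pending = expander_read_status(data);
	int bit;

	for (bit = 0; bit < EXP_NR_IRQS; bit++)
		if (pending & (1UL << bit))
			handle_nested_irq(EXP_IRQ_BASE + bit);

	return IRQ_HANDLED;
}

static void expander_setup_irqs(void)
{
	int i;

	for (i = 0; i < EXP_NR_IRQS; i++) {
		set_irq_chip_and_handler(EXP_IRQ_BASE + i,
					 &expander_irq_chip,
					 handle_simple_irq);
		/* child handlers run nested in expander_irq_thread() */
		set_irq_nested_thread(EXP_IRQ_BASE + i, 1);
	}
}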
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 38a25b8d8bff..d06df9c41cba 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
 }
 
 /**
- * devm_request_irq - allocate an interrupt line for a managed device
+ * devm_request_threaded_irq - allocate an interrupt line for a managed device
  * @dev: device to request interrupt for
  * @irq: Interrupt line to allocate
  * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ *	       for devices which handle everything in @handler
  * @irqflags: Interrupt type flags
  * @devname: An ascii name for the claiming device
  * @dev_id: A cookie passed back to the handler function
@@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
  * If an IRQ allocated with this function needs to be freed
  * separately, dev_free_irq() must be used.
  */
-int devm_request_irq(struct device *dev, unsigned int irq,
-		     irq_handler_t handler, unsigned long irqflags,
-		     const char *devname, void *dev_id)
+int devm_request_threaded_irq(struct device *dev, unsigned int irq,
+			      irq_handler_t handler, irq_handler_t thread_fn,
+			      unsigned long irqflags, const char *devname,
+			      void *dev_id)
 {
 	struct irq_devres *dr;
 	int rc;
@@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
 	if (!dr)
 		return -ENOMEM;
 
-	rc = request_irq(irq, handler, irqflags, devname, dev_id);
+	rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
+				  dev_id);
 	if (rc) {
 		devres_free(dr);
 		return rc;
@@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,
 
 	return 0;
 }
-EXPORT_SYMBOL(devm_request_irq);
+EXPORT_SYMBOL(devm_request_threaded_irq);
 
 /**
  * devm_free_irq - free an interrupt
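With the devres wrapper now taking a @thread_fn, a managed driver requests a threaded interrupt from its probe() routine like this (a minimal sketch; foo_probe and both handler bodies are hypothetical, the API is the one added above):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	/* hard interrupt context: ack/disable the device and defer */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* process context: sleeping bus accesses are fine here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;

	/* the line is freed automatically when the device is unbound */
	return devm_request_threaded_irq(&pdev->dev, irq,
					 foo_quick_check, foo_thread_fn,
					 0, "foo", pdev);
}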
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9ebf77968871..17c71bb565c6 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -11,6 +11,8 @@
  */
 
 #include <linux/irq.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
@@ -18,6 +20,7 @@
 #include <linux/rculist.h>
 #include <linux/hash.h>
 #include <linux/bootmem.h>
+#include <trace/events/irq.h>
 
 #include "internals.h"
 
@@ -43,7 +46,7 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 static void __init init_irq_default_affinity(void)
 {
-	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
 	cpumask_setall(irq_default_affinity);
 }
 #else
@@ -80,45 +83,48 @@ static struct irq_desc irq_desc_init = {
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
-void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
-	int node;
 	void *ptr;
 
-	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
+	if (slab_is_available())
+		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+				   GFP_ATOMIC, node);
+	else
+		ptr = alloc_bootmem_node(NODE_DATA(node),
+					 nr * sizeof(*desc->kstat_irqs));
 
 	/*
 	 * don't overwrite if can not get new one
 	 * init_copy_kstat_irqs() could still use old one
 	 */
 	if (ptr) {
-		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
-			cpu, node);
+		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
 		desc->kstat_irqs = ptr;
 	}
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
 	spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
-	desc->cpu = cpu;
+	desc->node = node;
 #endif
 	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	init_kstat_irqs(desc, node, nr_cpu_ids);
 	if (!desc->kstat_irqs) {
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
-	if (!init_alloc_desc_masks(desc, cpu, false)) {
+	if (!alloc_desc_masks(desc, node, false)) {
 		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
 		BUG_ON(1);
 	}
-	arch_init_chip_data(desc, cpu);
+	init_desc_masks(desc);
+	arch_init_chip_data(desc, node);
 }
 
 /*
@@ -145,6 +151,7 @@ int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
 	int legacy_count;
+	int node;
 	int i;
 
 	init_irq_default_affinity();
@@ -155,20 +162,24 @@ int __init early_irq_init(void)
 
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+	node = first_online_node;
 
 	/* allocate irq_desc_ptrs array based on nr_irqs */
-	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
 
 	/* allocate based on nr_cpu_ids */
-	/* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
-	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
-					  sizeof(int));
+	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+					 sizeof(int), GFP_NOWAIT, node);
 
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
+#ifdef CONFIG_SMP
+		desc[i].node = node;
+#endif
 		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		init_alloc_desc_masks(&desc[i], 0, true);
+		alloc_desc_masks(&desc[i], node, true);
+		init_desc_masks(&desc[i]);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
@@ -186,11 +197,10 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	return NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
-	int node;
 
 	if (irq >= nr_irqs) {
 		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
@@ -209,15 +219,17 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	if (desc)
 		goto out_unlock;
 
-	node = cpu_to_node(cpu);
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc irq_desc for %d on cpu %d node %d\n",
-		irq, cpu, node);
+	if (slab_is_available())
+		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	else
+		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+
+	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
 	if (!desc) {
 		printk(KERN_ERR "can not alloc irq_desc\n");
 		BUG_ON(1);
 	}
-	init_one_irq_desc(irq, desc, cpu);
+	init_one_irq_desc(irq, desc, node);
 
 	irq_desc_ptrs[irq] = desc;
 
@@ -255,7 +267,8 @@ int __init early_irq_init(void)
 
 	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
-		init_alloc_desc_masks(&desc[i], 0, true);
+		alloc_desc_masks(&desc[i], 0, true);
+		init_desc_masks(&desc[i]);
 		desc[i].kstat_irqs = kstat_irqs_all[i];
 	}
 	return arch_early_irq_init();
@@ -266,7 +279,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	return irq_to_desc(irq);
 }
@@ -338,6 +351,15 @@ irqreturn_t no_action(int cpl, void *dev_id)
 	return IRQ_NONE;
 }
 
+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+		return;
+
+	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+	       "but no thread function available.", irq, action->name);
+}
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq: the interrupt number
@@ -350,15 +372,54 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
 	do {
+		trace_irq_handler_entry(irq, action);
 		ret = action->handler(irq, action->dev_id);
-		if (ret == IRQ_HANDLED)
+		trace_irq_handler_exit(irq, action, ret);
+
+		switch (ret) {
+		case IRQ_WAKE_THREAD:
+			/*
+			 * Set result to handled so the spurious check
+			 * does not trigger.
+			 */
+			ret = IRQ_HANDLED;
+
+			/*
+			 * Catch drivers which return WAKE_THREAD but
+			 * did not set up a thread function
+			 */
+			if (unlikely(!action->thread_fn)) {
+				warn_no_thread(irq, action);
+				break;
+			}
+
+			/*
+			 * Wake up the handler thread for this
+			 * action. In case the thread crashed and was
+			 * killed we just pretend that we handled the
+			 * interrupt. The hardirq handler above has
+			 * disabled the device interrupt, so no irq
+			 * storm is lurking.
+			 */
+			if (likely(!test_bit(IRQTF_DIED,
+					     &action->thread_flags))) {
+				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+				wake_up_process(action->thread);
+			}
+
+			/* Fall through to add to randomness */
+		case IRQ_HANDLED:
 			status |= action->flags;
+			break;
+
+		default:
+			break;
+		}
+
 		retval |= ret;
 		action = action->next;
 	} while (action);
@@ -401,11 +462,8 @@ unsigned int __do_IRQ(unsigned int irq)
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack) {
+		if (desc->chip->ack)
 			desc->chip->ack(irq);
-			/* get new one */
-			desc = irq_remap_to_desc(irq, desc);
-		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -416,10 +474,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack) {
+	if (desc->chip->ack)
 		desc->chip->ack(irq);
-		desc = irq_remap_to_desc(irq, desc);
-	}
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested
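The switch added to handle_IRQ_event() defines the contract for primary handlers on a threaded line: return IRQ_NONE if the device did not interrupt, IRQ_HANDLED if everything was done in hard interrupt context, or IRQ_WAKE_THREAD after quieting the device so the thread can do the real work. A hedged sketch (bar_dev and its helpers are hypothetical):

static irqreturn_t bar_primary(int irq, void *dev_id)
{
	struct bar_dev *bar = dev_id;

	if (!bar_irq_pending(bar))	/* shared line: not our device */
		return IRQ_NONE;

	bar_mask_device_irq(bar);	/* silence the device, not the line */
	return IRQ_WAKE_THREAD;		/* handle_IRQ_event() wakes the thread */
}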
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 01ce20eab38f..1b5d742c6a77 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
 extern struct lock_class_key irq_desc_lock_class;
-extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
 
@@ -42,6 +42,21 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern int irq_select_affinity_usr(unsigned int irq);
 
+extern void irq_set_thread_affinity(struct irq_desc *desc);
+
+/* Inline functions for support of irq chips on slow busses */
+static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
+{
+	if (unlikely(desc->chip->bus_lock))
+		desc->chip->bus_lock(irq);
+}
+
+static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
+{
+	if (unlikely(desc->chip->bus_sync_unlock))
+		desc->chip->bus_sync_unlock(irq);
+}
+
 /*
  * Debugging printout:
  */
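chip_bus_lock()/chip_bus_sync_unlock() only do something when an irq_chip supplies the new callbacks. A chip sitting behind a slow bus would wire them up roughly like this (a sketch; the expander type, its accessors and register names are hypothetical):

#include <linux/irq.h>
#include <linux/mutex.h>

static void expander_bus_lock(unsigned int irq)
{
	struct expander *exp = get_irq_chip_data(irq);

	mutex_lock(&exp->lock);
}

static void expander_bus_sync_unlock(unsigned int irq)
{
	struct expander *exp = get_irq_chip_data(irq);

	/* sleepable context: flush the cached mask state to the hardware */
	expander_write(exp, EXP_REG_MASK, exp->mask_cache);
	mutex_unlock(&exp->lock);
}

static struct irq_chip expander_irq_chip = {
	.name		 = "expander",
	.mask		 = expander_mask_irq,	/* updates mask_cache only */
	.unmask		 = expander_unmask_irq,	/* updates mask_cache only */
	.bus_lock	 = expander_bus_lock,
	.bus_sync_unlock = expander_bus_sync_unlock,
};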
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8c68d5b95d48..7305b297d1eb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,16 +8,15 @@
  */
 
 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #include "internals.h"
 
-#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
-cpumask_var_t irq_default_affinity;
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -53,9 +52,18 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
+
+	/*
+	 * We made sure that no hardirq handler is running. Now verify
+	 * that no threaded handlers are active.
+	 */
+	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);
 
+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  * irq_can_set_affinity - Check if the affinity of a given irq can be set
  * @irq: Interrupt to check
@@ -73,6 +81,26 @@ int irq_can_set_affinity(unsigned int irq)
 }
 
 /**
+ * irq_set_thread_affinity - Notify irq threads to adjust affinity
+ * @desc: irq descriptor which has affinity changed
+ *
+ * We just set IRQTF_AFFINITY and delegate the affinity setting
+ * to the interrupt thread itself. We can not call
+ * set_cpus_allowed_ptr() here as we hold desc->lock and this
+ * code can be called from hard interrupt context.
+ */
+void irq_set_thread_affinity(struct irq_desc *desc)
+{
+	struct irqaction *action = desc->action;
+
+	while (action) {
+		if (action->thread)
+			set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		action = action->next;
+	}
+}
+
+/**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq: Interrupt to set affinity
  * @cpumask: cpumask
@@ -89,16 +117,21 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
-		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		if (!desc->chip->set_affinity(irq, cpumask)) {
+			cpumask_copy(desc->affinity, cpumask);
+			irq_set_thread_affinity(desc);
+		}
+	}
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
 #else
-	cpumask_copy(desc->affinity, cpumask);
-	desc->chip->set_affinity(irq, cpumask);
+	if (!desc->chip->set_affinity(irq, cpumask)) {
+		cpumask_copy(desc->affinity, cpumask);
+		irq_set_thread_affinity(desc);
+	}
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -150,6 +183,8 @@ int irq_select_affinity_usr(unsigned int irq)
 
 	spin_lock_irqsave(&desc->lock, flags);
 	ret = setup_affinity(irq, desc);
+	if (!ret)
+		irq_set_thread_affinity(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
@@ -195,9 +230,11 @@ void disable_irq_nosync(unsigned int irq)
 	if (!desc)
 		return;
 
+	chip_bus_lock(irq, desc);
 	spin_lock_irqsave(&desc->lock, flags);
 	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -259,7 +296,8 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
- * This function may be called from IRQ context.
+ * This function may be called from IRQ context only when
+ * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
 void enable_irq(unsigned int irq)
 {
@@ -269,9 +307,11 @@ void enable_irq(unsigned int irq)
 	if (!desc)
 		return;
 
+	chip_bus_lock(irq, desc);
 	spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(enable_irq);
 
@@ -402,6 +442,165 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 }
 
 /*
+ * Default primary interrupt handler for threaded interrupts. Is
+ * assigned as primary handler when request_threaded_irq is called
+ * with handler == NULL. Useful for oneshot interrupts.
+ */
+static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Primary handler for nested threaded interrupts. Should never be
+ * called.
+ */
+static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
+{
+	WARN(1, "Primary handler called for nested irq %d\n", irq);
+	return IRQ_NONE;
+}
+
+static int irq_wait_for_interrupt(struct irqaction *action)
+{
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (test_and_clear_bit(IRQTF_RUNTHREAD,
+				       &action->thread_flags)) {
+			__set_current_state(TASK_RUNNING);
+			return 0;
+		}
+		schedule();
+	}
+	return -1;
+}
+
+/*
+ * Oneshot interrupts keep the irq line masked until the threaded
+ * handler has finished. Unmask if the interrupt has not been disabled
+ * and is marked MASKED.
+ */
+static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+{
+	chip_bus_lock(irq, desc);
+	spin_lock_irq(&desc->lock);
+	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
+		desc->status &= ~IRQ_MASKED;
+		desc->chip->unmask(irq);
+	}
+	spin_unlock_irq(&desc->lock);
+	chip_bus_sync_unlock(irq, desc);
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	spin_lock_irq(&desc->lock);
+	cpumask_copy(mask, desc->affinity);
+	spin_unlock_irq(&desc->lock);
+
+	set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void
+irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+/*
+ * Interrupt handler thread
+ */
+static int irq_thread(void *data)
+{
+	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
+	struct irqaction *action = data;
+	struct irq_desc *desc = irq_to_desc(action->irq);
+	int wake, oneshot = desc->status & IRQ_ONESHOT;
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	current->irqaction = action;
+
+	while (!irq_wait_for_interrupt(action)) {
+
+		irq_thread_check_affinity(desc, action);
+
+		atomic_inc(&desc->threads_active);
+
+		spin_lock_irq(&desc->lock);
+		if (unlikely(desc->status & IRQ_DISABLED)) {
+			/*
+			 * CHECKME: We might need a dedicated
+			 * IRQ_THREAD_PENDING flag here, which
+			 * retriggers the thread in check_irq_resend()
+			 * but AFAICT IRQ_PENDING should be fine as it
+			 * retriggers the interrupt itself --- tglx
+			 */
+			desc->status |= IRQ_PENDING;
+			spin_unlock_irq(&desc->lock);
+		} else {
+			spin_unlock_irq(&desc->lock);
+
+			action->thread_fn(action->irq, action->dev_id);
+
+			if (oneshot)
+				irq_finalize_oneshot(action->irq, desc);
+		}
+
+		wake = atomic_dec_and_test(&desc->threads_active);
+
+		if (wake && waitqueue_active(&desc->wait_for_threads))
+			wake_up(&desc->wait_for_threads);
+	}
+
+	/*
+	 * Clear irqaction. Otherwise exit_irq_thread() would make
+	 * fuzz about an active irq thread going into nirvana.
+	 */
+	current->irqaction = NULL;
+	return 0;
+}
+
+/*
+ * Called from do_exit()
+ */
+void exit_irq_thread(void)
+{
+	struct task_struct *tsk = current;
+
+	if (!tsk->irqaction)
+		return;
+
+	printk(KERN_ERR
+	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
+
+	/*
+	 * Set the THREAD DIED flag to prevent further wakeups of the
+	 * soon to be gone threaded handler.
+	 */
+	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
+}
+
+/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
@@ -411,7 +610,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
-	int shared = 0;
+	int nested, shared = 0;
 	int ret;
 
 	if (!desc)
@@ -436,6 +635,47 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		rand_initialize_irq(irq);
 	}
 
+	/* Oneshot interrupts are not allowed with shared */
+	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
+		return -EINVAL;
+
+	/*
+	 * Check whether the interrupt nests into another interrupt
+	 * thread.
+	 */
+	nested = desc->status & IRQ_NESTED_THREAD;
+	if (nested) {
+		if (!new->thread_fn)
+			return -EINVAL;
+		/*
+		 * Replace the primary handler which was provided from
+		 * the driver for non nested interrupt handling by the
+		 * dummy function which warns when called.
+		 */
+		new->handler = irq_nested_primary_handler;
+	}
+
+	/*
+	 * Create a handler thread when a thread function is supplied
+	 * and the interrupt does not nest into another interrupt
+	 * thread.
+	 */
+	if (new->thread_fn && !nested) {
+		struct task_struct *t;
+
+		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
+				   new->name);
+		if (IS_ERR(t))
+			return PTR_ERR(t);
+		/*
+		 * We keep the reference to the task struct even if
+		 * the thread dies to avoid that the interrupt code
+		 * references an already freed task_struct.
+		 */
+		get_task_struct(t);
+		new->thread = t;
+	}
+
 	/*
 	 * The following block of code has to be executed atomically
 	 */
@@ -473,15 +713,15 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!shared) {
 		irq_chip_set_defaults(desc->chip);
 
+		init_waitqueue_head(&desc->wait_for_threads);
+
 		/* Setup the type (level, edge polarity) if configured: */
 		if (new->flags & IRQF_TRIGGER_MASK) {
 			ret = __irq_set_trigger(desc, irq,
 					new->flags & IRQF_TRIGGER_MASK);
 
-			if (ret) {
-				spin_unlock_irqrestore(&desc->lock, flags);
-				return ret;
-			}
+			if (ret)
+				goto out_thread;
 		} else
 			compat_irq_chip_set_default_handler(desc);
 #if defined(CONFIG_IRQ_PER_CPU)
@@ -489,9 +729,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->status |= IRQ_PER_CPU;
 #endif
 
-		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
+		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
 				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
 
+		if (new->flags & IRQF_ONESHOT)
+			desc->status |= IRQ_ONESHOT;
+
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
 			desc->status &= ~IRQ_DISABLED;
@@ -516,6 +759,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
+	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
@@ -533,7 +777,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
@@ -549,8 +799,19 @@ mismatch:
 		dump_stack();
 	}
 #endif
+	ret = -EBUSY;
+
+out_thread:
 	spin_unlock_irqrestore(&desc->lock, flags);
-	return -EBUSY;
+	if (new->thread) {
+		struct task_struct *t = new->thread;
+
+		new->thread = NULL;
+		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
+			kthread_stop(t);
+		put_task_struct(t);
+	}
+	return ret;
 }
 
 /**
@@ -622,6 +883,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		else
 			desc->chip->disable(irq);
 	}
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -644,6 +906,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		local_irq_restore(flags);
 	}
 #endif
+
+	if (action->thread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(action->thread);
+		put_task_struct(action->thread);
+	}
+
 	return action;
 }
 
@@ -676,14 +945,26 @@ EXPORT_SYMBOL_GPL(remove_irq);
 */
 void free_irq(unsigned int irq, void *dev_id)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc)
+		return;
+
+	chip_bus_lock(irq, desc);
 	kfree(__free_irq(irq, dev_id));
+	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL(free_irq);
 
 /**
- * request_irq - allocate an interrupt line
+ * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
- * @handler: Function to be called when the IRQ occurs
+ * @handler: Function to be called when the IRQ occurs.
+ *	     Primary handler for threaded interrupts
+ *	     If NULL and thread_fn != NULL the default
+ *	     primary handler is installed
+ * @thread_fn: Function called from the irq handler thread
+ *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
@@ -695,6 +976,15 @@ EXPORT_SYMBOL(free_irq);
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
+ * If you want to set up a threaded irq handler for your device
+ * then you need to supply @handler and @thread_fn. @handler is
+ * still called in hard interrupt context and has to check
+ * whether the interrupt originates from the device. If yes it
+ * needs to disable the interrupt on the device and return
+ * IRQ_WAKE_THREAD which will wake up the handler thread and run
+ * @thread_fn. This split handler design is necessary to support
+ * shared interrupts.
+ *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
@@ -710,8 +1000,9 @@ EXPORT_SYMBOL(free_irq);
 * IRQF_TRIGGER_*	Specify active edge(s) or level
 *
 */
-int request_irq(unsigned int irq, irq_handler_t handler,
-		unsigned long irqflags, const char *devname, void *dev_id)
+int request_threaded_irq(unsigned int irq, irq_handler_t handler,
+			 irq_handler_t thread_fn, unsigned long irqflags,
+			 const char *devname, void *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -751,19 +1042,27 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 
 	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
-	if (!handler)
-		return -EINVAL;
+
+	if (!handler) {
+		if (!thread_fn)
+			return -EINVAL;
+		handler = irq_default_primary_handler;
+	}
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
 	action->handler = handler;
+	action->thread_fn = thread_fn;
 	action->flags = irqflags;
 	action->name = devname;
 	action->dev_id = dev_id;
 
+	chip_bus_lock(irq, desc);
 	retval = __setup_irq(irq, desc, action);
+	chip_bus_sync_unlock(irq, desc);
+
 	if (retval)
 		kfree(action);
 
@@ -788,4 +1087,4 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 #endif
 	return retval;
 }
-EXPORT_SYMBOL(request_irq);
+EXPORT_SYMBOL(request_threaded_irq);
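The handler == NULL plus IRQF_ONESHOT combination added above is aimed at level-triggered interrupts from devices on slow buses: the default primary handler just returns IRQ_WAKE_THREAD, and the line stays masked until the thread has cleared the interrupt source. A minimal sketch (baz_dev and its helper are hypothetical; the request_threaded_irq() call matches the API introduced here):

static irqreturn_t baz_thread_fn(int irq, void *dev_id)
{
	struct baz_dev *baz = dev_id;

	baz_clear_interrupt_source(baz);	/* may sleep on the bus */
	return IRQ_HANDLED;			/* irq_finalize_oneshot() unmasks */
}

static int baz_setup_irq(struct baz_dev *baz)
{
	/* NULL primary -> irq_default_primary_handler(); ONESHOT keeps
	 * the level-triggered line masked until the thread returns. */
	return request_threaded_irq(baz->irq, NULL, baz_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "baz", baz);
}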
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index e05ad9be43b7..fcb6c96f2627 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,5 +1,8 @@
 
 #include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "internals.h"
 
 void move_masked_irq(int irq)
 {
@@ -39,11 +42,12 @@ void move_masked_irq(int irq)
 	 * masking the irqs.
 	 */
 	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
-		   < nr_cpu_ids)) {
-		cpumask_and(desc->affinity,
-			    desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, desc->affinity);
+		   < nr_cpu_ids))
+		if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
+			cpumask_copy(desc->affinity, desc->pending_mask);
+			irq_set_thread_affinity(desc);
 		}
+
 	cpumask_clear(desc->pending_mask);
 }
 
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index 243d6121e50e..3fd30197da2e 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
@@ -15,9 +15,9 @@ | |||
15 | 15 | ||
16 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, | 16 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, |
17 | struct irq_desc *desc, | 17 | struct irq_desc *desc, |
18 | int cpu, int nr) | 18 | int node, int nr) |
19 | { | 19 | { |
20 | init_kstat_irqs(desc, cpu, nr); | 20 | init_kstat_irqs(desc, node, nr); |
21 | 21 | ||
22 | if (desc->kstat_irqs != old_desc->kstat_irqs) | 22 | if (desc->kstat_irqs != old_desc->kstat_irqs) |
23 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, | 23 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, |
@@ -34,36 +34,36 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | 36 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, |
37 | struct irq_desc *desc, int cpu) | 37 | struct irq_desc *desc, int node) |
38 | { | 38 | { |
39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
40 | if (!init_alloc_desc_masks(desc, cpu, false)) { | 40 | if (!alloc_desc_masks(desc, node, false)) { |
41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | 41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " |
42 | "for migration.\n", irq); | 42 | "for migration.\n", irq); |
43 | return false; | 43 | return false; |
44 | } | 44 | } |
45 | spin_lock_init(&desc->lock); | 45 | spin_lock_init(&desc->lock); |
46 | desc->cpu = cpu; | 46 | desc->node = node; |
47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
48 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); | 48 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); |
49 | init_copy_desc_masks(old_desc, desc); | 49 | init_copy_desc_masks(old_desc, desc); |
50 | arch_init_copy_chip_data(old_desc, desc, cpu); | 50 | arch_init_copy_chip_data(old_desc, desc, node); |
51 | return true; | 51 | return true; |
52 | } | 52 | } |
53 | 53 | ||
54 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | 54 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) |
55 | { | 55 | { |
56 | free_kstat_irqs(old_desc, desc); | 56 | free_kstat_irqs(old_desc, desc); |
57 | free_desc_masks(old_desc, desc); | ||
57 | arch_free_chip_data(old_desc, desc); | 58 | arch_free_chip_data(old_desc, desc); |
58 | } | 59 | } |
59 | 60 | ||
60 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | 61 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, |
61 | int cpu) | 62 | int node) |
62 | { | 63 | { |
63 | struct irq_desc *desc; | 64 | struct irq_desc *desc; |
64 | unsigned int irq; | 65 | unsigned int irq; |
65 | unsigned long flags; | 66 | unsigned long flags; |
66 | int node; | ||
67 | 67 | ||
68 | irq = old_desc->irq; | 68 | irq = old_desc->irq; |
69 | 69 | ||
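Note the free_desc_masks() call added to free_one_irq_desc(): alloc_desc_masks() on the target node now has a matching release on the descriptor being migrated away, so with CONFIG_CPUMASK_OFFSTACK the old descriptor no longer leaks its separately allocated cpumasks. The helper is defined outside this diff; it presumably reduces to (free_cpumask_var() is a no-op without offstack cpumasks):

	/* assumed shape of free_desc_masks() (defined elsewhere) */
	static inline void free_desc_masks(struct irq_desc *old_desc,
					   struct irq_desc *new_desc)
	{
		free_cpumask_var(old_desc->affinity);
	#ifdef CONFIG_GENERIC_PENDING_IRQ
		free_cpumask_var(old_desc->pending_mask);
	#endif
	}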
@@ -75,7 +75,6 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
75 | if (desc && old_desc != desc) | 75 | if (desc && old_desc != desc) |
76 | goto out_unlock; | 76 | goto out_unlock; |
77 | 77 | ||
78 | node = cpu_to_node(cpu); | ||
79 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | 78 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); |
80 | if (!desc) { | 79 | if (!desc) { |
81 | printk(KERN_ERR "irq %d: can not get new irq_desc " | 80 | printk(KERN_ERR "irq %d: can not get new irq_desc " |
@@ -84,7 +83,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
84 | desc = old_desc; | 83 | desc = old_desc; |
85 | goto out_unlock; | 84 | goto out_unlock; |
86 | } | 85 | } |
87 | if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) { | 86 | if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { |
88 | /* still use old one */ | 87 | /* still use old one */ |
89 | kfree(desc); | 88 | kfree(desc); |
90 | desc = old_desc; | 89 | desc = old_desc; |
@@ -96,9 +95,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
96 | 95 | ||
97 | /* free the old one */ | 96 | /* free the old one */ |
98 | free_one_irq_desc(old_desc, desc); | 97 | free_one_irq_desc(old_desc, desc); |
99 | spin_unlock(&old_desc->lock); | ||
100 | kfree(old_desc); | 98 | kfree(old_desc); |
101 | spin_lock(&desc->lock); | ||
102 | 99 | ||
103 | return desc; | 100 | return desc; |
104 | 101 | ||
@@ -108,24 +105,14 @@ out_unlock: | |||
108 | return desc; | 105 | return desc; |
109 | } | 106 | } |
110 | 107 | ||
111 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu) | 108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) |
112 | { | 109 | { |
113 | int old_cpu; | 110 | /* those static or target node is -1, do not move them */ |
114 | int node, old_node; | 111 | if (desc->irq < NR_IRQS_LEGACY || node == -1) |
115 | |||
116 | /* those all static, do move them */ | ||
117 | if (desc->irq < NR_IRQS_LEGACY) | ||
118 | return desc; | 112 | return desc; |
119 | 113 | ||
120 | old_cpu = desc->cpu; | 114 | if (desc->node != node) |
121 | if (old_cpu != cpu) { | 115 | desc = __real_move_irq_desc(desc, node); |
122 | node = cpu_to_node(cpu); | ||
123 | old_node = cpu_to_node(old_cpu); | ||
124 | if (old_node != node) | ||
125 | desc = __real_move_irq_desc(desc, cpu); | ||
126 | else | ||
127 | desc->cpu = cpu; | ||
128 | } | ||
129 | 116 | ||
130 | return desc; | 117 | return desc; |
131 | } | 118 | } |
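With the home node stored in desc->node, move_irq_desc() loses the old_cpu/old_node bookkeeping and gains an explicit escape for node == -1 ("no preference"); the rewritten comment also now matches what the code does for legacy IRQs, which the old "those all static, do move them" wording contradicted. The CPU-to-node translation moves to the call sites, e.g. (a hypothetical caller; names assumed):

	/* retarget an irq and let its descriptor follow the destination
	 * CPU's memory node */
	int cpu = cpumask_first(new_mask);
	desc = move_irq_desc(desc, cpu_to_node(cpu));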
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 638d8bedec14..a0bb09e79867 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -15,10 +15,10 @@ | |||
15 | /** | 15 | /** |
16 | * suspend_device_irqs - disable all currently enabled interrupt lines | 16 | * suspend_device_irqs - disable all currently enabled interrupt lines |
17 | * | 17 | * |
18 | * During system-wide suspend or hibernation device interrupts need to be | 18 | * During system-wide suspend or hibernation device drivers need to be prevented |
19 | * disabled at the chip level and this function is provided for this purpose. | 19 | * from receiving interrupts and this function is provided for this purpose. |
20 | * It disables all interrupt lines that are enabled at the moment and sets the | 20 | * It marks all interrupt lines in use, except for the timer ones, as disabled |
21 | * IRQ_SUSPENDED flag for them. | 21 | * and sets the IRQ_SUSPENDED flag for each of them. |
22 | */ | 22 | */ |
23 | void suspend_device_irqs(void) | 23 | void suspend_device_irqs(void) |
24 | { | 24 | { |
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 692363dd591f..0832145fea97 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -136,7 +136,7 @@ out: | |||
136 | 136 | ||
137 | static int default_affinity_open(struct inode *inode, struct file *file) | 137 | static int default_affinity_open(struct inode *inode, struct file *file) |
138 | { | 138 | { |
139 | return single_open(file, default_affinity_show, NULL); | 139 | return single_open(file, default_affinity_show, PDE(inode)->data); |
140 | } | 140 | } |
141 | 141 | ||
142 | static const struct file_operations default_affinity_proc_fops = { | 142 | static const struct file_operations default_affinity_proc_fops = { |
@@ -148,18 +148,28 @@ static const struct file_operations default_affinity_proc_fops = { | |||
148 | }; | 148 | }; |
149 | #endif | 149 | #endif |
150 | 150 | ||
151 | static int irq_spurious_read(char *page, char **start, off_t off, | 151 | static int irq_spurious_proc_show(struct seq_file *m, void *v) |
152 | int count, int *eof, void *data) | ||
153 | { | 152 | { |
154 | struct irq_desc *desc = irq_to_desc((long) data); | 153 | struct irq_desc *desc = irq_to_desc((long) m->private); |
155 | return sprintf(page, "count %u\n" | 154 | |
156 | "unhandled %u\n" | 155 | seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", |
157 | "last_unhandled %u ms\n", | 156 | desc->irq_count, desc->irqs_unhandled, |
158 | desc->irq_count, | 157 | jiffies_to_msecs(desc->last_unhandled)); |
159 | desc->irqs_unhandled, | 158 | return 0; |
160 | jiffies_to_msecs(desc->last_unhandled)); | 159 | } |
160 | |||
161 | static int irq_spurious_proc_open(struct inode *inode, struct file *file) | ||
162 | { | ||
163 | return single_open(file, irq_spurious_proc_show, NULL); | ||
161 | } | 164 | } |
162 | 165 | ||
166 | static const struct file_operations irq_spurious_proc_fops = { | ||
167 | .open = irq_spurious_proc_open, | ||
168 | .read = seq_read, | ||
169 | .llseek = seq_lseek, | ||
170 | .release = single_release, | ||
171 | }; | ||
172 | |||
163 | #define MAX_NAMELEN 128 | 173 | #define MAX_NAMELEN 128 |
164 | 174 | ||
165 | static int name_unique(unsigned int irq, struct irqaction *new_action) | 175 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
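One wrinkle in this conversion: irq_spurious_proc_show() takes the irq number from m->private, yet irq_spurious_proc_open() hands single_open() a NULL, so every per-irq "spurious" file would report the counters of IRQ 0. The default_affinity hunk above shows the idiom that forwards the data later stored by proc_create_data(); applied here it would read (a suggested correction, not part of this diff):

	static int irq_spurious_proc_open(struct inode *inode, struct file *file)
	{
		return single_open(file, irq_spurious_proc_show,
				   PDE(inode)->data);
	}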
@@ -204,7 +214,6 @@ void register_handler_proc(unsigned int irq, struct irqaction *action) | |||
204 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 214 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
205 | { | 215 | { |
206 | char name [MAX_NAMELEN]; | 216 | char name [MAX_NAMELEN]; |
207 | struct proc_dir_entry *entry; | ||
208 | 217 | ||
209 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) | 218 | if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir) |
210 | return; | 219 | return; |
@@ -214,6 +223,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
214 | 223 | ||
215 | /* create /proc/irq/1234 */ | 224 | /* create /proc/irq/1234 */ |
216 | desc->dir = proc_mkdir(name, root_irq_dir); | 225 | desc->dir = proc_mkdir(name, root_irq_dir); |
226 | if (!desc->dir) | ||
227 | return; | ||
217 | 228 | ||
218 | #ifdef CONFIG_SMP | 229 | #ifdef CONFIG_SMP |
219 | /* create /proc/irq/<irq>/smp_affinity */ | 230 | /* create /proc/irq/<irq>/smp_affinity */ |
@@ -221,11 +232,8 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
221 | &irq_affinity_proc_fops, (void *)(long)irq); | 232 | &irq_affinity_proc_fops, (void *)(long)irq); |
222 | #endif | 233 | #endif |
223 | 234 | ||
224 | entry = create_proc_entry("spurious", 0444, desc->dir); | 235 | proc_create_data("spurious", 0444, desc->dir, |
225 | if (entry) { | 236 | &irq_spurious_proc_fops, (void *)(long)irq); |
226 | entry->data = (void *)(long)irq; | ||
227 | entry->read_proc = irq_spurious_read; | ||
228 | } | ||
229 | } | 237 | } |
230 | 238 | ||
231 | #undef MAX_NAMELEN | 239 | #undef MAX_NAMELEN |
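Besides being shorter, proc_create_data() closes a small race that the create_proc_entry() sequence left open: the old code published a live /proc file and only afterwards filled in ->data and ->read_proc, so a sufficiently fast reader could hit a half-initialized entry. proc_create_data() takes the file_operations and private data up front, and since nothing later touches the entry its return value need not be checked. The new NULL test after proc_mkdir() similarly stops the function from registering files under a directory that was never created.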
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 89c7117acf2b..090c3763f3a2 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -70,8 +70,7 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) | |||
70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { | 70 | if ((status & (IRQ_LEVEL | IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { |
71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; | 71 | desc->status = (status & ~IRQ_PENDING) | IRQ_REPLAY; |
72 | 72 | ||
73 | if (!desc->chip || !desc->chip->retrigger || | 73 | if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) { |
74 | !desc->chip->retrigger(irq)) { | ||
75 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 74 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
76 | /* Set it pending and activate the softirq: */ | 75 | /* Set it pending and activate the softirq: */ |
77 | set_bit(irq, irqs_resend); | 76 | set_bit(irq, irqs_resend); |
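Dropping the !desc->chip test is safe because every descriptor is initialized with at least no_irq_chip, so the pointer can no longer be NULL; only the optional ->retrigger callback still needs checking. When the chip cannot replay the interrupt in hardware, the CONFIG_HARDIRQS_SW_RESEND branch falls back to a software replay, presumably kicked off like this (the tasklet is declared earlier in resend.c):

	if (!desc->chip->retrigger || !desc->chip->retrigger(irq)) {
	#ifdef CONFIG_HARDIRQS_SW_RESEND
		/* mark for replay and schedule the software-resend tasklet */
		set_bit(irq, irqs_resend);
		tasklet_schedule(&resend_tasklet);
	#endif
	}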
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 4d568294de3e..22b0a6eedf24 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq) | |||
104 | return ok; | 104 | return ok; |
105 | } | 105 | } |
106 | 106 | ||
107 | static void poll_all_shared_irqs(void) | 107 | static void poll_spurious_irqs(unsigned long dummy) |
108 | { | 108 | { |
109 | struct irq_desc *desc; | 109 | struct irq_desc *desc; |
110 | int i; | 110 | int i; |
@@ -121,25 +121,15 @@ static void poll_all_shared_irqs(void) | |||
121 | if (!(status & IRQ_SPURIOUS_DISABLED)) | 121 | if (!(status & IRQ_SPURIOUS_DISABLED)) |
122 | continue; | 122 | continue; |
123 | 123 | ||
124 | local_irq_disable(); | ||
124 | try_one_irq(i, desc); | 125 | try_one_irq(i, desc); |
126 | local_irq_enable(); | ||
125 | } | 127 | } |
126 | } | ||
127 | |||
128 | static void poll_spurious_irqs(unsigned long dummy) | ||
129 | { | ||
130 | poll_all_shared_irqs(); | ||
131 | 128 | ||
132 | mod_timer(&poll_spurious_irq_timer, | 129 | mod_timer(&poll_spurious_irq_timer, |
133 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 130 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
134 | } | 131 | } |
135 | 132 | ||
136 | #ifdef CONFIG_DEBUG_SHIRQ | ||
137 | void debug_poll_all_shared_irqs(void) | ||
138 | { | ||
139 | poll_all_shared_irqs(); | ||
140 | } | ||
141 | #endif | ||
142 | |||
143 | /* | 133 | /* |
144 | * If 99,900 of the previous 100,000 interrupts have not been handled | 134 | * If 99,900 of the previous 100,000 interrupts have not been handled |
145 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic | 135 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic |
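Folding poll_all_shared_irqs() into the timer callback removes its last caller and with it the CONFIG_DEBUG_SHIRQ wrapper debug_poll_all_shared_irqs(). The functional change is the new local_irq_disable()/local_irq_enable() bracket: timer callbacks run with interrupts enabled, while try_one_irq() ends up calling handlers that expect interrupts-off context. Condensed (a sketch; the loop's early-outs follow the context lines above), the surviving callback is:

	static void poll_spurious_irqs(unsigned long dummy)
	{
		struct irq_desc *desc;
		int i;

		for_each_irq_desc(i, desc) {
			if (!(desc->status & IRQ_SPURIOUS_DISABLED))
				continue;

			local_irq_disable();	/* try_one_irq() wants irqs off */
			try_one_irq(i, desc);
			local_irq_enable();
		}

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}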
@@ -297,7 +287,6 @@ static int __init irqfixup_setup(char *str) | |||
297 | 287 | ||
298 | __setup("irqfixup", irqfixup_setup); | 288 | __setup("irqfixup", irqfixup_setup); |
299 | module_param(irqfixup, int, 0644); | 289 | module_param(irqfixup, int, 0644); |
300 | MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode"); | ||
301 | 290 | ||
302 | static int __init irqpoll_setup(char *str) | 291 | static int __init irqpoll_setup(char *str) |
303 | { | 292 | { |