Diffstat (limited to 'kernel/irq')

 kernel/irq/Makefile       |   1
 kernel/irq/chip.c         |  14
 kernel/irq/devres.c       |  16
 kernel/irq/handle.c       | 165
 kernel/irq/internals.h    |  10
 kernel/irq/manage.c       | 432
 kernel/irq/migration.c    |  12
 kernel/irq/numa_migrate.c |  37
 kernel/irq/pm.c           |  79
 kernel/irq/proc.c         |   4
 kernel/irq/spurious.c     |  14

 11 files changed, 603 insertions(+), 181 deletions(-)
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac98..3394f8f52964 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
+obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e1..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpumask_setall(&desc->affinity);
+	cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -78,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
+	clear_kstat_irqs(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }

@@ -290,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		desc->chip->mask_ack(irq);
 	else {
 		desc->chip->mask(irq);
-		desc->chip->ack(irq);
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
 	}
 }

@@ -383,6 +388,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
 	spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_level_irq);

 /**
  *	handle_fasteoi_irq - irq handler for transparent controllers
@@ -475,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	kstat_incr_irqs_this_cpu(irq, desc);

 	/* Start handling the irq */
-	desc->chip->ack(irq);
+	if (desc->chip->ack)
+		desc->chip->ack(irq);
 	desc = irq_remap_to_desc(irq, desc);

 	/* Mark the IRQ currently in progress.*/
@@ -593,6 +600,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_irq_handler);

 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
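Note on the two new '->ack' checks above: a controller that has no explicit
acknowledge register can now simply leave .ack unset in its irq_chip. A minimal
sketch of such a chip for this kernel's irq_chip layout (the register names,
demo_writel() helper and demo_irq_base are made up for illustration, not part of
this patch):

    static void demo_irq_mask(unsigned int irq)
    {
            /* hypothetical: set this irq's bit in a mask-set register */
            demo_writel(BIT(irq - demo_irq_base), DEMO_REG_MASK_SET);
    }

    static void demo_irq_unmask(unsigned int irq)
    {
            demo_writel(BIT(irq - demo_irq_base), DEMO_REG_MASK_CLR);
    }

    static struct irq_chip demo_irq_chip = {
            .name   = "DEMO",
            .mask   = demo_irq_mask,
            .unmask = demo_irq_unmask,
            /* no .ack: mask_ack_irq() and handle_edge_irq() now skip it */
    };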
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index 38a25b8d8bff..d06df9c41cba 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -26,10 +26,12 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
 }

 /**
- *	devm_request_irq - allocate an interrupt line for a managed device
+ *	devm_request_threaded_irq - allocate an interrupt line for a managed device
  *	@dev: device to request interrupt for
  *	@irq: Interrupt line to allocate
  *	@handler: Function to be called when the IRQ occurs
+ *	@thread_fn: function to be called in a threaded interrupt context. NULL
+ *		    for devices which handle everything in @handler
  *	@irqflags: Interrupt type flags
  *	@devname: An ascii name for the claiming device
  *	@dev_id: A cookie passed back to the handler function
@@ -42,9 +44,10 @@ static int devm_irq_match(struct device *dev, void *res, void *data)
  *	If an IRQ allocated with this function needs to be freed
  *	separately, dev_free_irq() must be used.
  */
-int devm_request_irq(struct device *dev, unsigned int irq,
-		     irq_handler_t handler, unsigned long irqflags,
-		     const char *devname, void *dev_id)
+int devm_request_threaded_irq(struct device *dev, unsigned int irq,
+			      irq_handler_t handler, irq_handler_t thread_fn,
+			      unsigned long irqflags, const char *devname,
+			      void *dev_id)
 {
 	struct irq_devres *dr;
 	int rc;
@@ -54,7 +57,8 @@ int devm_request_irq(struct device *dev, unsigned int irq,
 	if (!dr)
 		return -ENOMEM;

-	rc = request_irq(irq, handler, irqflags, devname, dev_id);
+	rc = request_threaded_irq(irq, handler, thread_fn, irqflags, devname,
+				  dev_id);
 	if (rc) {
 		devres_free(dr);
 		return rc;
@@ -66,7 +70,7 @@ int devm_request_irq(struct device *dev, unsigned int irq,

 	return 0;
 }
-EXPORT_SYMBOL(devm_request_irq);
+EXPORT_SYMBOL(devm_request_threaded_irq);

 /**
  *	devm_free_irq - free an interrupt
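For reference, a sketch of how a driver's probe() would use the managed, threaded
variant introduced above. The signature comes straight from this diff; the foo_*
device structure and helpers are illustrative only:

    #include <linux/interrupt.h>

    static irqreturn_t foo_quick_check(int irq, void *dev_id)
    {
            struct foo_device *foo = dev_id;

            if (!foo_irq_is_ours(foo))      /* hypothetical status check */
                    return IRQ_NONE;
            foo_mask_device_irq(foo);       /* quiet the device first */
            return IRQ_WAKE_THREAD;         /* defer the real work to foo_thread_fn */
    }

    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            struct foo_device *foo = dev_id;

            foo_handle_events(foo);         /* runs in a kthread, may sleep */
            foo_unmask_device_irq(foo);
            return IRQ_HANDLED;
    }

    /* in foo_probe(): the line is released automatically when the driver detaches */
    ret = devm_request_threaded_irq(&pdev->dev, foo->irq, foo_quick_check,
                                    foo_thread_fn, IRQF_SHARED, "foo", foo);
    if (ret)
            return ret;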
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..d82142be8dd2 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,8 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <trace/irq.h>
+#include <linux/bootmem.h>

 #include "internals.h"

@@ -39,6 +41,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }

+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -57,6 +71,7 @@ int nr_irqs = NR_IRQS; | |||
57 | EXPORT_SYMBOL_GPL(nr_irqs); | 71 | EXPORT_SYMBOL_GPL(nr_irqs); |
58 | 72 | ||
59 | #ifdef CONFIG_SPARSE_IRQ | 73 | #ifdef CONFIG_SPARSE_IRQ |
74 | |||
60 | static struct irq_desc irq_desc_init = { | 75 | static struct irq_desc irq_desc_init = { |
61 | .irq = -1, | 76 | .irq = -1, |
62 | .status = IRQ_DISABLED, | 77 | .status = IRQ_DISABLED, |
@@ -64,26 +79,25 @@ static struct irq_desc irq_desc_init = { | |||
64 | .handle_irq = handle_bad_irq, | 79 | .handle_irq = handle_bad_irq, |
65 | .depth = 1, | 80 | .depth = 1, |
66 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | 81 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), |
67 | #ifdef CONFIG_SMP | ||
68 | .affinity = CPU_MASK_ALL | ||
69 | #endif | ||
70 | }; | 82 | }; |
71 | 83 | ||
72 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) | 84 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) |
73 | { | 85 | { |
74 | unsigned long bytes; | ||
75 | char *ptr; | ||
76 | int node; | 86 | int node; |
77 | 87 | void *ptr; | |
78 | /* Compute how many bytes we need per irq and allocate them */ | ||
79 | bytes = nr * sizeof(unsigned int); | ||
80 | 88 | ||
81 | node = cpu_to_node(cpu); | 89 | node = cpu_to_node(cpu); |
82 | ptr = kzalloc_node(bytes, GFP_ATOMIC, node); | 90 | ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node); |
83 | printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node); | ||
84 | 91 | ||
85 | if (ptr) | 92 | /* |
86 | desc->kstat_irqs = (unsigned int *)ptr; | 93 | * don't overwite if can not get new one |
94 | * init_copy_kstat_irqs() could still use old one | ||
95 | */ | ||
96 | if (ptr) { | ||
97 | printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", | ||
98 | cpu, node); | ||
99 | desc->kstat_irqs = ptr; | ||
100 | } | ||
87 | } | 101 | } |
88 | 102 | ||
89 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | 103 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) |
@@ -101,6 +115,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | |||
101 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | 115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); |
102 | BUG_ON(1); | 116 | BUG_ON(1); |
103 | } | 117 | } |
118 | if (!init_alloc_desc_masks(desc, cpu, false)) { | ||
119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); | ||
120 | BUG_ON(1); | ||
121 | } | ||
104 | arch_init_chip_data(desc, cpu); | 122 | arch_init_chip_data(desc, cpu); |
105 | } | 123 | } |
106 | 124 | ||
@@ -109,7 +127,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | |||
109 | */ | 127 | */ |
110 | DEFINE_SPINLOCK(sparse_irq_lock); | 128 | DEFINE_SPINLOCK(sparse_irq_lock); |
111 | 129 | ||
112 | struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; | 130 | struct irq_desc **irq_desc_ptrs __read_mostly; |
113 | 131 | ||
114 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { | 132 | static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { |
115 | [0 ... NR_IRQS_LEGACY-1] = { | 133 | [0 ... NR_IRQS_LEGACY-1] = { |
@@ -119,14 +137,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm | |||
119 | .handle_irq = handle_bad_irq, | 137 | .handle_irq = handle_bad_irq, |
120 | .depth = 1, | 138 | .depth = 1, |
121 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | 139 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), |
122 | #ifdef CONFIG_SMP | ||
123 | .affinity = CPU_MASK_ALL | ||
124 | #endif | ||
125 | } | 140 | } |
126 | }; | 141 | }; |
127 | 142 | ||
128 | /* FIXME: use bootmem alloc ...*/ | 143 | static unsigned int *kstat_irqs_legacy; |
129 | static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS]; | ||
130 | 144 | ||
131 | int __init early_irq_init(void) | 145 | int __init early_irq_init(void) |
132 | { | 146 | { |
@@ -134,18 +148,32 @@ int __init early_irq_init(void) | |||
134 | int legacy_count; | 148 | int legacy_count; |
135 | int i; | 149 | int i; |
136 | 150 | ||
151 | init_irq_default_affinity(); | ||
152 | |||
153 | /* initialize nr_irqs based on nr_cpu_ids */ | ||
154 | arch_probe_nr_irqs(); | ||
155 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs); | ||
156 | |||
137 | desc = irq_desc_legacy; | 157 | desc = irq_desc_legacy; |
138 | legacy_count = ARRAY_SIZE(irq_desc_legacy); | 158 | legacy_count = ARRAY_SIZE(irq_desc_legacy); |
139 | 159 | ||
160 | /* allocate irq_desc_ptrs array based on nr_irqs */ | ||
161 | irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *)); | ||
162 | |||
163 | /* allocate based on nr_cpu_ids */ | ||
164 | /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */ | ||
165 | kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids * | ||
166 | sizeof(int)); | ||
167 | |||
140 | for (i = 0; i < legacy_count; i++) { | 168 | for (i = 0; i < legacy_count; i++) { |
141 | desc[i].irq = i; | 169 | desc[i].irq = i; |
142 | desc[i].kstat_irqs = kstat_irqs_legacy[i]; | 170 | desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids; |
143 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 171 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
144 | 172 | init_alloc_desc_masks(&desc[i], 0, true); | |
145 | irq_desc_ptrs[i] = desc + i; | 173 | irq_desc_ptrs[i] = desc + i; |
146 | } | 174 | } |
147 | 175 | ||
148 | for (i = legacy_count; i < NR_IRQS; i++) | 176 | for (i = legacy_count; i < nr_irqs; i++) |
149 | irq_desc_ptrs[i] = NULL; | 177 | irq_desc_ptrs[i] = NULL; |
150 | 178 | ||
151 | return arch_early_irq_init(); | 179 | return arch_early_irq_init(); |
@@ -153,7 +181,10 @@ int __init early_irq_init(void)

 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }

 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -162,10 +193,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | |||
162 | unsigned long flags; | 193 | unsigned long flags; |
163 | int node; | 194 | int node; |
164 | 195 | ||
165 | if (irq >= NR_IRQS) { | 196 | if (irq >= nr_irqs) { |
166 | printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", | 197 | WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", |
167 | irq, NR_IRQS); | 198 | irq, nr_irqs); |
168 | WARN_ON(1); | ||
169 | return NULL; | 199 | return NULL; |
170 | } | 200 | } |
171 | 201 | ||
@@ -207,24 +237,28 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | |||
207 | .handle_irq = handle_bad_irq, | 237 | .handle_irq = handle_bad_irq, |
208 | .depth = 1, | 238 | .depth = 1, |
209 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), | 239 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), |
210 | #ifdef CONFIG_SMP | ||
211 | .affinity = CPU_MASK_ALL | ||
212 | #endif | ||
213 | } | 240 | } |
214 | }; | 241 | }; |
215 | 242 | ||
243 | static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS]; | ||
216 | int __init early_irq_init(void) | 244 | int __init early_irq_init(void) |
217 | { | 245 | { |
218 | struct irq_desc *desc; | 246 | struct irq_desc *desc; |
219 | int count; | 247 | int count; |
220 | int i; | 248 | int i; |
221 | 249 | ||
250 | init_irq_default_affinity(); | ||
251 | |||
252 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); | ||
253 | |||
222 | desc = irq_desc; | 254 | desc = irq_desc; |
223 | count = ARRAY_SIZE(irq_desc); | 255 | count = ARRAY_SIZE(irq_desc); |
224 | 256 | ||
225 | for (i = 0; i < count; i++) | 257 | for (i = 0; i < count; i++) { |
226 | desc[i].irq = i; | 258 | desc[i].irq = i; |
227 | 259 | init_alloc_desc_masks(&desc[i], 0, true); | |
260 | desc[i].kstat_irqs = kstat_irqs_all[i]; | ||
261 | } | ||
228 | return arch_early_irq_init(); | 262 | return arch_early_irq_init(); |
229 | } | 263 | } |
230 | 264 | ||
@@ -239,6 +273,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */

+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -300,6 +339,18 @@ irqreturn_t no_action(int cpl, void *dev_id)
 	return IRQ_NONE;
 }

+static void warn_no_thread(unsigned int irq, struct irqaction *action)
+{
+	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
+		return;
+
+	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
+	       "but no thread function available.", irq, action->name);
+}
+
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq: the interrupt number
@@ -312,13 +363,56 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;

+	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();

 	do {
+		trace_irq_handler_entry(irq, action);
 		ret = action->handler(irq, action->dev_id);
-		if (ret == IRQ_HANDLED)
+		trace_irq_handler_exit(irq, action, ret);
+
+		switch (ret) {
+		case IRQ_WAKE_THREAD:
+			/*
+			 * Set result to handled so the spurious check
+			 * does not trigger.
+			 */
+			ret = IRQ_HANDLED;
+
+			/*
+			 * Catch drivers which return WAKE_THREAD but
+			 * did not set up a thread function
+			 */
+			if (unlikely(!action->thread_fn)) {
+				warn_no_thread(irq, action);
+				break;
+			}
+
+			/*
+			 * Wake up the handler thread for this
+			 * action. In case the thread crashed and was
+			 * killed we just pretend that we handled the
+			 * interrupt. The hardirq handler above has
+			 * disabled the device interrupt, so no irq
+			 * storm is lurking.
+			 */
+			if (likely(!test_bit(IRQTF_DIED,
+					     &action->thread_flags))) {
+				set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
+				wake_up_process(action->thread);
+			}
+
+			/* Fall through to add to randomness */
+		case IRQ_HANDLED:
 			status |= action->flags;
+			break;
+
+		default:
+			break;
+		}
+
 		retval |= ret;
 		action = action->next;
 	} while (action);
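The switch above defines the contract for primary handlers under the new scheme.
A compressed sketch of the three cases as seen from a hypothetical handler (the
bar_* helpers and register bits are illustrative, not from this patch):

    static irqreturn_t bar_primary_handler(int irq, void *dev_id)
    {
            struct bar_device *bar = dev_id;
            u32 pending = bar_read_status(bar);     /* illustrative MMIO read */

            if (!pending)
                    return IRQ_NONE;        /* counts towards spurious detection */

            if (pending & BAR_FAST_EVENTS) {
                    bar_ack_fast(bar);      /* cheap work done right here */
                    return IRQ_HANDLED;
            }

            /*
             * Heavy work: disable the interrupt at the device and defer to the
             * irq thread. handle_IRQ_event() converts this return value to
             * IRQ_HANDLED for the spurious check and wakes the thread that was
             * registered alongside this handler.
             */
            bar_mask_device(bar);
            return IRQ_WAKE_THREAD;
    }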
@@ -331,6 +425,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }

 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq: the interrupt number
@@ -451,12 +550,10 @@ void early_init_irq_lock_class(void)
 	}
 }

-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);

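With the #ifdef removed, kstat_irqs_cpu() is available for both the sparse and
non-sparse descriptor layouts. The usual consumer pattern is to sum the per-CPU
counters, roughly as the arch show_interrupts() code does for /proc/interrupts;
a short illustrative sketch:

    unsigned int total = 0;
    int cpu;

    for_each_possible_cpu(cpu)
            total += kstat_irqs_cpu(irq, cpu);
    pr_info("irq %u fired %u times\n", irq, total);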
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..01ce20eab38f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,11 +12,21 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif

 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..7e2e7dd4cd2f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -8,24 +8,15 @@
  */

 #include <linux/irq.h>
+#include <linux/kthread.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/sched.h>

 #include "internals.h"

-#ifdef CONFIG_SMP
-cpumask_var_t irq_default_affinity;
-
-static int init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-	cpumask_setall(irq_default_affinity);
-	return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
@@ -61,9 +52,18 @@ void synchronize_irq(unsigned int irq)

 		/* Oops, that failed? */
 	} while (status & IRQ_INPROGRESS);
+
+	/*
+	 * We made sure that no hardirq handler is running. Now verify
+	 * that no threaded handlers are active.
+	 */
+	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 }
 EXPORT_SYMBOL(synchronize_irq);

+#ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
+
 /**
  *	irq_can_set_affinity - Check if the affinity of a given irq can be set
  *	@irq: Interrupt to check
@@ -80,6 +80,18 @@ int irq_can_set_affinity(unsigned int irq) | |||
80 | return 1; | 80 | return 1; |
81 | } | 81 | } |
82 | 82 | ||
83 | static void | ||
84 | irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) | ||
85 | { | ||
86 | struct irqaction *action = desc->action; | ||
87 | |||
88 | while (action) { | ||
89 | if (action->thread) | ||
90 | set_cpus_allowed_ptr(action->thread, cpumask); | ||
91 | action = action->next; | ||
92 | } | ||
93 | } | ||
94 | |||
83 | /** | 95 | /** |
84 | * irq_set_affinity - Set the irq affinity of a given irq | 96 | * irq_set_affinity - Set the irq affinity of a given irq |
85 | * @irq: Interrupt to set affinity | 97 | * @irq: Interrupt to set affinity |
@@ -98,16 +110,17 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
98 | 110 | ||
99 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 111 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
100 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { | 112 | if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { |
101 | cpumask_copy(&desc->affinity, cpumask); | 113 | cpumask_copy(desc->affinity, cpumask); |
102 | desc->chip->set_affinity(irq, cpumask); | 114 | desc->chip->set_affinity(irq, cpumask); |
103 | } else { | 115 | } else { |
104 | desc->status |= IRQ_MOVE_PENDING; | 116 | desc->status |= IRQ_MOVE_PENDING; |
105 | cpumask_copy(&desc->pending_mask, cpumask); | 117 | cpumask_copy(desc->pending_mask, cpumask); |
106 | } | 118 | } |
107 | #else | 119 | #else |
108 | cpumask_copy(&desc->affinity, cpumask); | 120 | cpumask_copy(desc->affinity, cpumask); |
109 | desc->chip->set_affinity(irq, cpumask); | 121 | desc->chip->set_affinity(irq, cpumask); |
110 | #endif | 122 | #endif |
123 | irq_set_thread_affinity(desc, cpumask); | ||
111 | desc->status |= IRQ_AFFINITY_SET; | 124 | desc->status |= IRQ_AFFINITY_SET; |
112 | spin_unlock_irqrestore(&desc->lock, flags); | 125 | spin_unlock_irqrestore(&desc->lock, flags); |
113 | return 0; | 126 | return 0; |
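irq_set_affinity() keeps its external signature here; only the internal cpumask
storage and the new irq_set_thread_affinity() call change, so any irq thread now
follows the hard interrupt's affinity. A short sketch of a caller pinning its
interrupt to one CPU (foo->irq and target_cpu are illustrative):

    if (irq_can_set_affinity(foo->irq))
            irq_set_affinity(foo->irq, cpumask_of(target_cpu));
    /* the threaded handler, if any, is migrated to the same mask */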
@@ -117,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
117 | /* | 130 | /* |
118 | * Generic version of the affinity autoselector. | 131 | * Generic version of the affinity autoselector. |
119 | */ | 132 | */ |
120 | int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | 133 | static int setup_affinity(unsigned int irq, struct irq_desc *desc) |
121 | { | 134 | { |
122 | if (!irq_can_set_affinity(irq)) | 135 | if (!irq_can_set_affinity(irq)) |
123 | return 0; | 136 | return 0; |
@@ -127,21 +140,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) | |||
127 | * one of the targets is online. | 140 | * one of the targets is online. |
128 | */ | 141 | */ |
129 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 142 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
130 | if (cpumask_any_and(&desc->affinity, cpu_online_mask) | 143 | if (cpumask_any_and(desc->affinity, cpu_online_mask) |
131 | < nr_cpu_ids) | 144 | < nr_cpu_ids) |
132 | goto set_affinity; | 145 | goto set_affinity; |
133 | else | 146 | else |
134 | desc->status &= ~IRQ_AFFINITY_SET; | 147 | desc->status &= ~IRQ_AFFINITY_SET; |
135 | } | 148 | } |
136 | 149 | ||
137 | cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); | 150 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); |
138 | set_affinity: | 151 | set_affinity: |
139 | desc->chip->set_affinity(irq, &desc->affinity); | 152 | desc->chip->set_affinity(irq, desc->affinity); |
140 | 153 | ||
141 | return 0; | 154 | return 0; |
142 | } | 155 | } |
143 | #else | 156 | #else |
144 | static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) | 157 | static inline int setup_affinity(unsigned int irq, struct irq_desc *d) |
145 | { | 158 | { |
146 | return irq_select_affinity(irq); | 159 | return irq_select_affinity(irq); |
147 | } | 160 | } |
@@ -157,19 +170,35 @@ int irq_select_affinity_usr(unsigned int irq) | |||
157 | int ret; | 170 | int ret; |
158 | 171 | ||
159 | spin_lock_irqsave(&desc->lock, flags); | 172 | spin_lock_irqsave(&desc->lock, flags); |
160 | ret = do_irq_select_affinity(irq, desc); | 173 | ret = setup_affinity(irq, desc); |
174 | if (!ret) | ||
175 | irq_set_thread_affinity(desc, desc->affinity); | ||
161 | spin_unlock_irqrestore(&desc->lock, flags); | 176 | spin_unlock_irqrestore(&desc->lock, flags); |
162 | 177 | ||
163 | return ret; | 178 | return ret; |
164 | } | 179 | } |
165 | 180 | ||
166 | #else | 181 | #else |
167 | static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) | 182 | static inline int setup_affinity(unsigned int irq, struct irq_desc *desc) |
168 | { | 183 | { |
169 | return 0; | 184 | return 0; |
170 | } | 185 | } |
171 | #endif | 186 | #endif |
172 | 187 | ||
188 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | ||
189 | { | ||
190 | if (suspend) { | ||
191 | if (!desc->action || (desc->action->flags & IRQF_TIMER)) | ||
192 | return; | ||
193 | desc->status |= IRQ_SUSPENDED; | ||
194 | } | ||
195 | |||
196 | if (!desc->depth++) { | ||
197 | desc->status |= IRQ_DISABLED; | ||
198 | desc->chip->disable(irq); | ||
199 | } | ||
200 | } | ||
201 | |||
173 | /** | 202 | /** |
174 | * disable_irq_nosync - disable an irq without waiting | 203 | * disable_irq_nosync - disable an irq without waiting |
175 | * @irq: Interrupt to disable | 204 | * @irq: Interrupt to disable |
@@ -190,10 +219,7 @@ void disable_irq_nosync(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->chip->disable(irq);
-	}
+	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -223,15 +249,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);

-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+	if (resume)
+		desc->status &= ~IRQ_SUSPENDED;
+
 	switch (desc->depth) {
 	case 0:
+ err_out:
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;

+		if (desc->status & IRQ_SUSPENDED)
+			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
 		check_irq_resend(desc, irq);
@@ -261,7 +293,7 @@ void enable_irq(unsigned int irq)
 		return;

 	spin_lock_irqsave(&desc->lock, flags);
-	__enable_irq(desc, irq);
+	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
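__disable_irq()/__enable_irq() keep the existing reference count in desc->depth,
so nested disables still have to be balanced. A small illustration of the rule
that the "Unbalanced enable" warning above enforces (foo->irq is illustrative):

    disable_irq(foo->irq);      /* depth 0 -> 1, line masked    */
    disable_irq(foo->irq);      /* depth 1 -> 2, already masked */
    enable_irq(foo->irq);       /* depth 2 -> 1, still masked   */
    enable_irq(foo->irq);       /* depth 1 -> 0, line unmasked  */
    /* one more enable_irq() here would trigger the WARN above */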
@@ -392,14 +424,98 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
392 | return ret; | 424 | return ret; |
393 | } | 425 | } |
394 | 426 | ||
427 | static int irq_wait_for_interrupt(struct irqaction *action) | ||
428 | { | ||
429 | while (!kthread_should_stop()) { | ||
430 | set_current_state(TASK_INTERRUPTIBLE); | ||
431 | |||
432 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | ||
433 | &action->thread_flags)) { | ||
434 | __set_current_state(TASK_RUNNING); | ||
435 | return 0; | ||
436 | } | ||
437 | schedule(); | ||
438 | } | ||
439 | return -1; | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Interrupt handler thread | ||
444 | */ | ||
445 | static int irq_thread(void *data) | ||
446 | { | ||
447 | struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, }; | ||
448 | struct irqaction *action = data; | ||
449 | struct irq_desc *desc = irq_to_desc(action->irq); | ||
450 | int wake; | ||
451 | |||
452 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
453 | current->irqaction = action; | ||
454 | |||
455 | while (!irq_wait_for_interrupt(action)) { | ||
456 | |||
457 | atomic_inc(&desc->threads_active); | ||
458 | |||
459 | spin_lock_irq(&desc->lock); | ||
460 | if (unlikely(desc->status & IRQ_DISABLED)) { | ||
461 | /* | ||
462 | * CHECKME: We might need a dedicated | ||
463 | * IRQ_THREAD_PENDING flag here, which | ||
464 | * retriggers the thread in check_irq_resend() | ||
465 | * but AFAICT IRQ_PENDING should be fine as it | ||
466 | * retriggers the interrupt itself --- tglx | ||
467 | */ | ||
468 | desc->status |= IRQ_PENDING; | ||
469 | spin_unlock_irq(&desc->lock); | ||
470 | } else { | ||
471 | spin_unlock_irq(&desc->lock); | ||
472 | |||
473 | action->thread_fn(action->irq, action->dev_id); | ||
474 | } | ||
475 | |||
476 | wake = atomic_dec_and_test(&desc->threads_active); | ||
477 | |||
478 | if (wake && waitqueue_active(&desc->wait_for_threads)) | ||
479 | wake_up(&desc->wait_for_threads); | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * Clear irqaction. Otherwise exit_irq_thread() would make | ||
484 | * fuzz about an active irq thread going into nirvana. | ||
485 | */ | ||
486 | current->irqaction = NULL; | ||
487 | return 0; | ||
488 | } | ||
489 | |||
490 | /* | ||
491 | * Called from do_exit() | ||
492 | */ | ||
493 | void exit_irq_thread(void) | ||
494 | { | ||
495 | struct task_struct *tsk = current; | ||
496 | |||
497 | if (!tsk->irqaction) | ||
498 | return; | ||
499 | |||
500 | printk(KERN_ERR | ||
501 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | ||
502 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | ||
503 | |||
504 | /* | ||
505 | * Set the THREAD DIED flag to prevent further wakeups of the | ||
506 | * soon to be gone threaded handler. | ||
507 | */ | ||
508 | set_bit(IRQTF_DIED, &tsk->irqaction->flags); | ||
509 | } | ||
510 | |||
395 | /* | 511 | /* |
396 | * Internal function to register an irqaction - typically used to | 512 | * Internal function to register an irqaction - typically used to |
397 | * allocate special interrupts that are part of the architecture. | 513 | * allocate special interrupts that are part of the architecture. |
398 | */ | 514 | */ |
399 | static int | 515 | static int |
400 | __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | 516 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
401 | { | 517 | { |
402 | struct irqaction *old, **p; | 518 | struct irqaction *old, **old_ptr; |
403 | const char *old_name = NULL; | 519 | const char *old_name = NULL; |
404 | unsigned long flags; | 520 | unsigned long flags; |
405 | int shared = 0; | 521 | int shared = 0; |
@@ -428,11 +544,31 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
428 | } | 544 | } |
429 | 545 | ||
430 | /* | 546 | /* |
547 | * Threaded handler ? | ||
548 | */ | ||
549 | if (new->thread_fn) { | ||
550 | struct task_struct *t; | ||
551 | |||
552 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | ||
553 | new->name); | ||
554 | if (IS_ERR(t)) | ||
555 | return PTR_ERR(t); | ||
556 | /* | ||
557 | * We keep the reference to the task struct even if | ||
558 | * the thread dies to avoid that the interrupt code | ||
559 | * references an already freed task_struct. | ||
560 | */ | ||
561 | get_task_struct(t); | ||
562 | new->thread = t; | ||
563 | wake_up_process(t); | ||
564 | } | ||
565 | |||
566 | /* | ||
431 | * The following block of code has to be executed atomically | 567 | * The following block of code has to be executed atomically |
432 | */ | 568 | */ |
433 | spin_lock_irqsave(&desc->lock, flags); | 569 | spin_lock_irqsave(&desc->lock, flags); |
434 | p = &desc->action; | 570 | old_ptr = &desc->action; |
435 | old = *p; | 571 | old = *old_ptr; |
436 | if (old) { | 572 | if (old) { |
437 | /* | 573 | /* |
438 | * Can't share interrupts unless both agree to and are | 574 | * Can't share interrupts unless both agree to and are |
@@ -455,8 +591,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
455 | 591 | ||
456 | /* add new interrupt at end of irq queue */ | 592 | /* add new interrupt at end of irq queue */ |
457 | do { | 593 | do { |
458 | p = &old->next; | 594 | old_ptr = &old->next; |
459 | old = *p; | 595 | old = *old_ptr; |
460 | } while (old); | 596 | } while (old); |
461 | shared = 1; | 597 | shared = 1; |
462 | } | 598 | } |
@@ -464,15 +600,15 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
464 | if (!shared) { | 600 | if (!shared) { |
465 | irq_chip_set_defaults(desc->chip); | 601 | irq_chip_set_defaults(desc->chip); |
466 | 602 | ||
603 | init_waitqueue_head(&desc->wait_for_threads); | ||
604 | |||
467 | /* Setup the type (level, edge polarity) if configured: */ | 605 | /* Setup the type (level, edge polarity) if configured: */ |
468 | if (new->flags & IRQF_TRIGGER_MASK) { | 606 | if (new->flags & IRQF_TRIGGER_MASK) { |
469 | ret = __irq_set_trigger(desc, irq, | 607 | ret = __irq_set_trigger(desc, irq, |
470 | new->flags & IRQF_TRIGGER_MASK); | 608 | new->flags & IRQF_TRIGGER_MASK); |
471 | 609 | ||
472 | if (ret) { | 610 | if (ret) |
473 | spin_unlock_irqrestore(&desc->lock, flags); | 611 | goto out_thread; |
474 | return ret; | ||
475 | } | ||
476 | } else | 612 | } else |
477 | compat_irq_chip_set_default_handler(desc); | 613 | compat_irq_chip_set_default_handler(desc); |
478 | #if defined(CONFIG_IRQ_PER_CPU) | 614 | #if defined(CONFIG_IRQ_PER_CPU) |
@@ -496,7 +632,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
496 | desc->status |= IRQ_NO_BALANCING; | 632 | desc->status |= IRQ_NO_BALANCING; |
497 | 633 | ||
498 | /* Set default affinity mask once everything is setup */ | 634 | /* Set default affinity mask once everything is setup */ |
499 | do_irq_select_affinity(irq, desc); | 635 | setup_affinity(irq, desc); |
500 | 636 | ||
501 | } else if ((new->flags & IRQF_TRIGGER_MASK) | 637 | } else if ((new->flags & IRQF_TRIGGER_MASK) |
502 | && (new->flags & IRQF_TRIGGER_MASK) | 638 | && (new->flags & IRQF_TRIGGER_MASK) |
@@ -507,7 +643,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
507 | (int)(new->flags & IRQF_TRIGGER_MASK)); | 643 | (int)(new->flags & IRQF_TRIGGER_MASK)); |
508 | } | 644 | } |
509 | 645 | ||
510 | *p = new; | 646 | *old_ptr = new; |
511 | 647 | ||
512 | /* Reset broken irq detection when installing new handler */ | 648 | /* Reset broken irq detection when installing new handler */ |
513 | desc->irq_count = 0; | 649 | desc->irq_count = 0; |
@@ -519,7 +655,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) | |||
519 | */ | 655 | */ |
520 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { | 656 | if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { |
521 | desc->status &= ~IRQ_SPURIOUS_DISABLED; | 657 | desc->status &= ~IRQ_SPURIOUS_DISABLED; |
522 | __enable_irq(desc, irq); | 658 | __enable_irq(desc, irq, false); |
523 | } | 659 | } |
524 | 660 | ||
525 | spin_unlock_irqrestore(&desc->lock, flags); | 661 | spin_unlock_irqrestore(&desc->lock, flags); |
@@ -540,8 +676,19 @@ mismatch: | |||
540 | dump_stack(); | 676 | dump_stack(); |
541 | } | 677 | } |
542 | #endif | 678 | #endif |
679 | ret = -EBUSY; | ||
680 | |||
681 | out_thread: | ||
543 | spin_unlock_irqrestore(&desc->lock, flags); | 682 | spin_unlock_irqrestore(&desc->lock, flags); |
544 | return -EBUSY; | 683 | if (new->thread) { |
684 | struct task_struct *t = new->thread; | ||
685 | |||
686 | new->thread = NULL; | ||
687 | if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) | ||
688 | kthread_stop(t); | ||
689 | put_task_struct(t); | ||
690 | } | ||
691 | return ret; | ||
545 | } | 692 | } |
546 | 693 | ||
547 | /** | 694 | /** |
@@ -557,97 +704,138 @@ int setup_irq(unsigned int irq, struct irqaction *act) | |||
557 | 704 | ||
558 | return __setup_irq(irq, desc, act); | 705 | return __setup_irq(irq, desc, act); |
559 | } | 706 | } |
707 | EXPORT_SYMBOL_GPL(setup_irq); | ||
560 | 708 | ||
561 | /** | 709 | /* |
562 | * free_irq - free an interrupt | 710 | * Internal function to unregister an irqaction - used to free |
563 | * @irq: Interrupt line to free | 711 | * regular and special interrupts that are part of the architecture. |
564 | * @dev_id: Device identity to free | ||
565 | * | ||
566 | * Remove an interrupt handler. The handler is removed and if the | ||
567 | * interrupt line is no longer in use by any driver it is disabled. | ||
568 | * On a shared IRQ the caller must ensure the interrupt is disabled | ||
569 | * on the card it drives before calling this function. The function | ||
570 | * does not return until any executing interrupts for this IRQ | ||
571 | * have completed. | ||
572 | * | ||
573 | * This function must not be called from interrupt context. | ||
574 | */ | 712 | */ |
575 | void free_irq(unsigned int irq, void *dev_id) | 713 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
576 | { | 714 | { |
577 | struct irq_desc *desc = irq_to_desc(irq); | 715 | struct irq_desc *desc = irq_to_desc(irq); |
578 | struct irqaction **p; | 716 | struct irqaction *action, **action_ptr; |
717 | struct task_struct *irqthread; | ||
579 | unsigned long flags; | 718 | unsigned long flags; |
580 | 719 | ||
581 | WARN_ON(in_interrupt()); | 720 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
582 | 721 | ||
583 | if (!desc) | 722 | if (!desc) |
584 | return; | 723 | return NULL; |
585 | 724 | ||
586 | spin_lock_irqsave(&desc->lock, flags); | 725 | spin_lock_irqsave(&desc->lock, flags); |
587 | p = &desc->action; | 726 | |
727 | /* | ||
728 | * There can be multiple actions per IRQ descriptor, find the right | ||
729 | * one based on the dev_id: | ||
730 | */ | ||
731 | action_ptr = &desc->action; | ||
588 | for (;;) { | 732 | for (;;) { |
589 | struct irqaction *action = *p; | 733 | action = *action_ptr; |
590 | 734 | ||
591 | if (action) { | 735 | if (!action) { |
592 | struct irqaction **pp = p; | 736 | WARN(1, "Trying to free already-free IRQ %d\n", irq); |
737 | spin_unlock_irqrestore(&desc->lock, flags); | ||
593 | 738 | ||
594 | p = &action->next; | 739 | return NULL; |
595 | if (action->dev_id != dev_id) | 740 | } |
596 | continue; | 741 | |
742 | if (action->dev_id == dev_id) | ||
743 | break; | ||
744 | action_ptr = &action->next; | ||
745 | } | ||
597 | 746 | ||
598 | /* Found it - now remove it from the list of entries */ | 747 | /* Found it - now remove it from the list of entries: */ |
599 | *pp = action->next; | 748 | *action_ptr = action->next; |
600 | 749 | ||
601 | /* Currently used only by UML, might disappear one day.*/ | 750 | /* Currently used only by UML, might disappear one day: */ |
602 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 751 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
603 | if (desc->chip->release) | 752 | if (desc->chip->release) |
604 | desc->chip->release(irq, dev_id); | 753 | desc->chip->release(irq, dev_id); |
605 | #endif | 754 | #endif |
606 | 755 | ||
607 | if (!desc->action) { | 756 | /* If this was the last handler, shut down the IRQ line: */ |
608 | desc->status |= IRQ_DISABLED; | 757 | if (!desc->action) { |
609 | if (desc->chip->shutdown) | 758 | desc->status |= IRQ_DISABLED; |
610 | desc->chip->shutdown(irq); | 759 | if (desc->chip->shutdown) |
611 | else | 760 | desc->chip->shutdown(irq); |
612 | desc->chip->disable(irq); | 761 | else |
613 | } | 762 | desc->chip->disable(irq); |
614 | spin_unlock_irqrestore(&desc->lock, flags); | 763 | } |
615 | unregister_handler_proc(irq, action); | 764 | |
765 | irqthread = action->thread; | ||
766 | action->thread = NULL; | ||
767 | |||
768 | spin_unlock_irqrestore(&desc->lock, flags); | ||
769 | |||
770 | unregister_handler_proc(irq, action); | ||
771 | |||
772 | /* Make sure it's not being used on another CPU: */ | ||
773 | synchronize_irq(irq); | ||
774 | |||
775 | if (irqthread) { | ||
776 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | ||
777 | kthread_stop(irqthread); | ||
778 | put_task_struct(irqthread); | ||
779 | } | ||
616 | 780 | ||
617 | /* Make sure it's not being used on another CPU */ | ||
618 | synchronize_irq(irq); | ||
619 | #ifdef CONFIG_DEBUG_SHIRQ | ||
620 | /* | ||
621 | * It's a shared IRQ -- the driver ought to be | ||
622 | * prepared for it to happen even now it's | ||
623 | * being freed, so let's make sure.... We do | ||
624 | * this after actually deregistering it, to | ||
625 | * make sure that a 'real' IRQ doesn't run in | ||
626 | * parallel with our fake | ||
627 | */ | ||
628 | if (action->flags & IRQF_SHARED) { | ||
629 | local_irq_save(flags); | ||
630 | action->handler(irq, dev_id); | ||
631 | local_irq_restore(flags); | ||
632 | } | ||
633 | #endif | ||
634 | kfree(action); | ||
635 | return; | ||
636 | } | ||
637 | printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq); | ||
638 | #ifdef CONFIG_DEBUG_SHIRQ | 781 | #ifdef CONFIG_DEBUG_SHIRQ |
639 | dump_stack(); | 782 | /* |
640 | #endif | 783 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
641 | spin_unlock_irqrestore(&desc->lock, flags); | 784 | * event to happen even now it's being freed, so let's make sure that |
642 | return; | 785 | * is so by doing an extra call to the handler .... |
786 | * | ||
787 | * ( We do this after actually deregistering it, to make sure that a | ||
788 | * 'real' IRQ doesn't run in * parallel with our fake. ) | ||
789 | */ | ||
790 | if (action->flags & IRQF_SHARED) { | ||
791 | local_irq_save(flags); | ||
792 | action->handler(irq, dev_id); | ||
793 | local_irq_restore(flags); | ||
643 | } | 794 | } |
795 | #endif | ||
796 | return action; | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * remove_irq - free an interrupt | ||
801 | * @irq: Interrupt line to free | ||
802 | * @act: irqaction for the interrupt | ||
803 | * | ||
804 | * Used to remove interrupts statically setup by the early boot process. | ||
805 | */ | ||
806 | void remove_irq(unsigned int irq, struct irqaction *act) | ||
807 | { | ||
808 | __free_irq(irq, act->dev_id); | ||
809 | } | ||
810 | EXPORT_SYMBOL_GPL(remove_irq); | ||
811 | |||
812 | /** | ||
813 | * free_irq - free an interrupt allocated with request_irq | ||
814 | * @irq: Interrupt line to free | ||
815 | * @dev_id: Device identity to free | ||
816 | * | ||
817 | * Remove an interrupt handler. The handler is removed and if the | ||
818 | * interrupt line is no longer in use by any driver it is disabled. | ||
819 | * On a shared IRQ the caller must ensure the interrupt is disabled | ||
820 | * on the card it drives before calling this function. The function | ||
821 | * does not return until any executing interrupts for this IRQ | ||
822 | * have completed. | ||
823 | * | ||
824 | * This function must not be called from interrupt context. | ||
825 | */ | ||
826 | void free_irq(unsigned int irq, void *dev_id) | ||
827 | { | ||
828 | kfree(__free_irq(irq, dev_id)); | ||
644 | } | 829 | } |
645 | EXPORT_SYMBOL(free_irq); | 830 | EXPORT_SYMBOL(free_irq); |
646 | 831 | ||
647 | /** | 832 | /** |
648 | * request_irq - allocate an interrupt line | 833 | * request_threaded_irq - allocate an interrupt line |
649 | * @irq: Interrupt line to allocate | 834 | * @irq: Interrupt line to allocate |
650 | * @handler: Function to be called when the IRQ occurs | 835 | * @handler: Function to be called when the IRQ occurs. |
836 | * Primary handler for threaded interrupts | ||
837 | * @thread_fn: Function called from the irq handler thread | ||
838 | * If NULL, no irq thread is created | ||
651 | * @irqflags: Interrupt type flags | 839 | * @irqflags: Interrupt type flags |
652 | * @devname: An ascii name for the claiming device | 840 | * @devname: An ascii name for the claiming device |
653 | * @dev_id: A cookie passed back to the handler function | 841 | * @dev_id: A cookie passed back to the handler function |
@@ -659,6 +847,15 @@ EXPORT_SYMBOL(free_irq);
  *	raises, you must take care both to initialise your hardware
  *	and to set up the interrupt handler in the right order.
  *
+ *	If you want to set up a threaded irq handler for your device
+ *	then you need to supply @handler and @thread_fn. @handler ist
+ *	still called in hard interrupt context and has to check
+ *	whether the interrupt originates from the device. If yes it
+ *	needs to disable the interrupt on the device and return
+ *	IRQ_THREAD_WAKE which will wake up the handler thread and run
+ *	@thread_fn. This split handler design is necessary to support
+ *	shared interrupts.
+ *
  *	Dev_id must be globally unique. Normally the address of the
  *	device data structure is used as the cookie. Since the handler
  *	receives this value it makes sense to use it.
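Note that while the comment above says IRQ_THREAD_WAKE, the dispatch code added
to handle_IRQ_event() earlier in this series checks for IRQ_WAKE_THREAD, which is
the value a primary handler actually returns. A sketch of a registration through
the new entry point, matching the signature in this hunk (the foo_* handlers and
device are illustrative):

    ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
                               IRQF_SHARED, "foo", foo);
    if (ret) {
            dev_err(foo->dev, "cannot request irq %d\n", foo->irq);
            return ret;
    }
    ...
    free_irq(foo->irq, foo);    /* also stops and reaps the irq thread */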
@@ -674,8 +871,9 @@ EXPORT_SYMBOL(free_irq); | |||
674 | * IRQF_TRIGGER_* Specify active edge(s) or level | 871 | * IRQF_TRIGGER_* Specify active edge(s) or level |
675 | * | 872 | * |
676 | */ | 873 | */ |
677 | int request_irq(unsigned int irq, irq_handler_t handler, | 874 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
678 | unsigned long irqflags, const char *devname, void *dev_id) | 875 | irq_handler_t thread_fn, unsigned long irqflags, |
876 | const char *devname, void *dev_id) | ||
679 | { | 877 | { |
680 | struct irqaction *action; | 878 | struct irqaction *action; |
681 | struct irq_desc *desc; | 879 | struct irq_desc *desc; |
@@ -687,11 +885,12 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
687 | * the behavior is classified as "will not fix" so we need to | 885 | * the behavior is classified as "will not fix" so we need to |
688 | * start nudging drivers away from using that idiom. | 886 | * start nudging drivers away from using that idiom. |
689 | */ | 887 | */ |
690 | if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) | 888 | if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) == |
691 | == (IRQF_SHARED|IRQF_DISABLED)) | 889 | (IRQF_SHARED|IRQF_DISABLED)) { |
692 | pr_warning("IRQ %d/%s: IRQF_DISABLED is not " | 890 | pr_warning( |
693 | "guaranteed on shared IRQs\n", | 891 | "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n", |
694 | irq, devname); | 892 | irq, devname); |
893 | } | ||
695 | 894 | ||
696 | #ifdef CONFIG_LOCKDEP | 895 | #ifdef CONFIG_LOCKDEP |
697 | /* | 896 | /* |
@@ -717,15 +916,14 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
717 | if (!handler) | 916 | if (!handler) |
718 | return -EINVAL; | 917 | return -EINVAL; |
719 | 918 | ||
720 | action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); | 919 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
721 | if (!action) | 920 | if (!action) |
722 | return -ENOMEM; | 921 | return -ENOMEM; |
723 | 922 | ||
724 | action->handler = handler; | 923 | action->handler = handler; |
924 | action->thread_fn = thread_fn; | ||
725 | action->flags = irqflags; | 925 | action->flags = irqflags; |
726 | cpus_clear(action->mask); | ||
727 | action->name = devname; | 926 | action->name = devname; |
728 | action->next = NULL; | ||
729 | action->dev_id = dev_id; | 927 | action->dev_id = dev_id; |
730 | 928 | ||
731 | retval = __setup_irq(irq, desc, action); | 929 | retval = __setup_irq(irq, desc, action); |
@@ -753,4 +951,4 @@ int request_irq(unsigned int irq, irq_handler_t handler, | |||
753 | #endif | 951 | #endif |
754 | return retval; | 952 | return retval; |
755 | } | 953 | } |
756 | EXPORT_SYMBOL(request_irq); | 954 | EXPORT_SYMBOL(request_threaded_irq); |
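Only the threaded entry point is exported from manage.c now. Existing
request_irq() callers are presumably kept working by a static inline wrapper in
<linux/interrupt.h>, along these lines (an assumption; the wrapper is not part of
this diff):

    static inline int __must_check
    request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
                const char *name, void *dev)
    {
            return request_threaded_irq(irq, handler, NULL, flags, name, dev);
    }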
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)

 	desc->status &= ~IRQ_MOVE_PENDING;

-	if (unlikely(cpumask_empty(&desc->pending_mask)))
+	if (unlikely(cpumask_empty(desc->pending_mask)))
 		return;

 	if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
 		   < nr_cpu_ids)) {
-		cpumask_and(&desc->affinity,
-			    &desc->pending_mask, cpu_online_mask);
-		desc->chip->set_affinity(irq, &desc->affinity);
+		cpumask_and(desc->affinity,
+			    desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, desc->affinity);
 	}
-	cpumask_clear(&desc->pending_mask);
+	cpumask_clear(desc->pending_mask);
 }

 void move_native_irq(int irq)
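The dropped '&' throughout this file reflects desc->affinity and
desc->pending_mask becoming cpumask_var_t, i.e. pointers when
CONFIG_CPUMASK_OFFSTACK=y. The init_alloc_desc_masks()/init_copy_desc_masks()
helpers called from handle.c and numa_migrate.c are defined outside this diff; a
sketch of what such an allocator has to do (the names match the callers here, the
body is an assumption, not the verbatim helper):

    static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
                                             bool boot)
    {
            int node = cpu_to_node(cpu);

            if (boot) {
                    /* boot-time descriptors come from bootmem */
                    alloc_bootmem_cpumask_var(&desc->affinity);
            } else if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) {
                    return false;
            }
            cpumask_setall(desc->affinity);

    #ifdef CONFIG_GENERIC_PENDING_IRQ
            if (boot) {
                    alloc_bootmem_cpumask_var(&desc->pending_mask);
            } else if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC,
                                               node)) {
                    free_cpumask_var(desc->affinity);
                    return false;
            }
            cpumask_clear(desc->pending_mask);
    #endif
            return true;
    }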
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77a..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc, | |||
17 | struct irq_desc *desc, | 17 | struct irq_desc *desc, |
18 | int cpu, int nr) | 18 | int cpu, int nr) |
19 | { | 19 | { |
20 | unsigned long bytes; | ||
21 | |||
22 | init_kstat_irqs(desc, cpu, nr); | 20 | init_kstat_irqs(desc, cpu, nr); |
23 | 21 | ||
24 | if (desc->kstat_irqs != old_desc->kstat_irqs) { | 22 | if (desc->kstat_irqs != old_desc->kstat_irqs) |
25 | /* Compute how many bytes we need per irq and allocate them */ | 23 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, |
26 | bytes = nr * sizeof(unsigned int); | 24 | nr * sizeof(*desc->kstat_irqs)); |
27 | |||
28 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes); | ||
29 | } | ||
30 | } | 25 | } |
31 | 26 | ||
32 | static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | 27 | static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) |
@@ -38,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | |||
38 | old_desc->kstat_irqs = NULL; | 33 | old_desc->kstat_irqs = NULL; |
39 | } | 34 | } |
40 | 35 | ||
41 | static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | 36 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, |
42 | struct irq_desc *desc, int cpu) | 37 | struct irq_desc *desc, int cpu) |
43 | { | 38 | { |
44 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
40 | if (!init_alloc_desc_masks(desc, cpu, false)) { | ||
41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | ||
42 | "for migration.\n", irq); | ||
43 | return false; | ||
44 | } | ||
45 | spin_lock_init(&desc->lock); | 45 | spin_lock_init(&desc->lock); |
46 | desc->cpu = cpu; | 46 | desc->cpu = cpu; |
47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
48 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); | 48 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); |
49 | init_copy_desc_masks(old_desc, desc); | ||
49 | arch_init_copy_chip_data(old_desc, desc, cpu); | 50 | arch_init_copy_chip_data(old_desc, desc, cpu); |
51 | return true; | ||
50 | } | 52 | } |
51 | 53 | ||
52 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | 54 | static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) |
@@ -71,23 +73,34 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
71 | desc = irq_desc_ptrs[irq]; | 73 | desc = irq_desc_ptrs[irq]; |
72 | 74 | ||
73 | if (desc && old_desc != desc) | 75 | if (desc && old_desc != desc) |
74 | goto out_unlock; | 76 | goto out_unlock; |
75 | 77 | ||
76 | node = cpu_to_node(cpu); | 78 | node = cpu_to_node(cpu); |
77 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | 79 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); |
78 | if (!desc) { | 80 | if (!desc) { |
79 | printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq); | 81 | printk(KERN_ERR "irq %d: can not get new irq_desc " |
82 | "for migration.\n", irq); | ||
83 | /* still use old one */ | ||
84 | desc = old_desc; | ||
85 | goto out_unlock; | ||
86 | } | ||
87 | if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) { | ||
80 | /* still use old one */ | 88 | /* still use old one */ |
89 | kfree(desc); | ||
81 | desc = old_desc; | 90 | desc = old_desc; |
82 | goto out_unlock; | 91 | goto out_unlock; |
83 | } | 92 | } |
84 | init_copy_one_irq_desc(irq, old_desc, desc, cpu); | ||
85 | 93 | ||
86 | irq_desc_ptrs[irq] = desc; | 94 | irq_desc_ptrs[irq] = desc; |
95 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | ||
87 | 96 | ||
88 | /* free the old one */ | 97 | /* free the old one */ |
89 | free_one_irq_desc(old_desc, desc); | 98 | free_one_irq_desc(old_desc, desc); |
99 | spin_unlock(&old_desc->lock); | ||
90 | kfree(old_desc); | 100 | kfree(old_desc); |
101 | spin_lock(&desc->lock); | ||
102 | |||
103 | return desc; | ||
91 | 104 | ||
92 | out_unlock: | 105 | out_unlock: |
93 | spin_unlock_irqrestore(&sparse_irq_lock, flags); | 106 | spin_unlock_irqrestore(&sparse_irq_lock, flags); |
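
The reworked error handling in __real_move_irq_desc() above follows an "allocate on the new node, copy, fall back to the old descriptor if anything fails" pattern. A reduced sketch of that pattern is shown below; the obj_* names and the 32-entry extra array are made up for illustration.

#include <linux/slab.h>

struct obj {
	int		node_data;
	unsigned int	*extra;		/* secondary allocation, like kstat_irqs */
};

/* Hypothetical stand-in for init_copy_one_irq_desc(): a follow-up
 * allocation step that can itself fail. */
static bool obj_init_extra(struct obj *new, const struct obj *old)
{
	new->extra = kmemdup(old->extra, 32 * sizeof(*old->extra), GFP_ATOMIC);
	return new->extra != NULL;
}

/* If either the node-local allocation or the copy step fails, undo what
 * was done and keep serving requests from the old object instead of
 * failing the whole operation. */
static struct obj *obj_move_to_node(struct obj *old, int node)
{
	struct obj *new = kzalloc_node(sizeof(*new), GFP_ATOMIC, node);

	if (!new)
		return old;			/* keep the old one */

	new->node_data = old->node_data;
	if (!obj_init_extra(new, old)) {
		kfree(new);			/* undo and keep the old one */
		return old;
	}

	kfree(old->extra);			/* retire the old object */
	kfree(old);
	return new;
}
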
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c new file mode 100644 index 000000000000..638d8bedec14 --- /dev/null +++ b/kernel/irq/pm.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * linux/kernel/irq/pm.c | ||
3 | * | ||
4 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file contains power management functions related to interrupts. | ||
7 | */ | ||
8 | |||
9 | #include <linux/irq.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | #include "internals.h" | ||
14 | |||
15 | /** | ||
16 | * suspend_device_irqs - disable all currently enabled interrupt lines | ||
17 | * | ||
18 | * During system-wide suspend or hibernation device interrupts need to be | ||
19 | * disabled at the chip level and this function is provided for this purpose. | ||
20 | * It disables all interrupt lines that are enabled at the moment and sets the | ||
21 | * IRQ_SUSPENDED flag for them. | ||
22 | */ | ||
23 | void suspend_device_irqs(void) | ||
24 | { | ||
25 | struct irq_desc *desc; | ||
26 | int irq; | ||
27 | |||
28 | for_each_irq_desc(irq, desc) { | ||
29 | unsigned long flags; | ||
30 | |||
31 | spin_lock_irqsave(&desc->lock, flags); | ||
32 | __disable_irq(desc, irq, true); | ||
33 | spin_unlock_irqrestore(&desc->lock, flags); | ||
34 | } | ||
35 | |||
36 | for_each_irq_desc(irq, desc) | ||
37 | if (desc->status & IRQ_SUSPENDED) | ||
38 | synchronize_irq(irq); | ||
39 | } | ||
40 | EXPORT_SYMBOL_GPL(suspend_device_irqs); | ||
41 | |||
42 | /** | ||
43 | * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs() | ||
44 | * | ||
45 | * Enable all interrupt lines previously disabled by suspend_device_irqs() that | ||
46 | * have the IRQ_SUSPENDED flag set. | ||
47 | */ | ||
48 | void resume_device_irqs(void) | ||
49 | { | ||
50 | struct irq_desc *desc; | ||
51 | int irq; | ||
52 | |||
53 | for_each_irq_desc(irq, desc) { | ||
54 | unsigned long flags; | ||
55 | |||
56 | if (!(desc->status & IRQ_SUSPENDED)) | ||
57 | continue; | ||
58 | |||
59 | spin_lock_irqsave(&desc->lock, flags); | ||
60 | __enable_irq(desc, irq, true); | ||
61 | spin_unlock_irqrestore(&desc->lock, flags); | ||
62 | } | ||
63 | } | ||
64 | EXPORT_SYMBOL_GPL(resume_device_irqs); | ||
65 | |||
66 | /** | ||
67 | * check_wakeup_irqs - check if any wake-up interrupts are pending | ||
68 | */ | ||
69 | int check_wakeup_irqs(void) | ||
70 | { | ||
71 | struct irq_desc *desc; | ||
72 | int irq; | ||
73 | |||
74 | for_each_irq_desc(irq, desc) | ||
75 | if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING)) | ||
76 | return -EBUSY; | ||
77 | |||
78 | return 0; | ||
79 | } | ||
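
The three entry points added in pm.c are intended for the suspend/resume core rather than for individual drivers. The sketch below shows roughly where they would sit in a platform sleep sequence; example_suspend_sequence() and enter_platform_sleep_state() are hypothetical names, and the real call sites are wired up by separate PM-core patches not shown here.

#include <linux/interrupt.h>
#include <linux/errno.h>

extern int check_wakeup_irqs(void);	/* provided by kernel/irq/pm.c above */

/* Placeholder for the arch-specific hook that actually enters the
 * low-power state. */
static int enter_platform_sleep_state(void)
{
	return 0;
}

static int example_suspend_sequence(void)
{
	int error;

	/* Mask every enabled interrupt line at the chip level, mark it
	 * IRQ_SUSPENDED and wait for in-flight handlers to finish. */
	suspend_device_irqs();

	/* Abort if a wake-up capable line already has an interrupt pending;
	 * otherwise the system would sleep through its own wake-up event. */
	error = check_wakeup_irqs();
	if (!error)
		error = enter_platform_sleep_state();

	/* Re-enable exactly those lines suspend_device_irqs() disabled. */
	resume_device_irqs();
	return error;
}
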
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index aae3f742bcec..692363dd591f 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir; | |||
20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) | 20 | static int irq_affinity_proc_show(struct seq_file *m, void *v) |
21 | { | 21 | { |
22 | struct irq_desc *desc = irq_to_desc((long)m->private); | 22 | struct irq_desc *desc = irq_to_desc((long)m->private); |
23 | const struct cpumask *mask = &desc->affinity; | 23 | const struct cpumask *mask = desc->affinity; |
24 | 24 | ||
25 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 25 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
26 | if (desc->status & IRQ_MOVE_PENDING) | 26 | if (desc->status & IRQ_MOVE_PENDING) |
27 | mask = &desc->pending_mask; | 27 | mask = desc->pending_mask; |
28 | #endif | 28 | #endif |
29 | seq_cpumask(m, mask); | 29 | seq_cpumask(m, mask); |
30 | seq_putc(m, '\n'); | 30 | seq_putc(m, '\n'); |
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index dd364c11e56e..4d568294de3e 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq) | |||
104 | return ok; | 104 | return ok; |
105 | } | 105 | } |
106 | 106 | ||
107 | static void poll_spurious_irqs(unsigned long dummy) | 107 | static void poll_all_shared_irqs(void) |
108 | { | 108 | { |
109 | struct irq_desc *desc; | 109 | struct irq_desc *desc; |
110 | int i; | 110 | int i; |
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
123 | 123 | ||
124 | try_one_irq(i, desc); | 124 | try_one_irq(i, desc); |
125 | } | 125 | } |
126 | } | ||
127 | |||
128 | static void poll_spurious_irqs(unsigned long dummy) | ||
129 | { | ||
130 | poll_all_shared_irqs(); | ||
126 | 131 | ||
127 | mod_timer(&poll_spurious_irq_timer, | 132 | mod_timer(&poll_spurious_irq_timer, |
128 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 133 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
129 | } | 134 | } |
130 | 135 | ||
136 | #ifdef CONFIG_DEBUG_SHIRQ | ||
137 | void debug_poll_all_shared_irqs(void) | ||
138 | { | ||
139 | poll_all_shared_irqs(); | ||
140 | } | ||
141 | #endif | ||
142 | |||
131 | /* | 143 | /* |
132 | * If 99,900 of the previous 100,000 interrupts have not been handled | 144 | * If 99,900 of the previous 100,000 interrupts have not been handled |
133 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic | 145 | * then assume that the IRQ is stuck in some manner. Drop a diagnostic |