Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/Makefile        |   1
-rw-r--r--  kernel/irq/autoprobe.c     |   5
-rw-r--r--  kernel/irq/chip.c          |  18
-rw-r--r--  kernel/irq/handle.c        | 205
-rw-r--r--  kernel/irq/internals.h     |   5
-rw-r--r--  kernel/irq/manage.c        |  58
-rw-r--r--  kernel/irq/migration.c     |  14
-rw-r--r--  kernel/irq/numa_migrate.c  | 119
-rw-r--r--  kernel/irq/proc.c          |  63
9 files changed, 428 insertions, 60 deletions
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 681c52dbfe22..4dd5b1edac98 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,3 +3,4 @@ obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
+obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index cc0f7321b8ce..1de9700f416e 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/async.h>
 
 #include "internals.h"
 
@@ -34,6 +35,10 @@ unsigned long probe_irq_on(void)
 	unsigned int status;
 	int i;
 
+	/*
+	 * quiesce the kernel, or at least the asynchronous portion
+	 */
+	async_synchronize_full();
 	mutex_lock(&probing_active);
 	/*
 	 * something may have generated an irq long ago and we want to
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 10b5092e9bfe..f63c706d25e1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -24,9 +24,10 @@
  */
 void dynamic_irq_init(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_desc *desc;
 	unsigned long flags;
 
+	desc = irq_to_desc(irq);
 	if (!desc) {
 		WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
 		return;
@@ -45,7 +46,7 @@ void dynamic_irq_init(unsigned int irq)
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-	cpus_setall(desc->affinity);
+	cpumask_setall(&desc->affinity);
 #endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
@@ -124,6 +125,7 @@ int set_irq_type(unsigned int irq, unsigned int type)
 		return -ENODEV;
 	}
 
+	type &= IRQ_TYPE_SENSE_MASK;
 	if (type == IRQ_TYPE_NONE)
 		return 0;
 
@@ -352,6 +354,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 
 	spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -429,6 +432,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	spin_unlock(&desc->lock);
 }
@@ -465,12 +469,14 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		    !desc->action)) {
 		desc->status |= (IRQ_PENDING | IRQ_MASKED);
 		mask_ack_irq(desc, irq);
+		desc = irq_remap_to_desc(irq, desc);
 		goto out_unlock;
 	}
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
+	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
 	desc->status |= IRQ_INPROGRESS;
@@ -531,8 +537,10 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	if (desc->chip->eoi)
+	if (desc->chip->eoi) {
 		desc->chip->eoi(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 }
 
 void
@@ -567,8 +575,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
-		if (desc->chip != &no_irq_chip)
+		if (desc->chip != &no_irq_chip) {
 			mask_ack_irq(desc, irq);
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		desc->status |= IRQ_DISABLED;
 		desc->depth = 1;
 	}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c815b42d0f5b..c20db0be9173 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -15,9 +15,16 @@
 #include <linux/random.h>
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
+#include <linux/rculist.h>
+#include <linux/hash.h>
 
 #include "internals.h"
 
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+struct lock_class_key irq_desc_lock_class;
+
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq: the interrupt number
@@ -49,6 +56,150 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_desc irq_desc_init = {
+	.irq        = -1,
+	.status     = IRQ_DISABLED,
+	.chip       = &no_irq_chip,
+	.handle_irq = handle_bad_irq,
+	.depth      = 1,
+	.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+	.affinity   = CPU_MASK_ALL
+#endif
+};
+
+void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+{
+	unsigned long bytes;
+	char *ptr;
+	int node;
+
+	/* Compute how many bytes we need per irq and allocate them */
+	bytes = nr * sizeof(unsigned int);
+
+	node = cpu_to_node(cpu);
+	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
+	printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+
+	if (ptr)
+		desc->kstat_irqs = (unsigned int *)ptr;
+}
+
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+{
+	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+
+	spin_lock_init(&desc->lock);
+	desc->irq = irq;
+#ifdef CONFIG_SMP
+	desc->cpu = cpu;
+#endif
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_kstat_irqs(desc, cpu, nr_cpu_ids);
+	if (!desc->kstat_irqs) {
+		printk(KERN_ERR "can not alloc kstat_irqs\n");
+		BUG_ON(1);
+	}
+	arch_init_chip_data(desc, cpu);
+}
+
+/*
+ * Protect the sparse_irqs:
+ */
+DEFINE_SPINLOCK(sparse_irq_lock);
+
+struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+
+static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
+	[0 ... NR_IRQS_LEGACY-1] = {
+		.irq        = -1,
+		.status     = IRQ_DISABLED,
+		.chip       = &no_irq_chip,
+		.handle_irq = handle_bad_irq,
+		.depth      = 1,
+		.lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
+#ifdef CONFIG_SMP
+		.affinity   = CPU_MASK_ALL
+#endif
+	}
+};
+
+/* FIXME: use bootmem alloc ...*/
+static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int legacy_count;
+	int i;
+
+	desc = irq_desc_legacy;
+	legacy_count = ARRAY_SIZE(irq_desc_legacy);
+
+	for (i = 0; i < legacy_count; i++) {
+		desc[i].irq = i;
+		desc[i].kstat_irqs = kstat_irqs_legacy[i];
+		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+
+		irq_desc_ptrs[i] = desc + i;
+	}
+
+	for (i = legacy_count; i < NR_IRQS; i++)
+		irq_desc_ptrs[i] = NULL;
+
+	return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int node;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
+				irq, NR_IRQS);
+		WARN_ON(1);
+		return NULL;
+	}
+
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		return desc;
+
+	spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_desc_ptrs[irq];
+	if (desc)
+		goto out_unlock;
+
+	node = cpu_to_node(cpu);
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
+			irq, cpu, node);
+	if (!desc) {
+		printk(KERN_ERR "can not alloc irq_desc\n");
+		BUG_ON(1);
+	}
+	init_one_irq_desc(irq, desc, cpu);
+
+	irq_desc_ptrs[irq] = desc;
+
+out_unlock:
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
+#else /* !CONFIG_SPARSE_IRQ */
+
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -62,6 +213,32 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	desc = irq_desc;
+	count = ARRAY_SIZE(irq_desc);
+
+	for (i = 0; i < count; i++)
+		desc[i].irq = i;
+
+	return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+#endif /* !CONFIG_SPARSE_IRQ */
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -179,8 +356,11 @@ unsigned int __do_IRQ(unsigned int irq)
 		/*
 		 * No locking required for CPU-local interrupts:
 		 */
-		if (desc->chip->ack)
+		if (desc->chip->ack) {
 			desc->chip->ack(irq);
+			/* get new one */
+			desc = irq_remap_to_desc(irq, desc);
+		}
 		if (likely(!(desc->status & IRQ_DISABLED))) {
 			action_ret = handle_IRQ_event(irq, desc->action);
 			if (!noirqdebug)
@@ -191,8 +371,10 @@ unsigned int __do_IRQ(unsigned int irq)
 	}
 
 	spin_lock(&desc->lock);
-	if (desc->chip->ack)
+	if (desc->chip->ack) {
 		desc->chip->ack(irq);
+		desc = irq_remap_to_desc(irq, desc);
+	}
 	/*
 	 * REPLAY is when Linux resends an IRQ that was dropped earlier
 	 * WAITING is used by probe to mark irqs that are being tested
@@ -259,19 +441,22 @@ out:
 }
 #endif
 
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 void early_init_irq_lock_class(void)
 {
 	struct irq_desc *desc;
 	int i;
 
-	for_each_irq_desc(i, desc)
+	for_each_irq_desc(i, desc) {
 		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	}
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc ? desc->kstat_irqs[cpu] : 0;
 }
 #endif
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 64c1c7253dae..e6d0a43cc125 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -13,6 +13,11 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
 
+extern struct lock_class_key irq_desc_lock_class;
+extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern spinlock_t sparse_irq_lock;
+extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void register_handler_proc(unsigned int irq, struct irqaction *action);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 801addda3c43..cd0cd8dcb345 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,8 +16,15 @@
 #include "internals.h"
 
 #ifdef CONFIG_SMP
+cpumask_var_t irq_default_affinity;
 
-cpumask_t irq_default_affinity = CPU_MASK_ALL;
+static int init_irq_default_affinity(void)
+{
+	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
+	cpumask_setall(irq_default_affinity);
+	return 0;
+}
+core_initcall(init_irq_default_affinity);
 
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
@@ -79,7 +86,7 @@ int irq_can_set_affinity(unsigned int irq)
 * @cpumask: cpumask
 *
 */
-int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -91,14 +98,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		desc->affinity = cpumask;
+		cpumask_copy(&desc->affinity, cpumask);
 		desc->chip->set_affinity(irq, cpumask);
 	} else {
 		desc->status |= IRQ_MOVE_PENDING;
-		desc->pending_mask = cpumask;
+		cpumask_copy(&desc->pending_mask, cpumask);
 	}
 #else
-	desc->affinity = cpumask;
+	cpumask_copy(&desc->affinity, cpumask);
 	desc->chip->set_affinity(irq, cpumask);
 #endif
 	desc->status |= IRQ_AFFINITY_SET;
@@ -112,26 +119,24 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 */
 int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
-	cpumask_t mask;
-
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
-	cpus_and(mask, cpu_online_map, irq_default_affinity);
-
 	/*
 	 * Preserve an userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
 	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpus_intersects(desc->affinity, cpu_online_map))
-			mask = desc->affinity;
+		if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+		    < nr_cpu_ids)
+			goto set_affinity;
 		else
 			desc->status &= ~IRQ_AFFINITY_SET;
 	}
 
-	desc->affinity = mask;
-	desc->chip->set_affinity(irq, mask);
+	cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+set_affinity:
+	desc->chip->set_affinity(irq, &desc->affinity);
 
 	return 0;
 }
@@ -370,16 +375,18 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		return 0;
 	}
 
-	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+	/* caller masked out all except trigger mode flags */
+	ret = chip->set_type(irq, flags);
 
 	if (ret)
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-				(int)(flags & IRQF_TRIGGER_MASK),
-				irq, chip->set_type);
+				(int)flags, irq, chip->set_type);
 	else {
+		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+			flags |= IRQ_LEVEL;
 		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~IRQ_TYPE_SENSE_MASK;
-		desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+		desc->status |= flags;
 	}
 
 	return ret;
@@ -459,7 +466,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 	/* Setup the type (level, edge polarity) if configured: */
 	if (new->flags & IRQF_TRIGGER_MASK) {
-		ret = __irq_set_trigger(desc, irq, new->flags);
+		ret = __irq_set_trigger(desc, irq,
+				new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret) {
 			spin_unlock_irqrestore(&desc->lock, flags);
@@ -673,6 +681,18 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh). That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+					== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 9db681d95814..bd72329e630c 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -4,7 +4,6 @@
 void move_masked_irq(int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	cpumask_t tmp;
 
 	if (likely(!(desc->status & IRQ_MOVE_PENDING)))
 		return;
@@ -19,7 +18,7 @@ void move_masked_irq(int irq)
 
 	desc->status &= ~IRQ_MOVE_PENDING;
 
-	if (unlikely(cpus_empty(desc->pending_mask)))
+	if (unlikely(cpumask_empty(&desc->pending_mask)))
 		return;
 
 	if (!desc->chip->set_affinity)
@@ -27,8 +26,6 @@ void move_masked_irq(int irq)
 
 	assert_spin_locked(&desc->lock);
 
-	cpus_and(tmp, desc->pending_mask, cpu_online_map);
-
 	/*
 	 * If there was a valid mask to work with, please
 	 * do the disable, re-program, enable sequence.
@@ -41,10 +38,13 @@ void move_masked_irq(int irq)
 	 * For correct operation this depends on the caller
 	 * masking the irqs.
 	 */
-	if (likely(!cpus_empty(tmp))) {
-		desc->chip->set_affinity(irq,tmp);
+	if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+		   < nr_cpu_ids)) {
+		cpumask_and(&desc->affinity,
+			    &desc->pending_mask, cpu_online_mask);
+		desc->chip->set_affinity(irq, &desc->affinity);
 	}
-	cpus_clear(desc->pending_mask);
+	cpumask_clear(&desc->pending_mask);
 }
 
 void move_native_irq(int irq)
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
new file mode 100644
index 000000000000..ecf765c6a77a
--- /dev/null
+++ b/kernel/irq/numa_migrate.c
@@ -0,0 +1,119 @@
+/*
+ * NUMA irq-desc migration code
+ *
+ * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
+ * the new "home node" of the IRQ.
+ */
+
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+
+#include "internals.h"
+
+static void init_copy_kstat_irqs(struct irq_desc *old_desc,
+				 struct irq_desc *desc,
+				 int cpu, int nr)
+{
+	unsigned long bytes;
+
+	init_kstat_irqs(desc, cpu, nr);
+
+	if (desc->kstat_irqs != old_desc->kstat_irqs) {
+		/* Compute how many bytes we need per irq and allocate them */
+		bytes = nr * sizeof(unsigned int);
+
+		memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
+	}
+}
+
+static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+	if (old_desc->kstat_irqs == desc->kstat_irqs)
+		return;
+
+	kfree(old_desc->kstat_irqs);
+	old_desc->kstat_irqs = NULL;
+}
+
+static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+		 struct irq_desc *desc, int cpu)
+{
+	memcpy(desc, old_desc, sizeof(struct irq_desc));
+	spin_lock_init(&desc->lock);
+	desc->cpu = cpu;
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+	arch_init_copy_chip_data(old_desc, desc, cpu);
+}
+
+static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+	free_kstat_irqs(old_desc, desc);
+	arch_free_chip_data(old_desc, desc);
+}
+
+static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
+						int cpu)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+	unsigned long flags;
+	int node;
+
+	irq = old_desc->irq;
+
+	spin_lock_irqsave(&sparse_irq_lock, flags);
+
+	/* We have to check it to avoid races with another CPU */
+	desc = irq_desc_ptrs[irq];
+
+	if (desc && old_desc != desc)
+		goto out_unlock;
+
+	node = cpu_to_node(cpu);
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
+	if (!desc) {
+		printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+		/* still use old one */
+		desc = old_desc;
+		goto out_unlock;
+	}
+	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
+
+	irq_desc_ptrs[irq] = desc;
+
+	/* free the old one */
+	free_one_irq_desc(old_desc, desc);
+	kfree(old_desc);
+
+out_unlock:
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
+
+	return desc;
+}
+
+struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
+{
+	int old_cpu;
+	int node, old_node;
+
+	/* those all static, do move them */
+	if (desc->irq < NR_IRQS_LEGACY)
+		return desc;
+
+	old_cpu = desc->cpu;
+	if (old_cpu != cpu) {
+		node = cpu_to_node(cpu);
+		old_node = cpu_to_node(old_cpu);
+		if (old_node != node)
+			desc = __real_move_irq_desc(desc, cpu);
+		else
+			desc->cpu = cpu;
+	}
+
+	return desc;
+}
+
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index d257e7d6a8a4..aae3f742bcec 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,7 +20,7 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
 	struct irq_desc *desc = irq_to_desc((long)m->private);
-	cpumask_t *mask = &desc->affinity;
+	const struct cpumask *mask = &desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	if (desc->status & IRQ_MOVE_PENDING)
@@ -40,33 +40,42 @@ static ssize_t irq_affinity_proc_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *pos)
 {
 	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
 	if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
 	    irq_balancing_disabled(irq))
 		return -EIO;
 
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
 	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto free_cpumask;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto free_cpumask;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
 		/* Special case for empty set - allow the architecture
 		   code to set default SMP affinity. */
-		return irq_select_affinity_usr(irq) ? -EINVAL : count;
-
-	irq_set_affinity(irq, new_value);
+		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
+	} else {
+		irq_set_affinity(irq, new_value);
+		err = count;
+	}
 
-	return count;
+free_cpumask:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int irq_affinity_proc_open(struct inode *inode, struct file *file)
@@ -84,7 +93,7 @@ static const struct file_operations irq_affinity_proc_fops = {
 
 static int default_affinity_show(struct seq_file *m, void *v)
 {
-	seq_cpumask(m, &irq_default_affinity);
+	seq_cpumask(m, irq_default_affinity);
 	seq_putc(m, '\n');
 	return 0;
 }
@@ -92,27 +101,37 @@ static int default_affinity_show(struct seq_file *m, void *v)
 static ssize_t default_affinity_write(struct file *file,
 		const char __user *buffer, size_t count, loff_t *ppos)
 {
-	cpumask_t new_value;
+	cpumask_var_t new_value;
 	int err;
 
+	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
+		return -ENOMEM;
+
 	err = cpumask_parse_user(buffer, count, new_value);
 	if (err)
-		return err;
+		goto out;
 
-	if (!is_affinity_mask_valid(new_value))
-		return -EINVAL;
+	if (!is_affinity_mask_valid(new_value)) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	/*
 	 * Do not allow disabling IRQs completely - it's a too easy
 	 * way to make the system unusable accidentally :-) At least
 	 * one online CPU still has to be targeted.
 	 */
-	if (!cpus_intersects(new_value, cpu_online_map))
-		return -EINVAL;
+	if (!cpumask_intersects(new_value, cpu_online_mask)) {
+		err = -EINVAL;
+		goto out;
+	}
 
-	irq_default_affinity = new_value;
+	cpumask_copy(irq_default_affinity, new_value);
+	err = count;
 
-	return count;
+out:
+	free_cpumask_var(new_value);
+	return err;
 }
 
 static int default_affinity_open(struct inode *inode, struct file *file)
@@ -243,7 +262,11 @@ void init_irq_proc(void)
 	/*
 	 * Create entries for all existing IRQs.
 	 */
-	for_each_irq_desc(irq, desc)
+	for_each_irq_desc(irq, desc) {
+		if (!desc)
+			continue;
+
 		register_irq_proc(irq, desc);
+	}
 }
 