| author | Yinghai Lu <yhlu.kernel@gmail.com> | 2008-08-19 23:50:05 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-10-16 10:52:29 -0400 |
| commit | 08678b0841267c1d00d771fe01548d86043d065e (patch) | |
| tree | 7debb21f9e9a768ced43077f7376797a0c46f8c0 /arch/x86/kernel/irq_64.c | |
| parent | bfea1238beac9d306eeac081c67de5ca6aec4c7a (diff) | |
generic: sparse irqs: use irq_desc() together with dyn_array, instead of irq_desc[]
Add CONFIG_HAVE_SPARSE_IRQ to enable use of a condensed array.
Get rid of irq_desc[] array assumptions.
Preallocate 32 irq_desc entries; irq_desc() will try to allocate more when needed.
( No change in functionality is expected anywhere, except the odd build
failure where we missed a code site or where a crossing commit introduces
new irq_desc[] usage. )
v2: per Eric's suggestion, change get_irq_desc() to irq_desc()
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
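
The conversion itself is mechanical: each open-coded irq_desc[i] access becomes a lookup through irq_to_desc(i), usually hoisted into a local struct irq_desc *desc so the descriptor is fetched once per IRQ. Below is a minimal userspace sketch of that before/after pattern; the stub types, the fixed 32-entry preallocation, and show_one_irq() are illustrative stand-ins, not the kernel's real dyn_array-backed implementation.

```c
/* Minimal sketch of the irq_desc[] -> irq_to_desc() pattern.
 * Stub types only; not the kernel's real implementation. */
#include <stdio.h>
#include <stddef.h>

struct irq_chip { const char *name; };
struct irq_desc { struct irq_chip *chip; const char *name; };

#define NR_PREALLOC 32                     /* the patch preallocates 32 descriptors */
static struct irq_desc prealloc[NR_PREALLOC];

/* With sparse IRQs the descriptor is looked up, not indexed directly;
 * the real code would grow the backing array on demand. */
static struct irq_desc *irq_to_desc(unsigned int irq)
{
	return irq < NR_PREALLOC ? &prealloc[irq] : NULL;
}

static void show_one_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);  /* was: &irq_desc[irq] */

	if (!desc || !desc->chip)
		return;
	printf("%3u: %8s-%-8s\n", irq, desc->chip->name, desc->name);
}

int main(void)
{
	static struct irq_chip chip = { .name = "IO-APIC" };

	prealloc[4] = (struct irq_desc){ .chip = &chip, .name = "edge" };
	show_one_irq(4);                           /* "  4:  IO-APIC-edge" plus padding */
	return 0;
}
```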
Diffstat (limited to 'arch/x86/kernel/irq_64.c')
| -rw-r--r-- | arch/x86/kernel/irq_64.c | 35 |
1 file changed, 19 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index e1f0839430d2..738eb65a924e 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -83,15 +83,16 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i < nr_irqs) {
 		unsigned any_count = 0;
+		struct irq_desc *desc = irq_to_desc(i);
 
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		spin_lock_irqsave(&desc->lock, flags);
 #ifndef CONFIG_SMP
 		any_count = kstat_irqs(i);
 #else
 		for_each_online_cpu(j)
 			any_count |= kstat_cpu(j).irqs[i];
 #endif
-		action = irq_desc[i].action;
+		action = desc->action;
 		if (!action && !any_count)
 			goto skip;
 		seq_printf(p, "%3d: ",i);
@@ -101,8 +102,8 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %8s", irq_desc[i].chip->name);
-		seq_printf(p, "-%-8s", irq_desc[i].name);
+		seq_printf(p, " %8s", desc->chip->name);
+		seq_printf(p, "-%-8s", desc->name);
 
 		if (action) {
 			seq_printf(p, " %s", action->name);
@@ -111,7 +112,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		}
 		seq_putc(p, '\n');
 skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		spin_unlock_irqrestore(&desc->lock, flags);
 	} else if (i == nr_irqs) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
@@ -228,37 +229,39 @@ void fixup_irqs(cpumask_t map)
 		cpumask_t mask;
 		int break_affinity = 0;
 		int set_affinity = 1;
+		struct irq_desc *desc;
 
 		if (irq == 2)
 			continue;
 
+		desc = irq_to_desc(irq);
 		/* interrupt's are disabled at this point */
-		spin_lock(&irq_desc[irq].lock);
+		spin_lock(&desc->lock);
 
 		if (!irq_has_action(irq) ||
-		    cpus_equal(irq_desc[irq].affinity, map)) {
-			spin_unlock(&irq_desc[irq].lock);
+		    cpus_equal(desc->affinity, map)) {
+			spin_unlock(&desc->lock);
 			continue;
 		}
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpus_and(mask, desc->affinity, map);
 		if (cpus_empty(mask)) {
 			break_affinity = 1;
 			mask = map;
 		}
 
-		if (irq_desc[irq].chip->mask)
-			irq_desc[irq].chip->mask(irq);
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
 
-		if (irq_desc[irq].chip->set_affinity)
-			irq_desc[irq].chip->set_affinity(irq, mask);
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, mask);
 		else if (!(warned++))
 			set_affinity = 0;
 
-		if (irq_desc[irq].chip->unmask)
-			irq_desc[irq].chip->unmask(irq);
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
 
-		spin_unlock(&irq_desc[irq].lock);
+		spin_unlock(&desc->lock);
 
 		if (break_affinity && set_affinity)
 			printk("Broke affinity for irq %i\n", irq);
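
The fixup_irqs() hunk applies the same recipe inside a loop: fetch the descriptor once per IRQ, then take desc->lock and reach the chip callbacks through the cached pointer instead of repeating the irq_desc[irq] indexing. A condensed sketch of that loop shape follows; the stub types, retarget_one(), the pthread mutex, and the plain unsigned long standing in for cpumask_t are all hypothetical, and irq_to_desc() is assumed to be provided (e.g. as in the earlier sketch).

```c
/* Sketch of the per-IRQ loop shape after the conversion; stub types,
 * not the kernel's fixup_irqs(). */
#include <pthread.h>

struct irq_chip {
	void (*mask)(unsigned int irq);
	void (*unmask)(unsigned int irq);
	void (*set_affinity)(unsigned int irq, unsigned long mask);
};

struct irq_desc {
	pthread_mutex_t lock;        /* stand-in for the kernel spinlock */
	struct irq_chip *chip;
	unsigned long affinity;      /* stand-in for cpumask_t */
};

/* Lookup assumed to exist elsewhere (see the earlier sketch). */
extern struct irq_desc *irq_to_desc(unsigned int irq);

static void retarget_one(unsigned int irq, unsigned long online_map)
{
	struct irq_desc *desc = irq_to_desc(irq);   /* single lookup ... */
	unsigned long mask;

	if (!desc || !desc->chip)
		return;

	pthread_mutex_lock(&desc->lock);            /* ... reused for every access */
	mask = desc->affinity & online_map;
	if (!mask)                                  /* affinity broken: fall back */
		mask = online_map;

	if (desc->chip->mask)
		desc->chip->mask(irq);
	if (desc->chip->set_affinity)
		desc->chip->set_affinity(irq, mask);
	if (desc->chip->unmask)
		desc->chip->unmask(irq);
	pthread_mutex_unlock(&desc->lock);
}
```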