author	Suresh Siddha <suresh.b.siddha@intel.com>	2009-10-26 18:24:31 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-11-02 09:56:34 -0500
commit	7a7732bc0f7c46f217dbec723f25366b6285cc42 (patch)
tree	6238e0cf1d22c1939558b693e9a51f2703099639 /arch/x86/kernel
parent	6f9b41006af1bc489030f84ee247abc0df1edccd (diff)
x86: Unify fixup_irqs() for 32-bit and 64-bit kernels
There is no reason to have different fixup_irqs() implementations for 32-bit and 64-bit kernels. Unify them by using the superior 64-bit version for both.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Gary Hade <garyhade@us.ibm.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
LKML-Reference: <20091026230001.562512739@sbs-t61.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
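For readers who want the gist before the diff, here is a minimal, hypothetical user-space sketch of the per-IRQ decision the unified fixup_irqs() makes: keep the current affinity if it still intersects the online CPUs, otherwise fall back to all CPUs and report that the affinity was broken. Plain unsigned bitmasks stand in for cpumasks; ONLINE_MASK, ALL_MASK and retarget() are invented names for illustration, not kernel APIs.

/* build: cc -std=c99 -o retarget retarget.c && ./retarget */
#include <stdio.h>

#define ONLINE_MASK	0x3u	/* assume CPUs 0 and 1 stay online */
#define ALL_MASK	0xfu	/* stand-in for cpu_all_mask (CPUs 0-3) */

/*
 * Mirror of the patch's decision: if an IRQ's affinity no longer
 * intersects the online CPUs, "break" it and retarget to all CPUs.
 */
static unsigned int retarget(unsigned int affinity, int *broke)
{
	if ((affinity & ONLINE_MASK) == 0) {
		*broke = 1;
		return ALL_MASK;
	}
	*broke = 0;
	return affinity;
}

int main(void)
{
	unsigned int masks[] = { 0x1u, 0x4u };	/* IRQ 0 -> CPU0, IRQ 1 -> CPU2 (offline) */

	for (unsigned int i = 0; i < 2; i++) {
		int broke;
		unsigned int newmask = retarget(masks[i], &broke);

		printf("irq %u: affinity 0x%x -> 0x%x%s\n", i, masks[i],
		       newmask, broke ? " (affinity broken)" : "");
	}
	return 0;
}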
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/irq.c	59
-rw-r--r--	arch/x86/kernel/irq_32.c	45
-rw-r--r--	arch/x86/kernel/irq_64.c	58
3 files changed, 59 insertions(+), 103 deletions(-)
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 391206199515..3ea66556e5e1 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -276,3 +276,62 @@ void smp_generic_interrupt(struct pt_regs *regs)
 }
 
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
+void fixup_irqs(void)
+{
+	unsigned int irq;
+	static int warned;
+	struct irq_desc *desc;
+
+	for_each_irq_desc(irq, desc) {
+		int break_affinity = 0;
+		int set_affinity = 1;
+		const struct cpumask *affinity;
+
+		if (!desc)
+			continue;
+		if (irq == 2)
+			continue;
+
+		/* interrupt's are disabled at this point */
+		spin_lock(&desc->lock);
+
+		affinity = desc->affinity;
+		if (!irq_has_action(irq) ||
+		    cpumask_equal(affinity, cpu_online_mask)) {
+			spin_unlock(&desc->lock);
+			continue;
+		}
+
+		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+			break_affinity = 1;
+			affinity = cpu_all_mask;
+		}
+
+		if (desc->chip->mask)
+			desc->chip->mask(irq);
+
+		if (desc->chip->set_affinity)
+			desc->chip->set_affinity(irq, affinity);
+		else if (!(warned++))
+			set_affinity = 0;
+
+		if (desc->chip->unmask)
+			desc->chip->unmask(irq);
+
+		spin_unlock(&desc->lock);
+
+		if (break_affinity && set_affinity)
+			printk("Broke affinity for irq %i\n", irq);
+		else if (!set_affinity)
+			printk("Cannot set affinity for irq %i\n", irq);
+	}
+
+	/* That doesn't seem sufficient.  Give it 1ms. */
+	local_irq_enable();
+	mdelay(1);
+	local_irq_disable();
+}
+#endif
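As the comments in the new code note, fixup_irqs() expects to run on the CPU that is going away, with interrupts disabled and after that CPU has already been dropped from cpu_online_mask. A minimal, hypothetical caller sketch follows (a kernel-flavored fragment for orientation only; example_cpu_offline() is an invented name and this is not the actual x86 hotplug path, which is not part of this patch):

/* Hypothetical caller sketch; not taken from this patch. */
static void example_cpu_offline(unsigned int cpu)
{
	local_irq_disable();		/* fixup_irqs() assumes irqs are off */
	set_cpu_online(cpu, false);	/* drop this CPU from cpu_online_mask */
	fixup_irqs();			/* retarget IRQ affinities away from it */
}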
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 7d35d0fe2329..10709f29d166 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 
 	return true;
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
-void fixup_irqs(void)
-{
-	unsigned int irq;
-	struct irq_desc *desc;
-
-	for_each_irq_desc(irq, desc) {
-		const struct cpumask *affinity;
-
-		if (!desc)
-			continue;
-		if (irq == 2)
-			continue;
-
-		affinity = desc->affinity;
-		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-			printk("Breaking affinity for irq %i\n", irq);
-			affinity = cpu_all_mask;
-		}
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, affinity);
-		else if (desc->action)
-			printk_once("Cannot set affinity for irq %i\n", irq);
-	}
-
-#if 0
-	barrier();
-	/* Ingo Molnar says: "after the IO-APIC masks have been redirected
-	   [note the nop - the interrupt-enable boundary on x86 is two
-	   instructions from sti] - to flush out pending hardirqs and
-	   IPIs. After this point nothing is supposed to reach this CPU." */
-	__asm__ __volatile__("sti; nop; cli");
-	barrier();
-#else
-	/* That doesn't seem sufficient.  Give it 1ms. */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-#endif
-}
-#endif
-
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 977d8b43a0dd..acf8fbf8fbda 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
 	return true;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
-void fixup_irqs(void)
-{
-	unsigned int irq;
-	static int warned;
-	struct irq_desc *desc;
-
-	for_each_irq_desc(irq, desc) {
-		int break_affinity = 0;
-		int set_affinity = 1;
-		const struct cpumask *affinity;
-
-		if (!desc)
-			continue;
-		if (irq == 2)
-			continue;
-
-		/* interrupt's are disabled at this point */
-		spin_lock(&desc->lock);
-
-		affinity = desc->affinity;
-		if (!irq_has_action(irq) ||
-		    cpumask_equal(affinity, cpu_online_mask)) {
-			spin_unlock(&desc->lock);
-			continue;
-		}
-
-		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-			break_affinity = 1;
-			affinity = cpu_all_mask;
-		}
-
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
-
-		if (desc->chip->set_affinity)
-			desc->chip->set_affinity(irq, affinity);
-		else if (!(warned++))
-			set_affinity = 0;
-
-		if (desc->chip->unmask)
-			desc->chip->unmask(irq);
-
-		spin_unlock(&desc->lock);
-
-		if (break_affinity && set_affinity)
-			printk("Broke affinity for irq %i\n", irq);
-		else if (!set_affinity)
-			printk("Cannot set affinity for irq %i\n", irq);
-	}
-
-	/* That doesn't seem sufficient.  Give it 1ms. */
-	local_irq_enable();
-	mdelay(1);
-	local_irq_disable();
-}
-#endif
 
 extern void call_softirq(void);
 