author      Ralf Baechle <ralf@linux-mips.org>    2005-03-21 13:59:38 -0500
committer   Ralf Baechle <ralf@linux-mips.org>    2005-10-29 14:30:59 -0400
commit      4f12bfe5a498747a9a66f135a67aa8e1caa819dc (patch)
tree        f358bd77f56b4014c1e5a9b5804995fd521c7853 /arch/mips/sgi-ip27/ip27-irq.c
parent      6cbe0631591ca45177d52364dec81cdfba19fec0 (diff)
HUB interrupts are allocated per node, not per slice. Make manipulation
of the interrupt mask register atomic by disabling interrupts.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
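
For illustration, the standalone sketch below mirrors the allocation scheme this
patch switches to: a single irq_alloc_mask bitmap lives in the per-node hub_data
and is shared by both slices, while the level-to-irq mapping stays in the
per-slice slice_data. It is a userspace analogue, not the kernel code: the
bitmap helpers, structure layouts and LEVELS_PER_SLICE value are simplified
stand-ins for the kernel's, and the local_irq_save()/local_irq_restore()
bracketing the patch adds around the hub mask-register writes is only noted in
a comment.

/*
 * Userspace sketch of the per-node level allocator this patch introduces.
 * find_first_zero_bit() stands in for the kernel helper of the same name;
 * hub_data/slice_data are reduced to the fields the allocator touches.
 */
#include <stdio.h>
#include <stdlib.h>

#define LEVELS_PER_SLICE 128                     /* illustrative value */
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define MASK_WORDS       ((LEVELS_PER_SLICE + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct hub_data {                                /* one per node */
        unsigned long irq_alloc_mask[MASK_WORDS];
};

struct slice_data {                              /* one per CPU (slice) */
        int level_to_irq[LEVELS_PER_SLICE];
};

/* Return the first clear bit, or size if the mask is full. */
static int find_first_zero_bit(const unsigned long *mask, int size)
{
        int bit;

        for (bit = 0; bit < size; bit++)
                if (!(mask[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
                        return bit;
        return size;
}

static void set_mask_bit(int bit, unsigned long *mask)
{
        mask[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

/*
 * Same shape as alloc_level() after the patch: the free level is found and
 * reserved in the node's mask, the irq mapping stays per slice.  In the
 * kernel, writes of the resulting enable mask to the hub's PI_INT_MASK*
 * registers are additionally bracketed by local_irq_save()/restore().
 */
static int alloc_level(struct hub_data *hub, struct slice_data *si, int irq)
{
        int level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);

        if (level >= LEVELS_PER_SLICE) {
                fprintf(stderr, "node flooded with devices\n");
                exit(EXIT_FAILURE);
        }

        set_mask_bit(level, hub->irq_alloc_mask);
        si->level_to_irq[level] = irq;

        return level;
}

int main(void)
{
        struct hub_data hub = { { 0 } };
        struct slice_data slice_a = { { 0 } }, slice_b = { { 0 } };

        /* Both slices draw from the same node-wide mask, so they are
         * handed distinct levels for their devices. */
        printf("slice A got level %d\n", alloc_level(&hub, &slice_a, 10));
        printf("slice B got level %d\n", alloc_level(&hub, &slice_b, 11));
        return 0;
}

The point of the shared mask is visible in main(): with one per-node bitmap the
two slices of a node receive distinct levels rather than allocating from
separate per-slice masks.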
Diffstat (limited to 'arch/mips/sgi-ip27/ip27-irq.c')
-rw-r--r--    arch/mips/sgi-ip27/ip27-irq.c | 50
1 file changed, 27 insertions(+), 23 deletions(-)
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 61817a18aed..5d374e6ce63 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -74,14 +74,15 @@ extern int irq_to_slot[];
 
 static inline int alloc_level(int cpu, int irq)
 {
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
-	int level;			/* pre-allocated entries */
+	int level;
 
-	level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
+	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
 	if (level >= LEVELS_PER_SLICE)
 		panic("Cpu %d flooded with devices\n", cpu);
 
-	__set_bit(level, si->irq_alloc_mask);
+	__set_bit(level, hub->irq_alloc_mask);
 	si->level_to_irq[level] = irq;
 
 	return level;
@@ -216,9 +217,11 @@ static int intr_connect_level(int cpu, int bit)
 {
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
+	unsigned long flags;
 
-	__set_bit(bit, si->irq_enable_mask);
+	set_bit(bit, si->irq_enable_mask);
 
+	local_irq_save(flags);
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
@@ -226,6 +229,7 @@ static int intr_connect_level(int cpu, int bit)
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -235,7 +239,7 @@ static int intr_disconnect_level(int cpu, int bit)
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
 
-	__clear_bit(bit, si->irq_enable_mask);
+	clear_bit(bit, si->irq_enable_mask);
 
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
@@ -298,6 +302,7 @@ static unsigned int startup_bridge_irq(unsigned int irq)
 static void shutdown_bridge_irq(unsigned int irq)
 {
 	struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
+	struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
 	bridge_t *bridge = bc->base;
 	struct slice_data *si = cpu_data[bc->irq_cpu].data;
 	int pin, swlevel;
@@ -313,7 +318,7 @@ static void shutdown_bridge_irq(unsigned int irq)
 	swlevel = find_level(&cpu, irq);
 	intr_disconnect_level(cpu, swlevel);
 
-	__clear_bit(swlevel, si->irq_alloc_mask);
+	__clear_bit(swlevel, hub->irq_alloc_mask);
 	si->level_to_irq[swlevel] = -1;
 
 	bridge->b_int_enable &= ~(1 << pin);
@@ -433,25 +438,24 @@ void install_ipi(void)
 	int slice = LOCAL_HUB_L(PI_CPU_NUM);
 	int cpu = smp_processor_id();
 	struct slice_data *si = cpu_data[cpu].data;
-	hubreg_t mask, set;
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
+	int resched, call;
+
+	resched = CPU_RESCHED_A_IRQ + slice;
+	__set_bit(resched, hub->irq_alloc_mask);
+	__set_bit(resched, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(resched);
+
+	call = CPU_CALL_A_IRQ + slice;
+	__set_bit(call, hub->irq_alloc_mask);
+	__set_bit(call, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(call);
 
 	if (slice == 0) {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_A);	/* Slice A */
-		set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
-		mask |= set;
-		si->irq_enable_mask[0] |= set;
-		si->irq_alloc_mask[0] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_A, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
 	} else {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_B);	/* Slice B */
-		set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
-		mask |= set;
-		si->irq_enable_mask[1] |= set;
-		si->irq_alloc_mask[1] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_B, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
 }