 arch/mips/sgi-ip27/ip27-init.c | 44
 arch/mips/sgi-ip27/ip27-irq.c  | 50
 2 files changed, 48 insertions(+), 46 deletions(-)
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index ad1e86b54fae..8651a0e75404 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -56,12 +56,12 @@ static void __init per_hub_init(cnodeid_t cnode)
 {
 	struct hub_data *hub = hub_data(cnode);
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
+	int i;
 
 	cpu_set(smp_processor_id(), hub->h_cpus);
 
 	if (test_and_set_bit(cnode, hub_init_mask))
 		return;
-
 	/*
 	 * Set CRB timeout at 5ms, (< PI timeout of 10ms)
 	 */
@@ -88,6 +88,24 @@ static void __init per_hub_init(cnodeid_t cnode)
 		__flush_cache_all();
 	}
 #endif
+
+	/*
+	 * Some interrupts are reserved by hardware or by software convention.
+	 * Mark these as reserved right away so they won't be used accidently
+	 * later.
+	 */
+	for (i = 0; i <= BASE_PCI_IRQ; i++) {
+		__set_bit(i, hub->irq_alloc_mask);
+		LOCAL_HUB_CLR_INTR(INT_PEND0_BASELVL + i);
+	}
+
+	__set_bit(IP_PEND0_6_63, hub->irq_alloc_mask);
+	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
+
+	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
+		__set_bit(i, hub->irq_alloc_mask);
+		LOCAL_HUB_CLR_INTR(INT_PEND1_BASELVL + i);
+	}
 }
 
 void __init per_cpu_init(void)
@@ -104,30 +122,12 @@ void __init per_cpu_init(void)
 
 	clear_c0_status(ST0_IM);
 
+	per_hub_init(cnode);
+
 	for (i = 0; i < LEVELS_PER_SLICE; i++)
 		si->level_to_irq[i] = -1;
 
-	/*
-	 * Some interrupts are reserved by hardware or by software convention.
-	 * Mark these as reserved right away so they won't be used accidently
-	 * later.
-	 */
-	for (i = 0; i <= BASE_PCI_IRQ; i++) {
-		__set_bit(i, si->irq_alloc_mask);
-		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-	}
-
-	__set_bit(IP_PEND0_6_63, si->irq_alloc_mask);
-	LOCAL_HUB_S(PI_INT_PEND_MOD, IP_PEND0_6_63);
-
-	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++) {
-		__set_bit(i, si->irq_alloc_mask + 1);
-		LOCAL_HUB_S(PI_INT_PEND_MOD, i);
-	}
-
-	LOCAL_HUB_L(PI_INT_PEND0);
-
 	/*
 	 * We use this so we can find the local hub's data as fast as only
 	 * possible.
 	 */
@@ -140,8 +140,6 @@ void __init per_cpu_init(void)
 	install_cpu_nmi_handler(cputoslice(cpu));
 
 	set_c0_status(SRB_DEV0 | SRB_DEV1);
-
-	per_hub_init(cnode);
 }
 
 /*
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 61817a18aed2..5d374e6ce63d 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -74,14 +74,15 @@ extern int irq_to_slot[];
 
 static inline int alloc_level(int cpu, int irq)
 {
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
-	int level;		/* pre-allocated entries */
+	int level;
 
-	level = find_first_zero_bit(si->irq_alloc_mask, LEVELS_PER_SLICE);
+	level = find_first_zero_bit(hub->irq_alloc_mask, LEVELS_PER_SLICE);
 	if (level >= LEVELS_PER_SLICE)
 		panic("Cpu %d flooded with devices\n", cpu);
 
-	__set_bit(level, si->irq_alloc_mask);
+	__set_bit(level, hub->irq_alloc_mask);
 	si->level_to_irq[level] = irq;
 
 	return level;
@@ -216,9 +217,11 @@ static int intr_connect_level(int cpu, int bit)
 {
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
+	unsigned long flags;
 
-	__set_bit(bit, si->irq_enable_mask);
+	set_bit(bit, si->irq_enable_mask);
 
+	local_irq_save(flags);
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_A, si->irq_enable_mask[1]);
@@ -226,6 +229,7 @@ static int intr_connect_level(int cpu, int bit)
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_B, si->irq_enable_mask[0]);
 		REMOTE_HUB_S(nasid, PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -235,7 +239,7 @@ static int intr_disconnect_level(int cpu, int bit)
 	nasid_t nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
 	struct slice_data *si = cpu_data[cpu].data;
 
-	__clear_bit(bit, si->irq_enable_mask);
+	clear_bit(bit, si->irq_enable_mask);
 
 	if (!cputoslice(cpu)) {
 		REMOTE_HUB_S(nasid, PI_INT_MASK0_A, si->irq_enable_mask[0]);
@@ -298,6 +302,7 @@ static unsigned int startup_bridge_irq(unsigned int irq)
 static void shutdown_bridge_irq(unsigned int irq)
 {
 	struct bridge_controller *bc = IRQ_TO_BRIDGE(irq);
+	struct hub_data *hub = hub_data(cpu_to_node(bc->irq_cpu));
 	bridge_t *bridge = bc->base;
 	struct slice_data *si = cpu_data[bc->irq_cpu].data;
 	int pin, swlevel;
@@ -313,7 +318,7 @@ static void shutdown_bridge_irq(unsigned int irq)
 	swlevel = find_level(&cpu, irq);
 	intr_disconnect_level(cpu, swlevel);
 
-	__clear_bit(swlevel, si->irq_alloc_mask);
+	__clear_bit(swlevel, hub->irq_alloc_mask);
 	si->level_to_irq[swlevel] = -1;
 
 	bridge->b_int_enable &= ~(1 << pin);
@@ -433,25 +438,24 @@ void install_ipi(void)
 	int slice = LOCAL_HUB_L(PI_CPU_NUM);
 	int cpu = smp_processor_id();
 	struct slice_data *si = cpu_data[cpu].data;
-	hubreg_t mask, set;
+	struct hub_data *hub = hub_data(cpu_to_node(cpu));
+	int resched, call;
+
+	resched = CPU_RESCHED_A_IRQ + slice;
+	__set_bit(resched, hub->irq_alloc_mask);
+	__set_bit(resched, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(resched);
+
+	call = CPU_CALL_A_IRQ + slice;
+	__set_bit(call, hub->irq_alloc_mask);
+	__set_bit(call, si->irq_enable_mask);
+	LOCAL_HUB_CLR_INTR(call);
 
 	if (slice == 0) {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_A);	/* Slice A */
-		set = (1UL << CPU_RESCHED_A_IRQ) | (1UL << CPU_CALL_A_IRQ);
-		mask |= set;
-		si->irq_enable_mask[0] |= set;
-		si->irq_alloc_mask[0] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_A, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
 	} else {
-		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
-		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
-		mask = LOCAL_HUB_L(PI_INT_MASK0_B);	/* Slice B */
-		set = (1UL << CPU_RESCHED_B_IRQ) | (1UL << CPU_CALL_B_IRQ);
-		mask |= set;
-		si->irq_enable_mask[1] |= set;
-		si->irq_alloc_mask[1] |= set;
-		LOCAL_HUB_S(PI_INT_MASK0_B, mask);
+		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
+		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
 	}
 }