Diffstat (limited to 'arch/sparc/kernel/sun4d_irq.c')
-rw-r--r--  arch/sparc/kernel/sun4d_irq.c  494
1 file changed, 181 insertions(+), 313 deletions(-)
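
For orientation, the patch keeps the existing sun4d IRQ encoding described in the comments below: cpu-local interrupts use PILs 0-15 directly, while SBUS interrupts pack board, SBUS level and slot into a single integer. A minimal sketch of that encoding, mirroring the sun4d_encode_irq() helper added by this patch (example values are illustrative only):

    /* Mirrors the helper added by this patch; the inputs below are example values. */
    static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
    {
            return (board + 1) << 5 | (lvl << 2) | slot;
    }
    /* e.g. board 2, SBUS level 3, slot 1 -> (3 << 5) | (3 << 2) | 1 = 109 */
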
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index 77b4a8992710..a9ea60eb2c10 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -14,6 +14,7 @@
14#include <asm/io.h> 14#include <asm/io.h>
15#include <asm/sbi.h> 15#include <asm/sbi.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/setup.h>
17 18
18#include "kernel.h" 19#include "kernel.h"
19#include "irq.h" 20#include "irq.h"
@@ -22,22 +23,20 @@
22 * cpu local. CPU local interrupts cover the timer interrupts 23 * cpu local. CPU local interrupts cover the timer interrupts
23 * and whatnot, and we encode those as normal PILs between 24 * and whatnot, and we encode those as normal PILs between
24 * 0 and 15. 25 * 0 and 15.
25 * 26 * SBUS interrupts are encoded as a combination of board, level and slot.
26 * SBUS interrupts are encoded integers including the board number
27 * (plus one), the SBUS level, and the SBUS slot number. Sun4D
28 * IRQ dispatch is done by:
29 *
30 * 1) Reading the BW local interrupt table in order to get the bus
31 * interrupt mask.
32 *
33 * This table is indexed by SBUS interrupt level which can be
34 * derived from the PIL we got interrupted on.
35 *
36 * 2) For each bus showing interrupt pending from #1, read the
37 * SBI interrupt state register. This will indicate which slots
38 * have interrupts pending for that SBUS interrupt level.
39 */ 27 */
40 28
29struct sun4d_handler_data {
30 unsigned int cpuid; /* target cpu */
31 unsigned int real_irq; /* interrupt level */
32};
33
34
35static unsigned int sun4d_encode_irq(int board, int lvl, int slot)
36{
37 return (board + 1) << 5 | (lvl << 2) | slot;
38}
39
41struct sun4d_timer_regs { 40struct sun4d_timer_regs {
42 u32 l10_timer_limit; 41 u32 l10_timer_limit;
43 u32 l10_cur_countx; 42 u32 l10_cur_countx;
@@ -48,17 +47,12 @@ struct sun4d_timer_regs {
48 47
49static struct sun4d_timer_regs __iomem *sun4d_timers; 48static struct sun4d_timer_regs __iomem *sun4d_timers;
50 49
51#define TIMER_IRQ 10 50#define SUN4D_TIMER_IRQ 10
52
53#define MAX_STATIC_ALLOC 4
54static unsigned char sbus_tid[32];
55
56static struct irqaction *irq_action[NR_IRQS];
57 51
58static struct sbus_action { 52/* Specify which cpu handles interrupts from which board.
59 struct irqaction *action; 53 * Index is board - value is cpu.
60 /* For SMP this needs to be extended */ 54 */
61} *sbus_actions; 55static unsigned char board_to_cpu[32];
62 56
63static int pil_to_sbus[] = { 57static int pil_to_sbus[] = {
64 0, 58 0,
@@ -79,152 +73,81 @@ static int pil_to_sbus[] = {
79 0, 73 0,
80}; 74};
81 75
82static int sbus_to_pil[] = {
83 0,
84 2,
85 3,
86 5,
87 7,
88 9,
89 11,
90 13,
91};
92
93static int nsbi;
94
95/* Exported for sun4d_smp.c */ 76/* Exported for sun4d_smp.c */
96DEFINE_SPINLOCK(sun4d_imsk_lock); 77DEFINE_SPINLOCK(sun4d_imsk_lock);
97 78
98int show_sun4d_interrupts(struct seq_file *p, void *v) 79/* SBUS interrupts are encoded integers including the board number
80 * (plus one), the SBUS level, and the SBUS slot number. Sun4D
81 * IRQ dispatch is done by:
82 *
83 * 1) Reading the BW local interrupt table in order to get the bus
84 * interrupt mask.
85 *
86 * This table is indexed by SBUS interrupt level which can be
87 * derived from the PIL we got interrupted on.
88 *
89 * 2) For each bus showing interrupt pending from #1, read the
90 * SBI interrupt state register. This will indicate which slots
91 * have interrupts pending for that SBUS interrupt level.
92 *
93 * 3) Call the generic IRQ support.
94 */
95static void sun4d_sbus_handler_irq(int sbusl)
99{ 96{
100 int i = *(loff_t *) v, j = 0, k = 0, sbusl; 97 unsigned int bus_mask;
101 struct irqaction *action; 98 unsigned int sbino, slot;
102 unsigned long flags; 99 unsigned int sbil;
103#ifdef CONFIG_SMP 100
104 int x; 101 bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff;
105#endif 102 bw_clear_intr_mask(sbusl, bus_mask);
106 103
107 spin_lock_irqsave(&irq_action_lock, flags); 104 sbil = (sbusl << 2);
108 if (i < NR_IRQS) { 105 /* Loop for each pending SBI */
109 sbusl = pil_to_sbus[i]; 106 for (sbino = 0; bus_mask; sbino++) {
110 if (!sbusl) { 107 unsigned int idx, mask;
111 action = *(i + irq_action); 108
112 if (!action) 109 bus_mask >>= 1;
113 goto out_unlock; 110 if (!(bus_mask & 1))
114 } else { 111 continue;
115 for (j = 0; j < nsbi; j++) { 112 /* XXX This seems to ACK the irq twice. acquire_sbi()
116 for (k = 0; k < 4; k++) 113 * XXX uses swap, therefore this writes 0xf << sbil,
117 action = sbus_actions[(j << 5) + (sbusl << 2) + k].action; 114 * XXX then later release_sbi() will write the individual
118 if (action) 115 * XXX bits which were set again.
119 goto found_it; 116 */
120 } 117 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
121 goto out_unlock; 118 mask &= (0xf << sbil);
122 } 119
123found_it: seq_printf(p, "%3d: ", i); 120 /* Loop for each pending SBI slot */
124#ifndef CONFIG_SMP 121 idx = 0;
125 seq_printf(p, "%10u ", kstat_irqs(i)); 122 slot = (1 << sbil);
126#else 123 while (mask != 0) {
127 for_each_online_cpu(x) 124 unsigned int pil;
128 seq_printf(p, "%10u ", 125 struct irq_bucket *p;
129 kstat_cpu(cpu_logical_map(x)).irqs[i]); 126
130#endif 127 idx++;
131 seq_printf(p, "%c %s", 128 slot <<= 1;
132 (action->flags & IRQF_DISABLED) ? '+' : ' ', 129 if (!(mask & slot))
133 action->name); 130 continue;
134 action = action->next; 131
135 for (;;) { 132 mask &= ~slot;
136 for (; action; action = action->next) { 133 pil = sun4d_encode_irq(sbino, sbil, idx);
137 seq_printf(p, ",%s %s", 134
138 (action->flags & IRQF_DISABLED) ? " +" : "", 135 p = irq_map[pil];
139 action->name); 136 while (p) {
140 } 137 struct irq_bucket *next;
141 if (!sbusl) 138
142 break; 139 next = p->next;
143 k++; 140 generic_handle_irq(p->irq);
144 if (k < 4) { 141 p = next;
145 action = sbus_actions[(j << 5) + (sbusl << 2) + k].action;
146 } else {
147 j++;
148 if (j == nsbi)
149 break;
150 k = 0;
151 action = sbus_actions[(j << 5) + (sbusl << 2)].action;
152 } 142 }
143 release_sbi(SBI2DEVID(sbino), slot);
153 } 144 }
154 seq_putc(p, '\n');
155 } 145 }
156out_unlock:
157 spin_unlock_irqrestore(&irq_action_lock, flags);
158 return 0;
159}
160
161void sun4d_free_irq(unsigned int irq, void *dev_id)
162{
163 struct irqaction *action, **actionp;
164 struct irqaction *tmp = NULL;
165 unsigned long flags;
166
167 spin_lock_irqsave(&irq_action_lock, flags);
168 if (irq < 15)
169 actionp = irq + irq_action;
170 else
171 actionp = &(sbus_actions[irq - (1 << 5)].action);
172 action = *actionp;
173 if (!action) {
174 printk(KERN_ERR "Trying to free free IRQ%d\n", irq);
175 goto out_unlock;
176 }
177 if (dev_id) {
178 for (; action; action = action->next) {
179 if (action->dev_id == dev_id)
180 break;
181 tmp = action;
182 }
183 if (!action) {
184 printk(KERN_ERR "Trying to free free shared IRQ%d\n",
185 irq);
186 goto out_unlock;
187 }
188 } else if (action->flags & IRQF_SHARED) {
189 printk(KERN_ERR "Trying to free shared IRQ%d with NULL device ID\n",
190 irq);
191 goto out_unlock;
192 }
193 if (action->flags & SA_STATIC_ALLOC) {
194 /*
195 * This interrupt is marked as specially allocated
196 * so it is a bad idea to free it.
197 */
198 printk(KERN_ERR "Attempt to free statically allocated IRQ%d (%s)\n",
199 irq, action->name);
200 goto out_unlock;
201 }
202
203 if (tmp)
204 tmp->next = action->next;
205 else
206 *actionp = action->next;
207
208 spin_unlock_irqrestore(&irq_action_lock, flags);
209
210 synchronize_irq(irq);
211
212 spin_lock_irqsave(&irq_action_lock, flags);
213
214 kfree(action);
215
216 if (!(*actionp))
217 __disable_irq(irq);
218
219out_unlock:
220 spin_unlock_irqrestore(&irq_action_lock, flags);
221} 146}
222 147
223void sun4d_handler_irq(int pil, struct pt_regs *regs) 148void sun4d_handler_irq(int pil, struct pt_regs *regs)
224{ 149{
225 struct pt_regs *old_regs; 150 struct pt_regs *old_regs;
226 struct irqaction *action;
227 int cpu = smp_processor_id();
228 /* SBUS IRQ level (1 - 7) */ 151 /* SBUS IRQ level (1 - 7) */
229 int sbusl = pil_to_sbus[pil]; 152 int sbusl = pil_to_sbus[pil];
230 153
@@ -233,160 +156,96 @@ void sun4d_handler_irq(int pil, struct pt_regs *regs)
233 156
234 cc_set_iclr(1 << pil); 157 cc_set_iclr(1 << pil);
235 158
159#ifdef CONFIG_SMP
160 /*
161 * Check IPI data structures after IRQ has been cleared. Hard and Soft
162 * IRQ can happen at the same time, so both cases are always handled.
163 */
164 if (pil == SUN4D_IPI_IRQ)
165 sun4d_ipi_interrupt();
166#endif
167
236 old_regs = set_irq_regs(regs); 168 old_regs = set_irq_regs(regs);
237 irq_enter(); 169 irq_enter();
238 kstat_cpu(cpu).irqs[pil]++; 170 if (sbusl == 0) {
239 if (!sbusl) { 171 /* cpu interrupt */
240 action = *(pil + irq_action); 172 struct irq_bucket *p;
241 if (!action) 173
242 unexpected_irq(pil, NULL, regs); 174 p = irq_map[pil];
243 do { 175 while (p) {
244 action->handler(pil, action->dev_id); 176 struct irq_bucket *next;
245 action = action->next; 177
246 } while (action); 178 next = p->next;
179 generic_handle_irq(p->irq);
180 p = next;
181 }
247 } else { 182 } else {
248 int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; 183 /* SBUS interrupt */
249 int sbino; 184 sun4d_sbus_handler_irq(sbusl);
250 struct sbus_action *actionp;
251 unsigned mask, slot;
252 int sbil = (sbusl << 2);
253
254 bw_clear_intr_mask(sbusl, bus_mask);
255
256 /* Loop for each pending SBI */
257 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1)
258 if (bus_mask & 1) {
259 mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil);
260 mask &= (0xf << sbil);
261 actionp = sbus_actions + (sbino << 5) + (sbil);
262 /* Loop for each pending SBI slot */
263 for (slot = (1 << sbil); mask; slot <<= 1, actionp++)
264 if (mask & slot) {
265 mask &= ~slot;
266 action = actionp->action;
267
268 if (!action)
269 unexpected_irq(pil, NULL, regs);
270 do {
271 action->handler(pil, action->dev_id);
272 action = action->next;
273 } while (action);
274 release_sbi(SBI2DEVID(sbino), slot);
275 }
276 }
277 } 185 }
278 irq_exit(); 186 irq_exit();
279 set_irq_regs(old_regs); 187 set_irq_regs(old_regs);
280} 188}
281 189
282int sun4d_request_irq(unsigned int irq, 190
283 irq_handler_t handler, 191static void sun4d_mask_irq(struct irq_data *data)
284 unsigned long irqflags, const char *devname, void *dev_id)
285{ 192{
286 struct irqaction *action, *tmp = NULL, **actionp; 193 struct sun4d_handler_data *handler_data = data->handler_data;
194 unsigned int real_irq;
195#ifdef CONFIG_SMP
196 int cpuid = handler_data->cpuid;
287 unsigned long flags; 197 unsigned long flags;
288 int ret; 198#endif
289 199 real_irq = handler_data->real_irq;
290 if (irq > 14 && irq < (1 << 5)) { 200#ifdef CONFIG_SMP
291 ret = -EINVAL; 201 spin_lock_irqsave(&sun4d_imsk_lock, flags);
292 goto out; 202 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq));
293 } 203 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
294 204#else
295 if (!handler) { 205 cc_set_imsk(cc_get_imsk() | (1 << real_irq));
296 ret = -EINVAL; 206#endif
297 goto out;
298 }
299
300 spin_lock_irqsave(&irq_action_lock, flags);
301
302 if (irq >= (1 << 5))
303 actionp = &(sbus_actions[irq - (1 << 5)].action);
304 else
305 actionp = irq + irq_action;
306 action = *actionp;
307
308 if (action) {
309 if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) {
310 for (tmp = action; tmp->next; tmp = tmp->next)
311 /* find last entry - tmp used below */;
312 } else {
313 ret = -EBUSY;
314 goto out_unlock;
315 }
316 if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) {
317 printk(KERN_ERR "Attempt to mix fast and slow interrupts on IRQ%d denied\n",
318 irq);
319 ret = -EBUSY;
320 goto out_unlock;
321 }
322 action = NULL; /* Or else! */
323 }
324
325 /* If this is flagged as statically allocated then we use our
326 * private struct which is never freed.
327 */
328 if (irqflags & SA_STATIC_ALLOC) {
329 if (static_irq_count < MAX_STATIC_ALLOC)
330 action = &static_irqaction[static_irq_count++];
331 else
332 printk(KERN_ERR "Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n",
333 irq, devname);
334 }
335
336 if (action == NULL)
337 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
338
339 if (!action) {
340 ret = -ENOMEM;
341 goto out_unlock;
342 }
343
344 action->handler = handler;
345 action->flags = irqflags;
346 action->name = devname;
347 action->next = NULL;
348 action->dev_id = dev_id;
349
350 if (tmp)
351 tmp->next = action;
352 else
353 *actionp = action;
354
355 __enable_irq(irq);
356
357 ret = 0;
358out_unlock:
359 spin_unlock_irqrestore(&irq_action_lock, flags);
360out:
361 return ret;
362} 207}
363 208
364static void sun4d_disable_irq(unsigned int irq) 209static void sun4d_unmask_irq(struct irq_data *data)
365{ 210{
366 int tid = sbus_tid[(irq >> 5) - 1]; 211 struct sun4d_handler_data *handler_data = data->handler_data;
212 unsigned int real_irq;
213#ifdef CONFIG_SMP
214 int cpuid = handler_data->cpuid;
367 unsigned long flags; 215 unsigned long flags;
216#endif
217 real_irq = handler_data->real_irq;
368 218
369 if (irq < NR_IRQS) 219#ifdef CONFIG_SMP
370 return;
371
372 spin_lock_irqsave(&sun4d_imsk_lock, flags); 220 spin_lock_irqsave(&sun4d_imsk_lock, flags);
373 cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7])); 221 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
374 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 222 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
223#else
224 cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
225#endif
375} 226}
376 227
377static void sun4d_enable_irq(unsigned int irq) 228static unsigned int sun4d_startup_irq(struct irq_data *data)
378{ 229{
379 int tid = sbus_tid[(irq >> 5) - 1]; 230 irq_link(data->irq);
380 unsigned long flags; 231 sun4d_unmask_irq(data);
381 232 return 0;
382 if (irq < NR_IRQS) 233}
383 return;
384 234
385 spin_lock_irqsave(&sun4d_imsk_lock, flags); 235static void sun4d_shutdown_irq(struct irq_data *data)
386 cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7])); 236{
387 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 237 sun4d_mask_irq(data);
238 irq_unlink(data->irq);
388} 239}
389 240
241struct irq_chip sun4d_irq = {
242 .name = "sun4d",
243 .irq_startup = sun4d_startup_irq,
244 .irq_shutdown = sun4d_shutdown_irq,
245 .irq_unmask = sun4d_unmask_irq,
246 .irq_mask = sun4d_mask_irq,
247};
248
390#ifdef CONFIG_SMP 249#ifdef CONFIG_SMP
391static void sun4d_set_cpu_int(int cpu, int level) 250static void sun4d_set_cpu_int(int cpu, int level)
392{ 251{
@@ -413,7 +272,7 @@ void __init sun4d_distribute_irqs(void)
413 for_each_node_by_name(dp, "sbi") { 272 for_each_node_by_name(dp, "sbi") {
414 int devid = of_getintprop_default(dp, "device-id", 0); 273 int devid = of_getintprop_default(dp, "device-id", 0);
415 int board = of_getintprop_default(dp, "board#", 0); 274 int board = of_getintprop_default(dp, "board#", 0);
416 sbus_tid[board] = cpuid; 275 board_to_cpu[board] = cpuid;
417 set_sbi_tid(devid, cpuid << 3); 276 set_sbi_tid(devid, cpuid << 3);
418 } 277 }
419 printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid); 278 printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid);
@@ -443,15 +302,16 @@ static void __init sun4d_load_profile_irqs(void)
443unsigned int sun4d_build_device_irq(struct platform_device *op, 302unsigned int sun4d_build_device_irq(struct platform_device *op,
444 unsigned int real_irq) 303 unsigned int real_irq)
445{ 304{
446 static int pil_to_sbus[] = {
447 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
448 };
449 struct device_node *dp = op->dev.of_node; 305 struct device_node *dp = op->dev.of_node;
450 struct device_node *io_unit, *sbi = dp->parent; 306 struct device_node *io_unit, *sbi = dp->parent;
451 const struct linux_prom_registers *regs; 307 const struct linux_prom_registers *regs;
308 struct sun4d_handler_data *handler_data;
309 unsigned int pil;
310 unsigned int irq;
452 int board, slot; 311 int board, slot;
453 int sbusl; 312 int sbusl;
454 313
314 irq = 0;
455 while (sbi) { 315 while (sbi) {
456 if (!strcmp(sbi->name, "sbi")) 316 if (!strcmp(sbi->name, "sbi"))
457 break; 317 break;
@@ -484,7 +344,28 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
484 344
485 sbusl = pil_to_sbus[real_irq]; 345 sbusl = pil_to_sbus[real_irq];
486 if (sbusl) 346 if (sbusl)
487 return (((board + 1) << 5) + (sbusl << 2) + slot); 347 pil = sun4d_encode_irq(board, sbusl, slot);
348 else
349 pil = real_irq;
350
351 irq = irq_alloc(real_irq, pil);
352 if (irq == 0)
353 goto err_out;
354
355 handler_data = irq_get_handler_data(irq);
356 if (unlikely(handler_data))
357 goto err_out;
358
359 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
360 if (unlikely(!handler_data)) {
361 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
362 prom_halt();
363 }
364 handler_data->cpuid = board_to_cpu[board];
365 handler_data->real_irq = real_irq;
366 irq_set_chip_and_handler_name(irq, &sun4d_irq,
367 handle_level_irq, "level");
368 irq_set_handler_data(irq, handler_data);
488 369
489err_out: 370err_out:
490 return real_irq; 371 return real_irq;
@@ -518,6 +399,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
518{ 399{
519 struct device_node *dp; 400 struct device_node *dp;
520 struct resource res; 401 struct resource res;
402 unsigned int irq;
521 const u32 *reg; 403 const u32 *reg;
522 int err; 404 int err;
523 405
@@ -552,9 +434,8 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
552 434
553 master_l10_counter = &sun4d_timers->l10_cur_count; 435 master_l10_counter = &sun4d_timers->l10_cur_count;
554 436
555 err = request_irq(TIMER_IRQ, counter_fn, 437 irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ);
556 (IRQF_DISABLED | SA_STATIC_ALLOC), 438 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
557 "timer", NULL);
558 if (err) { 439 if (err) {
559 prom_printf("sun4d_init_timers: request_irq() failed with %d\n", 440 prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
560 err); 441 err);
@@ -567,27 +448,16 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
567void __init sun4d_init_sbi_irq(void) 448void __init sun4d_init_sbi_irq(void)
568{ 449{
569 struct device_node *dp; 450 struct device_node *dp;
570 int target_cpu = 0; 451 int target_cpu;
571 452
572#ifdef CONFIG_SMP
573 target_cpu = boot_cpu_id; 453 target_cpu = boot_cpu_id;
574#endif
575
576 nsbi = 0;
577 for_each_node_by_name(dp, "sbi")
578 nsbi++;
579 sbus_actions = kzalloc(nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC);
580 if (!sbus_actions) {
581 prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n");
582 prom_halt();
583 }
584 for_each_node_by_name(dp, "sbi") { 454 for_each_node_by_name(dp, "sbi") {
585 int devid = of_getintprop_default(dp, "device-id", 0); 455 int devid = of_getintprop_default(dp, "device-id", 0);
586 int board = of_getintprop_default(dp, "board#", 0); 456 int board = of_getintprop_default(dp, "board#", 0);
587 unsigned int mask; 457 unsigned int mask;
588 458
589 set_sbi_tid(devid, target_cpu << 3); 459 set_sbi_tid(devid, target_cpu << 3);
590 sbus_tid[board] = target_cpu; 460 board_to_cpu[board] = target_cpu;
591 461
592 /* Get rid of pending irqs from PROM */ 462 /* Get rid of pending irqs from PROM */
593 mask = acquire_sbi(devid, 0xffffffff); 463 mask = acquire_sbi(devid, 0xffffffff);
@@ -603,12 +473,10 @@ void __init sun4d_init_IRQ(void)
603{ 473{
604 local_irq_disable(); 474 local_irq_disable();
605 475
606 BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM);
607 BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM);
608 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM); 476 BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM);
609 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); 477 BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
610 478
611 sparc_irq_config.init_timers = sun4d_init_timers; 479 sparc_irq_config.init_timers = sun4d_init_timers;
612 sparc_irq_config.build_device_irq = sun4d_build_device_irq; 480 sparc_irq_config.build_device_irq = sun4d_build_device_irq;
613 481
614#ifdef CONFIG_SMP 482#ifdef CONFIG_SMP