author	David S. Miller <davem@davemloft.net>	2006-06-20 04:23:32 -0400
committer	David S. Miller <davem@davemloft.net>	2006-06-20 04:23:32 -0400
commit	e18e2a00efc8352c131eb8d5a460149fb5776f1c (patch)
tree	bee6e965d77f4289c37300714d7976c3e19f2994
parent	8047e247c899f80c33a23ad7e9e250224f0d26a5 (diff)
[SPARC64]: Move over to GENERIC_HARDIRQS.
This is the long overdue conversion of sparc64 over to the generic IRQ
layer.  The kernel image is slightly larger, but the BSS is ~60K smaller
due to the reduced size of struct ino_bucket.

A lot of IRQ implementation details, including ino_bucket, were moved
out of asm-sparc64/irq.h and are now private to
arch/sparc64/kernel/irq.c, and most of the code in irq.c simply
disappeared.

One thing that is different at the moment is IRQ distribution: we do it
at enable_irq() time.  If the cpu mask is ALL, we round-robin using a
global rotating cpu counter; otherwise we pick the first cpu in the
mask, to support single-cpu targeting.  This is similar to what
powerpc's XICS IRQ support code does.

This works fine on my UP SB1000, and the SMP build goes fine and runs
on that machine, but lots of testing on different setups is needed.

Signed-off-by: David S. Miller <davem@davemloft.net>
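[Editor's note: for reference, the enable-time target selection described
above reads roughly as the sketch below.  It mirrors the irq_choose_cpu()
helper introduced in irq.c later in this patch; the name choose_irq_cpu()
is illustrative only, and the rover spinlock plus the empty-intersection
fallback of the real code are omitted.  The cpumask primitives used
(cpus_equal(), cpu_online(), cpus_and(), first_cpu()) are the standard
2.6-era kernel API, all of which appear in the patch itself.]

	/* Illustrative sketch only -- not part of the patch.  Pick a
	 * target cpu for an interrupt at enable_irq() time: an affinity
	 * mask of ALL round-robins over the online cpus via a global
	 * rotating counter, any narrower mask targets its first online
	 * cpu.  (Locking of irq_rover omitted.)
	 */
	static int irq_rover;	/* global rotating cpu counter */

	static int choose_irq_cpu(cpumask_t mask)
	{
		if (cpus_equal(mask, CPU_MASK_ALL)) {
			/* Advance the rover to the next online cpu. */
			do {
				if (++irq_rover >= NR_CPUS)
					irq_rover = 0;
			} while (!cpu_online(irq_rover));
			return irq_rover;
		} else {
			cpumask_t tmp;

			/* Single-cpu targeting: first online cpu in mask. */
			cpus_and(tmp, cpu_online_map, mask);
			return first_cpu(tmp);
		}
	}

Making the choice at enable_irq() time keeps the interrupt hot path free
of any balancing logic; a new target is simply picked the next time the
interrupt is enabled.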
 arch/sparc64/Kconfig                |   4
 arch/sparc64/kernel/devices.c       |   2
 arch/sparc64/kernel/entry.S         |   2
 arch/sparc64/kernel/irq.c           | 957
 arch/sparc64/kernel/pci.c           |  38
 arch/sparc64/kernel/pci_psycho.c    |   2
 arch/sparc64/kernel/pci_sabre.c     |   4
 arch/sparc64/kernel/pci_schizo.c    |   6
 arch/sparc64/kernel/pci_sun4v.c     |   2
 arch/sparc64/kernel/sbus.c          |   2
 arch/sparc64/kernel/sparc64_ksyms.c |   4
 arch/sparc64/kernel/sun4v_ivec.S    |   2
 include/asm-sparc64/hardirq.h       |   2
 include/asm-sparc64/hw_irq.h        |   2
 include/asm-sparc64/irq.h           |  78
 15 files changed, 305 insertions(+), 802 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 43a66f5407f..a7a111db25b 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -87,6 +87,10 @@ config SYSVIPC_COMPAT
 	depends on COMPAT && SYSVIPC
 	default y
 
+config GENERIC_HARDIRQS
+	bool
+	default y
+
 menu "General machine setup"
 
 config SMP
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index 0684899d998..0dd95ae50e1 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -157,7 +157,7 @@ unsigned int sun4v_vdev_device_interrupt(unsigned int dev_node)
 		return 0;
 	}
 
-	return sun4v_build_irq(sun4v_vdev_devhandle, irq, 0);
+	return sun4v_build_irq(sun4v_vdev_devhandle, irq);
 }
 
 static const char *cpu_mid_prop(void)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index c87365e59e7..be85ce2a4ad 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -432,7 +432,7 @@ do_ivec:
 	membar	#Sync
 
 	sethi	%hi(ivector_table), %g2
-	sllx	%g3, 5, %g3
+	sllx	%g3, 3, %g3
 	or	%g2, %lo(ivector_table), %g2
 	add	%g2, %g3, %g3
 
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 49ad9cd0f8c..a8c9dc8d195 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -22,6 +22,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
+#include <linux/irq.h>
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
@@ -42,10 +43,6 @@
 #include <asm/auxio.h>
 #include <asm/head.h>
 
-#ifdef CONFIG_SMP
-static void distribute_irqs(void);
-#endif
-
 /* UPA nodes send interrupt packet to UltraSparc with first data reg
  * value low 5 (7 on Starfire) bits holding the IRQ identifier being
  * delivered.  We must translate this into a non-vector IRQ so we can
@@ -57,10 +54,29 @@ static void distribute_irqs(void);
  * The IVEC handler does not need to act atomically, the PIL dispatch
  * code uses CAS to get an atomic snapshot of the list and clear it
  * at the same time.
+ *
+ * If you make changes to ino_bucket, please update hand coded assembler
+ * of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
  */
+struct ino_bucket {
+	/* Next handler in per-CPU IRQ worklist.  We know that
+	 * bucket pointers have the high 32-bits clear, so to
+	 * save space we only store the bits we need.
+	 */
+/*0x00*/unsigned int irq_chain;
 
+	/* Virtual interrupt number assigned to this INO.  */
+/*0x04*/unsigned int virt_irq;
+};
+
+#define NUM_IVECS	(IMAP_INR + 1)
 struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));
 
+#define __irq_ino(irq) \
+        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
+#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
+#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
+
 /* This has to be in the main kernel image, it cannot be
  * turned into per-cpu data.  The reason is that the main
  * kernel image is locked into the TLB and this structure
@@ -70,18 +86,6 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  */
 #define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
 
-static struct irqaction timer_irq_action = {
-	.name = "timer",
-};
-static struct irqaction *irq_action[NR_IRQS] = { &timer_irq_action, };
-
-/* This only synchronizes entities which modify IRQ handler
- * state and some selected user-level spots that want to
- * read things in the table.  IRQ handler processing orders
- * its' accesses such that no locking is needed.
- */
-static DEFINE_SPINLOCK(irq_action_lock);
-
 static unsigned int virt_to_real_irq_table[NR_IRQS];
 static unsigned char virt_irq_cur = 1;
 
@@ -117,69 +121,45 @@ static unsigned int virt_to_real_irq(unsigned char virt_irq)
 	return virt_to_real_irq_table[virt_irq];
 }
 
-void irq_install_pre_handler(int virt_irq,
-			     void (*func)(struct ino_bucket *, void *, void *),
-			     void *arg1, void *arg2)
-{
-	unsigned int real_irq = virt_to_real_irq(virt_irq);
-	struct ino_bucket *bucket;
-	struct irq_desc *d;
-
-	if (unlikely(!real_irq))
-		return;
-
-	bucket = __bucket(real_irq);
-	d = bucket->irq_info;
-	d->pre_handler = func;
-	d->pre_handler_arg1 = arg1;
-	d->pre_handler_arg2 = arg2;
-}
-
-static void register_irq_proc (unsigned int irq);
-
 /*
- * Upper 2b of irqaction->flags holds the ino.
- * irqaction->mask holds the smp affinity information.
+ * /proc/interrupts printing:
  */
-#define put_ino_in_irqaction(action, irq) \
-	action->flags &= 0xffffffffffffUL; \
-	action->flags |= __irq_ino(irq) << 48;
-
-#define get_ino_in_irqaction(action)	(action->flags >> 48)
-
-#define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
-#define get_smpaff_in_irqaction(action)		((action)->mask)
 
 int show_interrupts(struct seq_file *p, void *v)
 {
+	int i = *(loff_t *) v, j;
+	struct irqaction * action;
 	unsigned long flags;
-	int i = *(loff_t *) v;
-	struct irqaction *action;
-#ifdef CONFIG_SMP
-	int j;
-#endif
 
-	spin_lock_irqsave(&irq_action_lock, flags);
-	if (i <= NR_IRQS) {
-		if (!(action = *(i + irq_action)))
-			goto out_unlock;
-		seq_printf(p, "%3d: ", i);
+	if (i == 0) {
+		seq_printf(p, "           ");
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d       ",j);
+		seq_putc(p, '\n');
+	}
+
+	if (i < NR_IRQS) {
+		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		action = irq_desc[i].action;
+		if (!action)
+			goto skip;
+		seq_printf(p, "%3d: ",i);
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for_each_online_cpu(j) {
-			seq_printf(p, "%10u ",
-				   kstat_cpu(j).irqs[i]);
-		}
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %s", action->name);
-		for (action = action->next; action; action = action->next)
+		seq_printf(p, " %9s", irq_desc[i].handler->typename);
+		seq_printf(p, " %s", action->name);
+
+		for (action=action->next; action; action = action->next)
 			seq_printf(p, ", %s", action->name);
+
 		seq_putc(p, '\n');
+skip:
+		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	}
-out_unlock:
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-
 	return 0;
 }
 
@@ -220,509 +200,321 @@ static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
 	return tid;
 }
 
-void enable_irq(unsigned int virt_irq)
-{
-	unsigned int real_irq = virt_to_real_irq(virt_irq);
-	struct ino_bucket *bucket;
-	unsigned long imap, cpuid;
-
-	if (unlikely(!real_irq))
-		return;
+struct irq_handler_data {
+	unsigned long	iclr;
+	unsigned long	imap;
 
-	bucket = __bucket(real_irq);
-	imap = bucket->imap;
-	if (unlikely(imap == 0UL))
-		return;
-
-	preempt_disable();
-
-	/* This gets the physical processor ID, even on uniprocessor,
-	 * so we can always program the interrupt target correctly.
-	 */
-	cpuid = real_hard_smp_processor_id();
-
-	if (tlb_type == hypervisor) {
-		unsigned int ino = __irq_ino(real_irq);
-		int err;
-
-		err = sun4v_intr_settarget(ino, cpuid);
-		if (err != HV_EOK)
-			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
-			       ino, cpuid, err);
-		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
-		if (err != HV_EOK)
-			printk("sun4v_intr_setenabled(%x): err(%d)\n",
-			       ino, err);
-	} else {
-		unsigned int tid = sun4u_compute_tid(imap, cpuid);
-
-		/* NOTE NOTE NOTE, IGN and INO are read-only, IGN is a product
-		 * of this SYSIO's preconfigured IGN in the SYSIO Control
-		 * Register, the hardware just mirrors that value here.
-		 * However for Graphics and UPA Slave devices the full
-		 * IMAP_INR field can be set by the programmer here.
-		 *
-		 * Things like FFB can now be handled via the new IRQ
-		 * mechanism.
-		 */
-		upa_writel(tid | IMAP_VALID, imap);
-	}
-
-	preempt_enable();
-}
+	void (*pre_handler)(unsigned int, void *, void *);
+	void *pre_handler_arg1;
+	void *pre_handler_arg2;
+};
 
-void disable_irq(unsigned int virt_irq)
+static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
 {
 	unsigned int real_irq = virt_to_real_irq(virt_irq);
-	struct ino_bucket *bucket;
-	unsigned long imap;
+	struct ino_bucket *bucket = NULL;
 
-	if (unlikely(!real_irq))
-		return;
-
-	bucket = __bucket(real_irq);
-	imap = bucket->imap;
-	if (unlikely(imap == 0UL))
-		return;
-
-	if (tlb_type == hypervisor) {
-		unsigned int ino = __irq_ino(real_irq);
-		int err;
+	if (likely(real_irq))
+		bucket = __bucket(real_irq);
 
-		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
-		if (err != HV_EOK)
-			printk("sun4v_intr_setenabled(%x): "
-			       "err(%d)\n", ino, err);
-	} else {
-		u32 tmp;
-
-		/* NOTE: We do not want to futz with the IRQ clear registers
-		 * and move the state to IDLE, the SCSI code does call
-		 * disable_irq() to assure atomicity in the queue cmd
-		 * SCSI adapter driver code.  Thus we'd lose interrupts.
-		 */
-		tmp = upa_readl(imap);
-		tmp &= ~IMAP_VALID;
-		upa_writel(tmp, imap);
-	}
+	return bucket;
 }
 
-static void build_irq_error(const char *msg, unsigned int ino, int inofixup,
-			    unsigned long iclr, unsigned long imap,
-			    struct ino_bucket *bucket)
+#ifdef CONFIG_SMP
+static int irq_choose_cpu(unsigned int virt_irq)
 {
-	prom_printf("IRQ: INO %04x (%016lx:%016lx) --> "
-		    "(%d:%016lx:%016lx), halting...\n",
-		    ino, bucket->iclr, bucket->imap,
-		    inofixup, iclr, imap);
-	prom_halt();
-}
+	cpumask_t mask = irq_affinity[virt_irq];
+	int cpuid;
 
-unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags)
-{
-	struct ino_bucket *bucket;
-	int ino;
+	if (cpus_equal(mask, CPU_MASK_ALL)) {
+		static int irq_rover;
+		static DEFINE_SPINLOCK(irq_rover_lock);
+		unsigned long flags;
 
-	BUG_ON(tlb_type == hypervisor);
+		/* Round-robin distribution... */
+	do_round_robin:
+		spin_lock_irqsave(&irq_rover_lock, flags);
 
-	/* RULE: Both must be specified. */
-	if (iclr == 0UL || imap == 0UL) {
-		prom_printf("Invalid build_irq %d %016lx %016lx\n",
-			    inofixup, iclr, imap);
-		prom_halt();
-	}
-
-	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
-	if (ino > NUM_IVECS) {
-		prom_printf("Invalid INO %04x (%d:%016lx:%016lx)\n",
-			    ino, inofixup, iclr, imap);
-		prom_halt();
-	}
+		while (!cpu_online(irq_rover)) {
+			if (++irq_rover >= NR_CPUS)
+				irq_rover = 0;
+		}
+		cpuid = irq_rover;
+		do {
+			if (++irq_rover >= NR_CPUS)
+				irq_rover = 0;
+		} while (!cpu_online(irq_rover));
 
-	bucket = &ivector_table[ino];
-	if (bucket->flags & IBF_ACTIVE)
-		build_irq_error("IRQ: Trying to build active INO bucket.\n",
-				ino, inofixup, iclr, imap, bucket);
+		spin_unlock_irqrestore(&irq_rover_lock, flags);
+	} else {
+		cpumask_t tmp;
 
-	if (bucket->irq_info) {
-		if (bucket->imap != imap || bucket->iclr != iclr)
-			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
-					ino, inofixup, iclr, imap, bucket);
+		cpus_and(tmp, cpu_online_map, mask);
 
-		goto out;
-	}
+		if (cpus_empty(tmp))
+			goto do_round_robin;
 
-	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
-	if (!bucket->irq_info) {
-		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
-		prom_halt();
+		cpuid = first_cpu(tmp);
 	}
 
-	/* Ok, looks good, set it up.  Don't touch the irq_chain or
-	 * the pending flag.
-	 */
-	bucket->imap  = imap;
-	bucket->iclr  = iclr;
-	if (!bucket->virt_irq)
-		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-	bucket->flags = flags;
-
-out:
-	return bucket->virt_irq;
+	return cpuid;
+}
+#else
+static int irq_choose_cpu(unsigned int virt_irq)
+{
+	return real_hard_smp_processor_id();
 }
+#endif
 
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags)
+static void sun4u_irq_enable(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket;
-	unsigned long sysino;
+	irq_desc_t *desc = irq_desc + virt_irq;
+	struct irq_handler_data *data = desc->handler_data;
 
-	sysino = sun4v_devino_to_sysino(devhandle, devino);
+	if (likely(data)) {
+		unsigned long cpuid, imap;
+		unsigned int tid;
 
-	bucket = &ivector_table[sysino];
+		cpuid = irq_choose_cpu(virt_irq);
+		imap = data->imap;
 
-	/* Catch accidental accesses to these things.  IMAP/ICLR handling
-	 * is done by hypervisor calls on sun4v platforms, not by direct
-	 * register accesses.
-	 *
-	 * But we need to make them look unique for the disable_irq() logic
-	 * in free_irq().
-	 */
-	bucket->imap = ~0UL - sysino;
-	bucket->iclr = ~0UL - sysino;
-	if (!bucket->virt_irq)
-		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-	bucket->flags = flags;
+		tid = sun4u_compute_tid(imap, cpuid);
 
-	bucket->irq_info = kzalloc(sizeof(struct irq_desc), GFP_ATOMIC);
-	if (!bucket->irq_info) {
-		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
-		prom_halt();
+		upa_writel(tid | IMAP_VALID, imap);
 	}
-
-	return bucket->virt_irq;
 }
 
-static void atomic_bucket_insert(struct ino_bucket *bucket)
+static void sun4u_irq_disable(unsigned int virt_irq)
 {
-	unsigned long pstate;
-	unsigned int *ent;
+	irq_desc_t *desc = irq_desc + virt_irq;
+	struct irq_handler_data *data = desc->handler_data;
 
-	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-	__asm__ __volatile__("wrpr %0, %1, %%pstate"
-			     : : "r" (pstate), "i" (PSTATE_IE));
-	ent = irq_work(smp_processor_id());
-	bucket->irq_chain = *ent;
-	*ent = __irq(bucket);
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
-}
+	if (likely(data)) {
+		unsigned long imap = data->imap;
+		u32 tmp = upa_readl(imap);
 
-static int check_irq_sharing(int pil, unsigned long irqflags)
-{
-	struct irqaction *action;
-
-	action = *(irq_action + pil);
-	if (action) {
-		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ))
-			return -EBUSY;
+		tmp &= ~IMAP_VALID;
+		upa_writel(tmp, imap);
 	}
-	return 0;
 }
 
-static void append_irq_action(int pil, struct irqaction *action)
+static void sun4u_irq_end(unsigned int virt_irq)
 {
-	struct irqaction **pp = irq_action + pil;
+	irq_desc_t *desc = irq_desc + virt_irq;
+	struct irq_handler_data *data = desc->handler_data;
 
-	while (*pp)
-		pp = &((*pp)->next);
-	*pp = action;
+	if (likely(data))
+		upa_writel(ICLR_IDLE, data->iclr);
 }
 
-static struct irqaction *get_action_slot(struct ino_bucket *bucket)
+static void sun4v_irq_enable(unsigned int virt_irq)
 {
-	struct irq_desc *desc = bucket->irq_info;
-	int max_irq, i;
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned int ino = bucket - &ivector_table[0];
 
-	max_irq = 1;
-	if (bucket->flags & IBF_PCI)
-		max_irq = MAX_IRQ_DESC_ACTION;
-	for (i = 0; i < max_irq; i++) {
-		struct irqaction *p = &desc->action[i];
-		u32 mask = (1 << i);
+	if (likely(bucket)) {
+		unsigned long cpuid;
+		int err;
 
-		if (desc->action_active_mask & mask)
-			continue;
+		cpuid = irq_choose_cpu(virt_irq);
 
-		desc->action_active_mask |= mask;
-		return p;
+		err = sun4v_intr_settarget(ino, cpuid);
+		if (err != HV_EOK)
+			printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
+			       ino, cpuid, err);
+		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setenabled(%x): err(%d)\n",
+			       ino, err);
 	}
-	return NULL;
 }
 
-int request_irq(unsigned int virt_irq,
-		irqreturn_t (*handler)(int, void *, struct pt_regs *),
-		unsigned long irqflags, const char *name, void *dev_id)
+static void sun4v_irq_disable(unsigned int virt_irq)
 {
-	struct irqaction *action;
-	struct ino_bucket *bucket;
-	unsigned long flags;
-	unsigned int real_irq;
-	int pending = 0;
-
-	real_irq = virt_to_real_irq(virt_irq);
-	if (unlikely(!real_irq))
-		return -EINVAL;
-
-	if (unlikely(!handler))
-		return -EINVAL;
-
-	bucket = __bucket(real_irq);
-	if (unlikely(!bucket->irq_info))
-		return -ENODEV;
-
-	if (irqflags & SA_SAMPLE_RANDOM) {
-		/*
-		 * This function might sleep, we want to call it first,
-		 * outside of the atomic block.
-		 * Yes, this might clear the entropy pool if the wrong
-		 * driver is attempted to be loaded, without actually
-		 * installing a new handler, but is this really a problem,
-		 * only the sysadmin is able to do this.
-		 */
-		rand_initialize_irq(virt_irq);
-	}
-
-	spin_lock_irqsave(&irq_action_lock, flags);
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned int ino = bucket - &ivector_table[0];
 
-	if (check_irq_sharing(virt_irq, irqflags)) {
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return -EBUSY;
-	}
+	if (likely(bucket)) {
+		int err;
 
-	action = get_action_slot(bucket);
-	if (!action) {
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return -ENOMEM;
+		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setenabled(%x): "
+			       "err(%d)\n", ino, err);
 	}
+}
 
-	bucket->flags |= IBF_ACTIVE;
-	pending = bucket->pending;
-	if (pending)
-		bucket->pending = 0;
-
-	action->handler = handler;
-	action->flags = irqflags;
-	action->name = name;
-	action->next = NULL;
-	action->dev_id = dev_id;
-	put_ino_in_irqaction(action, __irq_ino(real_irq));
-	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
-
-	append_irq_action(virt_irq, action);
+static void sun4v_irq_end(unsigned int virt_irq)
+{
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned int ino = bucket - &ivector_table[0];
 
-	enable_irq(virt_irq);
+	if (likely(bucket)) {
+		int err;
 
-	/* We ate the IVEC already, this makes sure it does not get lost. */
-	if (pending) {
-		atomic_bucket_insert(bucket);
-		set_softint(1 << PIL_DEVICE_IRQ);
+		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+		if (err != HV_EOK)
+			printk("sun4v_intr_setstate(%x): "
+			       "err(%d)\n", ino, err);
 	}
-
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-
-	register_irq_proc(virt_irq);
-
-#ifdef CONFIG_SMP
-	distribute_irqs();
-#endif
-	return 0;
 }
 
-EXPORT_SYMBOL(request_irq);
-
-static struct irqaction *unlink_irq_action(unsigned int virt_irq, void *dev_id)
+static void run_pre_handler(unsigned int virt_irq)
 {
-	struct irqaction *action, **pp;
-
-	pp = irq_action + virt_irq;
-	action = *pp;
-	if (unlikely(!action))
-		return NULL;
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	irq_desc_t *desc = irq_desc + virt_irq;
+	struct irq_handler_data *data = desc->handler_data;
 
-	if (unlikely(!action->handler)) {
-		printk("Freeing free IRQ %d\n", virt_irq);
-		return NULL;
+	if (likely(data->pre_handler)) {
+		data->pre_handler(__irq_ino(__irq(bucket)),
+				  data->pre_handler_arg1,
+				  data->pre_handler_arg2);
 	}
-
-	while (action && action->dev_id != dev_id) {
-		pp = &action->next;
-		action = *pp;
-	}
-
-	if (likely(action))
-		*pp = action->next;
-
-	return action;
 }
 
-void free_irq(unsigned int virt_irq, void *dev_id)
-{
-	struct irqaction *action;
-	struct ino_bucket *bucket;
-	struct irq_desc *desc;
-	unsigned long flags;
-	unsigned int real_irq;
-	int ent, i;
-
-	real_irq = virt_to_real_irq(virt_irq);
-	if (unlikely(!real_irq))
-		return;
+static struct hw_interrupt_type sun4u_irq = {
+	.typename	= "sun4u",
+	.enable		= sun4u_irq_enable,
+	.disable	= sun4u_irq_disable,
+	.end		= sun4u_irq_end,
+};
 
-	spin_lock_irqsave(&irq_action_lock, flags);
+static struct hw_interrupt_type sun4u_irq_ack = {
+	.typename	= "sun4u+ack",
+	.enable		= sun4u_irq_enable,
+	.disable	= sun4u_irq_disable,
+	.ack		= run_pre_handler,
+	.end		= sun4u_irq_end,
+};
 
-	action = unlink_irq_action(virt_irq, dev_id);
+static struct hw_interrupt_type sun4v_irq = {
+	.typename	= "sun4v",
+	.enable		= sun4v_irq_enable,
+	.disable	= sun4v_irq_disable,
+	.end		= sun4v_irq_end,
+};
 
-	spin_unlock_irqrestore(&irq_action_lock, flags);
+static struct hw_interrupt_type sun4v_irq_ack = {
+	.typename	= "sun4v+ack",
+	.enable		= sun4v_irq_enable,
+	.disable	= sun4v_irq_disable,
+	.ack		= run_pre_handler,
+	.end		= sun4v_irq_end,
+};
 
-	if (unlikely(!action))
-		return;
+void irq_install_pre_handler(int virt_irq,
+			     void (*func)(unsigned int, void *, void *),
+			     void *arg1, void *arg2)
+{
+	irq_desc_t *desc = irq_desc + virt_irq;
+	struct irq_handler_data *data = desc->handler_data;
 
-	synchronize_irq(virt_irq);
+	data->pre_handler = func;
+	data->pre_handler_arg1 = arg1;
+	data->pre_handler_arg2 = arg2;
 
-	spin_lock_irqsave(&irq_action_lock, flags);
+	desc->handler = (desc->handler == &sun4u_irq ?
+			 &sun4u_irq_ack : &sun4v_irq_ack);
+}
 
-	bucket = __bucket(real_irq);
-	desc = bucket->irq_info;
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
+{
+	struct ino_bucket *bucket;
+	struct irq_handler_data *data;
+	irq_desc_t *desc;
+	int ino;
 
-	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-		struct irqaction *p = &desc->action[i];
+	BUG_ON(tlb_type == hypervisor);
 
-		if (p == action) {
-			desc->action_active_mask &= ~(1 << i);
-			break;
-		}
+	ino = (upa_readl(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
+	bucket = &ivector_table[ino];
+	if (!bucket->virt_irq) {
+		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
+		irq_desc[bucket->virt_irq].handler = &sun4u_irq;
 	}
 
-	if (!desc->action_active_mask) {
-		unsigned long imap = bucket->imap;
-
-		/* This unique interrupt source is now inactive. */
-		bucket->flags &= ~IBF_ACTIVE;
-
-		/* See if any other buckets share this bucket's IMAP
-		 * and are still active.
-		 */
-		for (ent = 0; ent < NUM_IVECS; ent++) {
-			struct ino_bucket *bp = &ivector_table[ent];
-			if (bp != bucket &&
-			    bp->imap == imap &&
-			    (bp->flags & IBF_ACTIVE) != 0)
-				break;
-		}
+	desc = irq_desc + bucket->virt_irq;
+	if (unlikely(desc->handler_data))
+		goto out;
 
-		/* Only disable when no other sub-irq levels of
-		 * the same IMAP are active.
-		 */
-		if (ent == NUM_IVECS)
-			disable_irq(virt_irq);
+	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+	if (unlikely(!data)) {
+		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
+		prom_halt();
 	}
+	desc->handler_data = data;
 
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-}
+	data->imap  = imap;
+	data->iclr  = iclr;
 
-EXPORT_SYMBOL(free_irq);
+out:
+	return bucket->virt_irq;
+}
 
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int virt_irq)
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
 {
-	unsigned int real_irq = virt_to_real_irq(virt_irq);
 	struct ino_bucket *bucket;
+	struct irq_handler_data *data;
+	unsigned long sysino;
+	irq_desc_t *desc;
 
-	if (unlikely(!real_irq))
-		return;
+	BUG_ON(tlb_type != hypervisor);
 
-	bucket = __bucket(real_irq);
-#if 0
-	/* The following is how I wish I could implement this.
-	 * Unfortunately the ICLR registers are read-only, you can
-	 * only write ICLR_foo values to them.  To get the current
-	 * IRQ status you would need to get at the IRQ diag registers
-	 * in the PCI/SBUS controller and the layout of those vary
-	 * from one controller to the next, sigh... -DaveM
-	 */
-	unsigned long iclr = bucket->iclr;
-
-	while (1) {
-		u32 tmp = upa_readl(iclr);
-
-		if (tmp == ICLR_TRANSMIT ||
-		    tmp == ICLR_PENDING) {
-			cpu_relax();
-			continue;
-		}
-		break;
+	sysino = sun4v_devino_to_sysino(devhandle, devino);
+	bucket = &ivector_table[sysino];
+	if (!bucket->virt_irq) {
+		bucket->virt_irq = virt_irq_alloc(__irq(bucket));
+		irq_desc[bucket->virt_irq].handler = &sun4v_irq;
 	}
-#else
-	/* So we have to do this with a INPROGRESS bit just like x86.  */
-	while (bucket->flags & IBF_INPROGRESS)
-		cpu_relax();
-#endif
-}
-#endif /* CONFIG_SMP */
 
-static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
-{
-	struct irq_desc *desc = bp->irq_info;
-	unsigned char flags = bp->flags;
-	u32 action_mask, i;
-	int random;
+	desc = irq_desc + bucket->virt_irq;
+	if (unlikely(desc->handler_data))
+		goto out;
 
-	bp->flags |= IBF_INPROGRESS;
-
-	if (unlikely(!(flags & IBF_ACTIVE))) {
-		bp->pending = 1;
-		goto out;
+	data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
+	if (unlikely(!data)) {
+		prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
+		prom_halt();
 	}
+	desc->handler_data = data;
 
-	if (desc->pre_handler)
-		desc->pre_handler(bp,
-				  desc->pre_handler_arg1,
-				  desc->pre_handler_arg2);
+	/* Catch accidental accesses to these things.  IMAP/ICLR handling
+	 * is done by hypervisor calls on sun4v platforms, not by direct
+	 * register accesses.
+	 */
+	data->imap = ~0UL;
+	data->iclr = ~0UL;
 
-	action_mask = desc->action_active_mask;
-	random = 0;
-	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-		struct irqaction *p = &desc->action[i];
-		u32 mask = (1 << i);
+out:
+	return bucket->virt_irq;
+}
 
-		if (!(action_mask & mask))
-			continue;
+void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq)
+{
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned long pstate;
+	unsigned int *ent;
 
-		action_mask &= ~mask;
+	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
+	__asm__ __volatile__("wrpr %0, %1, %%pstate"
+			     : : "r" (pstate), "i" (PSTATE_IE));
+	ent = irq_work(smp_processor_id());
+	bucket->irq_chain = *ent;
+	*ent = __irq(bucket);
+	set_softint(1 << PIL_DEVICE_IRQ);
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
+}
 
-		if (p->handler(bp->virt_irq, p->dev_id, regs) == IRQ_HANDLED)
-			random |= p->flags;
+void ack_bad_irq(unsigned int virt_irq)
+{
+	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned int ino = 0xdeadbeef;
 
-		if (!action_mask)
-			break;
-	}
+	if (bucket)
+		ino = bucket - &ivector_table[0];
 
-	if (tlb_type == hypervisor) {
-		unsigned int ino = __irq_ino(bp);
-		int err;
-
-		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
-		if (err != HV_EOK)
-			printk("sun4v_intr_setstate(%x): "
-			       "err(%d)\n", ino, err);
-	} else {
-		upa_writel(ICLR_IDLE, bp->iclr);
-	}
-
-	/* Test and add entropy */
-	if (random & SA_SAMPLE_RANDOM)
-		add_interrupt_randomness(bp->virt_irq);
-out:
-	bp->flags &= ~IBF_INPROGRESS;
+	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
+	       ino, virt_irq);
 }
 
 #ifndef CONFIG_SMP
@@ -740,35 +532,33 @@ void timer_irq(int irq, struct pt_regs *regs)
 	clear_softint(clr_mask);
 
 	irq_enter();
+
 	kstat_this_cpu.irqs[0]++;
 	timer_interrupt(irq, NULL, regs);
+
 	irq_exit();
 }
 #endif
 
 void handler_irq(int irq, struct pt_regs *regs)
 {
-	struct ino_bucket *bp;
-	int cpu = smp_processor_id();
+	struct ino_bucket *bucket;
 
-	/* XXX at this point we should be able to assert that
-	 * XXX irq is PIL_DEVICE_IRQ...
-	 */
 	clear_softint(1 << irq);
 
 	irq_enter();
 
 	/* Sliiiick... */
-	bp = __bucket(xchg32(irq_work(cpu), 0));
-	while (bp) {
-		struct ino_bucket *nbp = __bucket(bp->irq_chain);
+	bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
+	while (bucket) {
+		struct ino_bucket *next = __bucket(bucket->irq_chain);
 
-		kstat_this_cpu.irqs[bp->virt_irq]++;
+		bucket->irq_chain = 0;
+		__do_IRQ(bucket->virt_irq, regs);
 
-		bp->irq_chain = 0;
-		process_bucket(bp, regs);
-		bp = nbp;
+		bucket = next;
 	}
+
 	irq_exit();
 }
 
@@ -833,74 +623,6 @@ main_interrupt:
 EXPORT_SYMBOL(sparc_floppy_irq);
 #endif
 
-/* We really don't need these at all on the Sparc.  We only have
- * stubs here because they are exported to modules.
- */
-unsigned long probe_irq_on(void)
-{
-	return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_on);
-
-int probe_irq_off(unsigned long mask)
-{
-	return 0;
-}
-
-EXPORT_SYMBOL(probe_irq_off);
-
-#ifdef CONFIG_SMP
-static int retarget_one_irq(struct irqaction *p, int goal_cpu)
-{
-	struct ino_bucket *bucket = get_ino_in_irqaction(p) + ivector_table;
-
-	while (!cpu_online(goal_cpu)) {
-		if (++goal_cpu >= NR_CPUS)
-			goal_cpu = 0;
-	}
-
-	if (tlb_type == hypervisor) {
-		unsigned int ino = __irq_ino(bucket);
-
-		sun4v_intr_settarget(ino, goal_cpu);
-		sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
-	} else {
-		unsigned long imap = bucket->imap;
-		unsigned int tid = sun4u_compute_tid(imap, goal_cpu);
-
-		upa_writel(tid | IMAP_VALID, imap);
-	}
-
-	do {
-		if (++goal_cpu >= NR_CPUS)
-			goal_cpu = 0;
-	} while (!cpu_online(goal_cpu));
-
-	return goal_cpu;
-}
-
-/* Called from request_irq. */
-static void distribute_irqs(void)
-{
-	unsigned long flags;
-	int cpu, level;
-
-	spin_lock_irqsave(&irq_action_lock, flags);
-	cpu = 0;
-
-	for (level = 1; level < NR_IRQS; level++) {
-		struct irqaction *p = irq_action[level];
-
-		while(p) {
-			cpu = retarget_one_irq(p, cpu);
-			p = p->next;
-		}
-	}
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-}
-#endif
-
 struct sun5_timer {
 	u64 count0;
 	u64 limit0;
@@ -1076,6 +798,10 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
 	}
 }
 
+static struct irqaction timer_irq_action = {
+	.name = "timer",
+};
+
 /* Only invoked on boot processor. */
 void __init init_IRQ(void)
 {
@@ -1103,121 +829,6 @@ void __init init_IRQ(void)
 			     : /* No outputs */
 			     : "i" (PSTATE_IE)
 			     : "g1");
-}
 
-static struct proc_dir_entry *root_irq_dir;
-static struct proc_dir_entry *irq_dir[NR_IRQS];
-
-#ifdef CONFIG_SMP
-
-static int irq_affinity_read_proc(char *page, char **start, off_t off,
-				  int count, int *eof, void *data)
-{
-	struct ino_bucket *bp = ivector_table + (long)data;
-	struct irq_desc *desc = bp->irq_info;
-	struct irqaction *ap = desc->action;
-	cpumask_t mask;
-	int len;
-
-	mask = get_smpaff_in_irqaction(ap);
-	if (cpus_empty(mask))
-		mask = cpu_online_map;
-
-	len = cpumask_scnprintf(page, count, mask);
-	if (count - len < 2)
-		return -EINVAL;
-	len += sprintf(page + len, "\n");
-	return len;
+	irq_desc[0].action = &timer_irq_action;
 }
-
-static inline void set_intr_affinity(int virt_irq, cpumask_t hw_aff)
-{
-	struct ino_bucket *bp;
-	struct irq_desc *desc;
-	struct irqaction *ap;
-	unsigned int real_irq;
-
-	real_irq = virt_to_real_irq(virt_irq);
-	if (unlikely(!real_irq))
-		return;
-
-	bp = __bucket(real_irq);
-	desc = bp->irq_info;
-	ap = desc->action;
-
-	/* Users specify affinity in terms of hw cpu ids.
-	 * As soon as we do this, handler_irq() might see and take action.
-	 */
-	put_smpaff_in_irqaction(ap, hw_aff);
-
-	/* Migration is simply done by the next cpu to service this
-	 * interrupt.
-	 *
-	 * XXX Broken, this doesn't happen anymore...
-	 */
-}
-
-static int irq_affinity_write_proc(struct file *file,
-				   const char __user *buffer,
-				   unsigned long count, void *data)
-{
-	int virt_irq = (long) data, full_count = count, err;
-	cpumask_t new_value;
-
-	err = cpumask_parse(buffer, count, new_value);
-
-	/*
-	 * Do not allow disabling IRQs completely - it's a too easy
-	 * way to make the system unusable accidentally :-) At least
-	 * one online CPU still has to be targeted.
-	 */
-	cpus_and(new_value, new_value, cpu_online_map);
-	if (cpus_empty(new_value))
-		return -EINVAL;
-
-	set_intr_affinity(virt_irq, new_value);
-
-	return full_count;
-}
-
-#endif
-
-#define MAX_NAMELEN 10
-
-static void register_irq_proc(unsigned int virt_irq)
-{
-	char name [MAX_NAMELEN];
-
-	if (!root_irq_dir || irq_dir[virt_irq])
-		return;
-
-	memset(name, 0, MAX_NAMELEN);
-	sprintf(name, "%d", virt_irq);
-
-	/* create /proc/irq/1234 */
-	irq_dir[virt_irq] = proc_mkdir(name, root_irq_dir);
-
-#ifdef CONFIG_SMP
-	/* XXX SMP affinity not supported on starfire yet. */
-	if (this_is_starfire == 0) {
-		struct proc_dir_entry *entry;
-
-		/* create /proc/irq/1234/smp_affinity */
-		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
-
-		if (entry) {
-			entry->nlink = 1;
-			entry->data = (void *)(long)virt_irq;
-			entry->read_proc = irq_affinity_read_proc;
-			entry->write_proc = irq_affinity_write_proc;
-		}
-	}
-#endif
-}
-
-void init_irq_proc(void)
-{
-	/* create /proc/irq */
-	root_irq_dir = proc_mkdir("irq", NULL);
-}
-
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index f97ddeb105a..9472580a431 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -47,12 +47,6 @@ struct pci_controller_info *pci_controller_root = NULL;
 /* Each PCI controller found gets a unique index. */
 int pci_num_controllers = 0;
 
-/* At boot time the user can give the kernel a command
- * line option which controls if and how PCI devices
- * are reordered at PCI bus probing time.
- */
-int pci_device_reorder = 0;
-
 volatile int pci_poke_in_progress;
 volatile int pci_poke_cpu = -1;
 volatile int pci_poke_faulted;
@@ -316,27 +310,6 @@ static void __init pci_scan_each_controller_bus(void)
 		p->scan_bus(p);
 }
 
-/* Reorder the pci_dev chain, so that onboard devices come first
- * and then come the pluggable cards.
- */
-static void __init pci_reorder_devs(void)
-{
-	struct list_head *pci_onboard = &pci_devices;
-	struct list_head *walk = pci_onboard->next;
-
-	while (walk != pci_onboard) {
-		struct pci_dev *pdev = pci_dev_g(walk);
-		struct list_head *walk_next = walk->next;
-
-		if (pdev->irq && (__irq_ino(pdev->irq) & 0x20)) {
-			list_del(walk);
-			list_add(walk, pci_onboard);
-		}
-
-		walk = walk_next;
-	}
-}
-
 extern void clock_probe(void);
 extern void power_init(void);
 
@@ -348,9 +321,6 @@ static int __init pcibios_init(void)
 
 	pci_scan_each_controller_bus();
 
-	if (pci_device_reorder)
-		pci_reorder_devs();
-
 	isa_init();
 	ebus_init();
 	clock_probe();
@@ -441,14 +411,6 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
 
 char * __init pcibios_setup(char *str)
 {
-	if (!strcmp(str, "onboardfirst")) {
-		pci_device_reorder = 1;
-		return NULL;
-	}
-	if (!strcmp(str, "noreorder")) {
-		pci_device_reorder = 0;
-		return NULL;
-	}
 	return str;
 }
 
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index f2d1097f541..24db22aa972 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -308,7 +308,7 @@ static unsigned int psycho_irq_build(struct pci_pbm_info *pbm,
 	if ((ino & 0x20) == 0)
 		inofixup = ino & 0x03;
 
-	return build_irq(inofixup, iclr, imap, IBF_PCI);
+	return build_irq(inofixup, iclr, imap);
 }
 
 /* PSYCHO error handling support. */
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 846c1205aa9..b7d997b55f0 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -530,7 +530,7 @@ static unsigned long __onboard_imap_off[] = {
  * side of the non-APB bridge, then perform a read of Sabre's DMA
  * write-sync register.
  */
-static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
+static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
 {
 	struct pci_dev *pdev = _arg1;
 	unsigned long sync_reg = (unsigned long) _arg2;
@@ -573,7 +573,7 @@ static unsigned int sabre_irq_build(struct pci_pbm_info *pbm,
 	if ((ino & 0x20) == 0)
 		inofixup = ino & 0x03;
 
-	virt_irq = build_irq(inofixup, iclr, imap, IBF_PCI);
+	virt_irq = build_irq(inofixup, iclr, imap);
 
 	if (pdev) {
 		struct pcidev_cookie *pcp = pdev->sysdata;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 0c400b5fa5b..cc662e915d3 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -232,10 +232,10 @@ static unsigned long schizo_iclr_offset(unsigned long ino)
 	return SCHIZO_ICLR_BASE + (ino * 8UL);
 }
 
-static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
+static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
 {
 	unsigned long sync_reg = (unsigned long) _arg2;
-	u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
+	u64 mask = 1UL << (ino & IMAP_INO);
 	u64 val;
 	int limit;
 
@@ -313,7 +313,7 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
 		ign_fixup = (1 << 6);
 	}
 
-	virt_irq = build_irq(ign_fixup, iclr, imap, IBF_PCI);
+	virt_irq = build_irq(ign_fixup, iclr, imap);
 
 	if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
 		irq_install_pre_handler(virt_irq,
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index b97c81ba883..5419480edf4 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -844,7 +844,7 @@ static unsigned int pci_sun4v_irq_build(struct pci_pbm_info *pbm,
 {
 	u32 devhandle = pbm->devhandle;
 
-	return sun4v_build_irq(devhandle, devino, IBF_PCI);
+	return sun4v_build_irq(devhandle, devino);
 }
 
 static void pci_sun4v_base_address_update(struct pci_dev *pdev, int resource)
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 5544cf5d38b..8812417247d 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -821,7 +821,7 @@ unsigned int sbus_build_irq(void *buscookie, unsigned int ino)
 
 		iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
 	}
-	return build_irq(sbus_level, iclr, imap, 0);
+	return build_irq(sbus_level, iclr, imap);
 }
 
 /* Error interrupt handling. */
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 38e569f786d..af72bf5c3db 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -175,10 +175,6 @@ EXPORT_SYMBOL(set_bit);
 EXPORT_SYMBOL(clear_bit);
 EXPORT_SYMBOL(change_bit);
 
-EXPORT_SYMBOL(ivector_table);
-EXPORT_SYMBOL(enable_irq);
-EXPORT_SYMBOL(disable_irq);
-
 EXPORT_SYMBOL(__flushw_user);
 
 EXPORT_SYMBOL(tlb_type);
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index f70e4774649..49703c3c576 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -103,7 +103,7 @@ sun4v_dev_mondo:
 
 	/* Get &ivector_table[IVEC] into %g4.  */
 	sethi	%hi(ivector_table), %g4
-	sllx	%g3, 5, %g3
+	sllx	%g3, 3, %g3
 	or	%g4, %lo(ivector_table), %g4
 	add	%g4, %g3, %g4
 
diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h
index f0cf71376ec..7c29fd1a87a 100644
--- a/include/asm-sparc64/hardirq.h
+++ b/include/asm-sparc64/hardirq.h
@@ -12,6 +12,8 @@
 #define local_softirq_pending() \
 	(local_cpu_data().__softirq_pending)
 
+void ack_bad_irq(unsigned int irq);
+
 #define HARDIRQ_BITS	8
 
 #endif /* !(__SPARC64_HARDIRQ_H) */
diff --git a/include/asm-sparc64/hw_irq.h b/include/asm-sparc64/hw_irq.h
index 153cae2ddae..599b3b07345 100644
--- a/include/asm-sparc64/hw_irq.h
+++ b/include/asm-sparc64/hw_irq.h
@@ -1,6 +1,6 @@
 #ifndef __ASM_SPARC64_HW_IRQ_H
 #define __ASM_SPARC64_HW_IRQ_H
 
-/* Dummy include. */
+extern void hw_resend_irq(struct hw_interrupt_type *handler, unsigned int virt_irq);
 
 #endif
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 9edcd90495f..77a4f63cba7 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -16,58 +16,6 @@
 #include <asm/pil.h>
 #include <asm/ptrace.h>
 
-struct ino_bucket;
-
-#define MAX_IRQ_DESC_ACTION	4
-
-struct irq_desc {
-	void			(*pre_handler)(struct ino_bucket *, void *, void *);
-	void			*pre_handler_arg1;
-	void			*pre_handler_arg2;
-	u32			action_active_mask;
-	struct irqaction	action[MAX_IRQ_DESC_ACTION];
-};
-
-/* You should not mess with this directly. That's the job of irq.c.
- *
- * If you make changes here, please update hand coded assembler of
- * the vectored interrupt trap handler in entry.S -DaveM
- *
- * This is currently one DCACHE line, two buckets per L2 cache
- * line.  Keep this in mind please.
- */
-struct ino_bucket {
-	/* Next handler in per-CPU IRQ worklist.  We know that
-	 * bucket pointers have the high 32-bits clear, so to
-	 * save space we only store the bits we need.
-	 */
-/*0x00*/unsigned int irq_chain;
-
-	/* Virtual interrupt number assigned to this INO.  */
-/*0x04*/unsigned char virt_irq;
-
-	/* If an IVEC arrives while irq_info is NULL, we
-	 * set this to notify request_irq() about the event.
-	 */
-/*0x05*/unsigned char pending;
-
-	/* Miscellaneous flags. */
-/*0x06*/unsigned char flags;
-
-	/* Currently unused.  */
-/*0x07*/unsigned char __pad;
-
-	/* Reference to IRQ descriptor for this bucket. */
-/*0x08*/struct irq_desc *irq_info;
-
-	/* Sun5 Interrupt Clear Register. */
-/*0x10*/unsigned long iclr;
-
-	/* Sun5 Interrupt Mapping Register. */
-/*0x18*/unsigned long imap;
-
-};
-
 /* IMAP/ICLR register defines */
 #define IMAP_VALID	0x80000000	/* IRQ Enabled */
 #define IMAP_TID_UPA	0x7c000000	/* UPA TargetID */
@@ -85,19 +33,6 @@ struct ino_bucket {
 #define ICLR_TRANSMIT	0x00000001	/* Transmit state */
 #define ICLR_PENDING	0x00000003	/* Pending state */
 
-/* Only 8-bits are available, be careful.  -DaveM */
-#define IBF_PCI		0x02	/* PSYCHO/SABRE/SCHIZO PCI interrupt. */
-#define IBF_ACTIVE	0x04	/* Interrupt is active and has a handler.*/
-#define IBF_INPROGRESS	0x10	/* IRQ is being serviced. */
-
-#define NUM_IVECS	(IMAP_INR + 1)
-extern struct ino_bucket ivector_table[NUM_IVECS];
-
-#define __irq_ino(irq) \
-        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
-#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
-#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
-
 /* The largest number of unique interrupt sources we support.
  * If this needs to ever be larger than 255, you need to change
  * the type of ino_bucket->virt_irq as appropriate.
@@ -107,14 +42,11 @@ extern struct ino_bucket ivector_table[NUM_IVECS];
 #define NR_IRQS    255
 
 extern void irq_install_pre_handler(int virt_irq,
-				    void (*func)(struct ino_bucket *, void *, void *),
+				    void (*func)(unsigned int, void *, void *),
 				    void *arg1, void *arg2);
 #define irq_canonicalize(irq)	(irq)
-extern void disable_irq(unsigned int);
-#define disable_irq_nosync disable_irq
-extern void enable_irq(unsigned int);
-extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap, unsigned char flags);
-extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino, unsigned char flags);
+extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
+extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
 extern unsigned int sbus_build_irq(void *sbus, unsigned int ino);
 
 static __inline__ void set_softint(unsigned long bits)
@@ -140,8 +72,4 @@ static __inline__ unsigned long get_softint(void)
 	return retval;
 }
 
-struct irqaction;
-struct pt_regs;
-int handle_IRQ_event(unsigned int, struct pt_regs *, struct irqaction *);
-
 #endif