 arch/sparc64/Kconfig             |  18
 arch/sparc64/kernel/entry.S      |  21
 arch/sparc64/kernel/irq.c        | 581
 arch/sparc64/kernel/pci_psycho.c |   3
 arch/sparc64/kernel/pci_sabre.c  |  46
 arch/sparc64/kernel/pci_schizo.c |  78
 arch/sparc64/kernel/time.c       |   2
 drivers/sbus/char/bpp.c          |  20
 include/asm-sparc64/irq.h        |  49
 include/asm-sparc64/pbm.h        |   3
 include/asm-sparc64/signal.h     |  15
 include/linux/compat_ioctl.h     |  19
 12 files changed, 349 insertions(+), 506 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index e2b050eb3b96..d78bc13ebbb9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -444,6 +444,24 @@ config PRINTER
 	  If you have more than 8 printers, you need to increase the LP_NO
 	  macro in lp.c and the PARPORT_MAX macro in parport.h.
 
+config PPDEV
+	tristate "Support for user-space parallel port device drivers"
+	depends on PARPORT
+	---help---
+	  Saying Y to this adds support for /dev/parport device nodes.  This
+	  is needed for programs that want portable access to the parallel
+	  port, for instance deviceid (which displays Plug-and-Play device
+	  IDs).
+
+	  This is the parallel port equivalent of SCSI generic support (sg).
+	  It is safe to say N to this -- it is not needed for normal printing
+	  or parallel port CD-ROM/disk support.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ppdev.
+
+	  If unsure, say N.
+
 config ENVCTRL
 	tristate "SUNW, envctrl support"
 	depends on PCI
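Note: the PPDEV help text above describes a user-space API. A minimal sketch of how a program would use it (illustrative only, not part of this patch; assumes a port at /dev/parport0 and the ioctls from linux/ppdev.h):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ppdev.h>

/* Claim the parallel port and write a single data byte through it. */
static int write_data_byte(unsigned char byte)
{
	int fd = open("/dev/parport0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, PPCLAIM) < 0) {	/* must claim before any port I/O */
		close(fd);
		return -1;
	}
	ioctl(fd, PPWDATA, &byte);	/* drive the data lines */
	ioctl(fd, PPRELEASE);		/* let other claimants at the port */
	close(fd);
	return 0;
}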
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index eee516a71c14..d3973d8a7195 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -553,13 +553,11 @@ do_ivec:
 	sllx	%g3, 5, %g3
 	or	%g2, %lo(ivector_table), %g2
 	add	%g2, %g3, %g3
-	ldx	[%g3 + 0x08], %g2	/* irq_info */
 	ldub	[%g3 + 0x04], %g4	/* pil */
-	brz,pn	%g2, do_ivec_spurious
-	 mov	1, %g2
-
+	mov	1, %g2
 	sllx	%g2, %g4, %g2
 	sllx	%g4, 2, %g4
+
 	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
 	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
 	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
@@ -567,9 +565,9 @@ do_ivec:
 	retry
 do_ivec_xcall:
 	mov	0x50, %g1
-
 	ldxa	[%g1 + %g0] ASI_INTR_R, %g1
 	srl	%g3, 0, %g3
+
 	mov	0x60, %g7
 	ldxa	[%g7 + %g0] ASI_INTR_R, %g7
 	stxa	%g0, [%g0] ASI_INTR_RECEIVE
@@ -581,19 +579,6 @@ do_ivec_xcall:
 1:	jmpl	%g3, %g0
 	 nop
 
-do_ivec_spurious:
-	stw	%g3, [%g6 + 0x00]	/* irq_work(cpu, 0) = bucket */
-	rdpr	%pstate, %g5
-
-	wrpr	%g5, PSTATE_IG | PSTATE_AG, %pstate
-	sethi	%hi(109f), %g7
-	ba,pt	%xcc, etrap
-109:	 or	%g7, %lo(109b), %g7
-	call	catch_disabled_ivec
-	 add	%sp, PTREGS_OFF, %o0
-	ba,pt	%xcc, rtrap
-	 clr	%l6
-
 	.globl		save_alternate_globals
 save_alternate_globals:	/* %o0 = save_area */
 	rdpr	%pstate, %o5
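Note: with do_ivec_spurious gone, every vectored interrupt takes the same fast path above; buckets whose source is not active are sorted out later in C by process_bucket() (see irq.c below). For orientation, a C sketch of what the trimmed assembler does for a normal vector (hypothetical helper name, not part of the patch):

/* Look up the INO bucket, push it onto this cpu's per-PIL work
 * list, and post the matching softint.
 */
static void do_ivec_in_c(int cpu, unsigned int ino)
{
	struct ino_bucket *bucket = &ivector_table[ino];
	int pil = bucket->pil;

	bucket->irq_chain = *irq_work(cpu, pil);	/* chain onto list head */
	*irq_work(cpu, pil) = __irq(bucket);
	set_softint(1 << pil);
}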
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 424712577307..74a2e0808cbc 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -71,31 +71,7 @@ struct irq_work_struct {
 struct irq_work_struct __irq_work[NR_CPUS];
 #define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
 
-#ifdef CONFIG_PCI
-/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
- * It is used for PCI only to synchronize DMA transfers with IRQ delivery
- * for devices behind busses other than APB on Sabre systems.
- *
- * Currently these physical addresses are just config space accesses
- * to the command register for that device.
- */
-unsigned long pci_dma_wsync;
-unsigned long dma_sync_reg_table[256];
-unsigned char dma_sync_reg_table_entry = 0;
-#endif
-
-/* This is based upon code in the 32-bit Sparc kernel written mostly by
- * David Redman (djhr@tadpole.co.uk).
- */
-#define MAX_STATIC_ALLOC	4
-static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
-static int static_irq_count;
-
-/* This is exported so that fast IRQ handlers can get at it... -DaveM */
-struct irqaction *irq_action[NR_IRQS+1] = {
-	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
-};
+static struct irqaction *irq_action[NR_IRQS+1];
 
 /* This only synchronizes entities which modify IRQ handler
  * state and some selected user-level spots that want to
@@ -241,17 +217,22 @@ void disable_irq(unsigned int irq)
  * the CPU %tick register and not by some normal vectored interrupt
  * source.  To handle this special case, we use this dummy INO bucket.
  */
+static struct irq_desc pil0_dummy_desc;
 static struct ino_bucket pil0_dummy_bucket = {
-	0,	/* irq_chain */
-	0,	/* pil */
-	0,	/* pending */
-	0,	/* flags */
-	0,	/* __unused */
-	NULL,	/* irq_info */
-	0UL,	/* iclr */
-	0UL,	/* imap */
+	.irq_info = &pil0_dummy_desc,
 };
 
+static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
+			    unsigned long iclr, unsigned long imap,
+			    struct ino_bucket *bucket)
+{
+	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
+		    "(%d:%d:%016lx:%016lx), halting...\n",
+		    ino, bucket->pil, bucket->iclr, bucket->imap,
+		    pil, inofixup, iclr, imap);
+	prom_halt();
+}
+
 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
 {
 	struct ino_bucket *bucket;
@@ -280,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 		prom_halt();
 	}
 
-	/* Ok, looks good, set it up.  Don't touch the irq_chain or
-	 * the pending flag.
-	 */
 	bucket = &ivector_table[ino];
-	if ((bucket->flags & IBF_ACTIVE) ||
-	    (bucket->irq_info != NULL)) {
-		/* This is a gross fatal error if it happens here. */
-		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
-		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
-			    ino, pil, inofixup, iclr, imap);
-		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
-			    bucket->pil, bucket->iclr, bucket->imap);
-		prom_printf("IRQ: Cannot continue, halting...\n");
+	if (bucket->flags & IBF_ACTIVE)
+		build_irq_error("IRQ: Trying to build active INO bucket.\n",
+				ino, pil, inofixup, iclr, imap, bucket);
+
+	if (bucket->irq_info) {
+		if (bucket->imap != imap || bucket->iclr != iclr)
+			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
+					ino, pil, inofixup, iclr, imap, bucket);
+
+		goto out;
+	}
+
+	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
+	if (!bucket->irq_info) {
+		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
 		prom_halt();
 	}
+	memset(bucket->irq_info, 0, sizeof(struct irq_desc));
+
+	/* Ok, looks good, set it up.  Don't touch the irq_chain or
+	 * the pending flag.
+	 */
 	bucket->imap  = imap;
 	bucket->iclr  = iclr;
 	bucket->pil   = pil;
 	bucket->flags = 0;
 
-	bucket->irq_info = NULL;
-
+out:
 	return __irq(bucket);
 }
 
@@ -319,26 +307,65 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
 }
 
+static int check_irq_sharing(int pil, unsigned long irqflags)
+{
+	struct irqaction *action, *tmp;
+
+	action = *(irq_action + pil);
+	if (action) {
+		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
+			for (tmp = action; tmp->next; tmp = tmp->next)
+				;
+		} else {
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+static void append_irq_action(int pil, struct irqaction *action)
+{
+	struct irqaction **pp = irq_action + pil;
+
+	while (*pp)
+		pp = &((*pp)->next);
+	*pp = action;
+}
+
+static struct irqaction *get_action_slot(struct ino_bucket *bucket)
+{
+	struct irq_desc *desc = bucket->irq_info;
+	int max_irq, i;
+
+	max_irq = 1;
+	if (bucket->flags & IBF_PCI)
+		max_irq = MAX_IRQ_DESC_ACTION;
+	for (i = 0; i < max_irq; i++) {
+		struct irqaction *p = &desc->action[i];
+		u32 mask = (1 << i);
+
+		if (desc->action_active_mask & mask)
+			continue;
+
+		desc->action_active_mask |= mask;
+		return p;
+	}
+	return NULL;
+}
+
 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
 		unsigned long irqflags, const char *name, void *dev_id)
 {
-	struct irqaction *action, *tmp = NULL;
+	struct irqaction *action;
 	struct ino_bucket *bucket = __bucket(irq);
 	unsigned long flags;
 	int pending = 0;
 
-	if ((bucket != &pil0_dummy_bucket) &&
-	    (bucket < &ivector_table[0] ||
-	     bucket >= &ivector_table[NUM_IVECS])) {
-		unsigned int *caller;
-
-		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
-		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
-		       "from %p, irq %08x.\n", caller, irq);
+	if (unlikely(!handler))
 		return -EINVAL;
-	}
-	if (!handler)
-		return -EINVAL;
+
+	if (unlikely(!bucket->irq_info))
+		return -ENODEV;
 
 	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
 		/*
@@ -356,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(bucket->pil + irq_action);
-	if (action) {
-		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
-			for (tmp = action; tmp->next; tmp = tmp->next)
-				;
-		else {
-			spin_unlock_irqrestore(&irq_action_lock, flags);
-			return -EBUSY;
-		}
-		action = NULL;		/* Or else! */
+	if (check_irq_sharing(bucket->pil, irqflags)) {
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -EBUSY;
 	}
 
-	/* If this is flagged as statically allocated then we use our
-	 * private struct which is never freed.
-	 */
-	if (irqflags & SA_STATIC_ALLOC) {
-		if (static_irq_count < MAX_STATIC_ALLOC)
-			action = &static_irqaction[static_irq_count++];
-		else
-			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
-			       "using kmalloc\n", irq, name);
-	}
-	if (action == NULL)
-		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
-						     GFP_ATOMIC);
-
+	action = get_action_slot(bucket);
 	if (!action) {
 		spin_unlock_irqrestore(&irq_action_lock, flags);
 		return -ENOMEM;
 	}
 
-	if (bucket == &pil0_dummy_bucket) {
-		bucket->irq_info = action;
-		bucket->flags |= IBF_ACTIVE;
-	} else {
-		if ((bucket->flags & IBF_ACTIVE) != 0) {
-			void *orig = bucket->irq_info;
-			void **vector = NULL;
-
-			if ((bucket->flags & IBF_PCI) == 0) {
-				printk("IRQ: Trying to share non-PCI bucket.\n");
-				goto free_and_ebusy;
-			}
-			if ((bucket->flags & IBF_MULTI) == 0) {
-				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
-				if (vector == NULL)
-					goto free_and_enomem;
-
-				/* We might have slept. */
-				if ((bucket->flags & IBF_MULTI) != 0) {
-					int ent;
-
-					kfree(vector);
-					vector = (void **)bucket->irq_info;
-					for(ent = 0; ent < 4; ent++) {
-						if (vector[ent] == NULL) {
-							vector[ent] = action;
-							break;
-						}
-					}
-					if (ent == 4)
-						goto free_and_ebusy;
-				} else {
-					vector[0] = orig;
-					vector[1] = action;
-					vector[2] = NULL;
-					vector[3] = NULL;
-					bucket->irq_info = vector;
-					bucket->flags |= IBF_MULTI;
-				}
-			} else {
-				int ent;
-
-				vector = (void **)orig;
-				for (ent = 0; ent < 4; ent++) {
-					if (vector[ent] == NULL) {
-						vector[ent] = action;
-						break;
-					}
-				}
-				if (ent == 4)
-					goto free_and_ebusy;
-			}
-		} else {
-			bucket->irq_info = action;
-			bucket->flags |= IBF_ACTIVE;
-		}
+	bucket->flags |= IBF_ACTIVE;
+	pending = 0;
+	if (bucket != &pil0_dummy_bucket) {
 		pending = bucket->pending;
 		if (pending)
 			bucket->pending = 0;
@@ -456,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	put_ino_in_irqaction(action, irq);
 	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
 
-	if (tmp)
-		tmp->next = action;
-	else
-		*(bucket->pil + irq_action) = action;
+	append_irq_action(bucket->pil, action);
 
 	enable_irq(irq);
 
@@ -468,147 +419,103 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 		atomic_bucket_insert(bucket);
 		set_softint(1 << bucket->pil);
 	}
+
 	spin_unlock_irqrestore(&irq_action_lock, flags);
-	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
+
+	if (bucket != &pil0_dummy_bucket)
 		register_irq_proc(__irq_ino(irq));
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
 #endif
 	return 0;
-
-free_and_ebusy:
-	kfree(action);
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-	return -EBUSY;
-
-free_and_enomem:
-	kfree(action);
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-	return -ENOMEM;
 }
 
 EXPORT_SYMBOL(request_irq);
 
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
 {
-	struct irqaction *action;
-	struct irqaction *tmp = NULL;
-	unsigned long flags;
-	struct ino_bucket *bucket = __bucket(irq), *bp;
-
-	if ((bucket != &pil0_dummy_bucket) &&
-	    (bucket < &ivector_table[0] ||
-	     bucket >= &ivector_table[NUM_IVECS])) {
-		unsigned int *caller;
+	struct ino_bucket *bucket = __bucket(irq);
+	struct irqaction *action, **pp;
 
-		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
-		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
-		       "from %p, irq %08x.\n", caller, irq);
-		return;
-	}
-
-	spin_lock_irqsave(&irq_action_lock, flags);
+	pp = irq_action + bucket->pil;
+	action = *pp;
+	if (unlikely(!action))
+		return NULL;
 
-	action = *(bucket->pil + irq_action);
-	if (!action->handler) {
+	if (unlikely(!action->handler)) {
 		printk("Freeing free IRQ %d\n", bucket->pil);
-		return;
-	}
-	if (dev_id) {
-		for ( ; action; action = action->next) {
-			if (action->dev_id == dev_id)
-				break;
-			tmp = action;
-		}
-		if (!action) {
-			printk("Trying to free free shared IRQ %d\n", bucket->pil);
-			spin_unlock_irqrestore(&irq_action_lock, flags);
-			return;
-		}
-	} else if (action->flags & SA_SHIRQ) {
-		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return;
+		return NULL;
 	}
 
-	if (action->flags & SA_STATIC_ALLOC) {
-		printk("Attempt to free statically allocated IRQ %d (%s)\n",
-		       bucket->pil, action->name);
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return;
+	while (action && action->dev_id != dev_id) {
+		pp = &action->next;
+		action = *pp;
 	}
 
-	if (action && tmp)
-		tmp->next = action->next;
-	else
-		*(bucket->pil + irq_action) = action->next;
+	if (likely(action))
+		*pp = action->next;
+
+	return action;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+	struct irqaction *action;
+	struct ino_bucket *bucket;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	action = unlink_irq_action(irq, dev_id);
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
+	if (unlikely(!action))
+		return;
+
 	synchronize_irq(irq);
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
+	bucket = __bucket(irq);
 	if (bucket != &pil0_dummy_bucket) {
+		struct irq_desc *desc = bucket->irq_info;
 		unsigned long imap = bucket->imap;
-		void **vector, *orig;
-		int ent;
-
-		orig = bucket->irq_info;
-		vector = (void **)orig;
-
-		if ((bucket->flags & IBF_MULTI) != 0) {
-			int other = 0;
-			void *orphan = NULL;
-			for (ent = 0; ent < 4; ent++) {
-				if (vector[ent] == action)
-					vector[ent] = NULL;
-				else if (vector[ent] != NULL) {
-					orphan = vector[ent];
-					other++;
-				}
-			}
+		int ent, i;
 
-			/* Only free when no other shared irq
-			 * uses this bucket.
-			 */
-			if (other) {
-				if (other == 1) {
-					/* Convert back to non-shared bucket. */
-					bucket->irq_info = orphan;
-					bucket->flags &= ~(IBF_MULTI);
-					kfree(vector);
-				}
-				goto out;
+		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+			struct irqaction *p = &desc->action[i];
+
+			if (p == action) {
+				desc->action_active_mask &= ~(1 << i);
+				break;
 			}
-		} else {
-			bucket->irq_info = NULL;
 		}
 
-		/* This unique interrupt source is now inactive. */
-		bucket->flags &= ~IBF_ACTIVE;
+		if (!desc->action_active_mask) {
+			/* This unique interrupt source is now inactive. */
+			bucket->flags &= ~IBF_ACTIVE;
 
-		/* See if any other buckets share this bucket's IMAP
-		 * and are still active.
-		 */
-		for (ent = 0; ent < NUM_IVECS; ent++) {
-			bp = &ivector_table[ent];
-			if (bp != bucket &&
-			    bp->imap == imap &&
-			    (bp->flags & IBF_ACTIVE) != 0)
-				break;
-		}
+			/* See if any other buckets share this bucket's IMAP
+			 * and are still active.
+			 */
+			for (ent = 0; ent < NUM_IVECS; ent++) {
+				struct ino_bucket *bp = &ivector_table[ent];
+				if (bp != bucket &&
+				    bp->imap == imap &&
+				    (bp->flags & IBF_ACTIVE) != 0)
+					break;
+			}
 
-		/* Only disable when no other sub-irq levels of
-		 * the same IMAP are active.
-		 */
-		if (ent == NUM_IVECS)
-			disable_irq(irq);
+			/* Only disable when no other sub-irq levels of
+			 * the same IMAP are active.
+			 */
+			if (ent == NUM_IVECS)
+				disable_irq(irq);
+		}
 	}
 
-out:
-	kfree(action);
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 }
 
@@ -647,99 +554,55 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */
 
-void catch_disabled_ivec(struct pt_regs *regs)
-{
-	int cpu = smp_processor_id();
-	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
-
-	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
-	 * to other devices.  Here a single IMAP enabled potentially multiple
-	 * unique interrupt sources (which each do have a unique ICLR register.
-	 *
-	 * So what we do is just register that the IVEC arrived, when registered
-	 * for real the request_irq() code will check the bit and signal
-	 * a local CPU interrupt for it.
-	 */
-#if 0
-	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
-	       bucket - &ivector_table[0], regs->tpc);
-#endif
-	*irq_work(cpu, 0) = 0;
-	bucket->pending = 1;
-}
-
-/* Tune this... */
-#define FORWARD_VOLUME		12
-
-#ifdef CONFIG_SMP
-
-static inline void redirect_intr(int cpu, struct ino_bucket *bp)
+static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 {
-	/* Ok, here is what is going on:
-	 * 1) Retargeting IRQs on Starfire is very
-	 *    expensive so just forget about it on them.
-	 * 2) Moving around very high priority interrupts
-	 *    is a losing game.
-	 * 3) If the current cpu is idle, interrupts are
-	 *    useful work, so keep them here.  But do not
-	 *    pass to our neighbour if he is not very idle.
-	 * 4) If sysadmin explicitly asks for directed intrs,
-	 *    Just Do It.
-	 */
-	struct irqaction *ap = bp->irq_info;
-	cpumask_t cpu_mask;
-	unsigned int buddy, ticks;
+	struct irq_desc *desc = bp->irq_info;
+	unsigned char flags = bp->flags;
+	u32 action_mask, i;
+	int random;
 
-	cpu_mask = get_smpaff_in_irqaction(ap);
-	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
-	if (cpus_empty(cpu_mask))
-		cpu_mask = cpu_online_map;
+	bp->flags |= IBF_INPROGRESS;
 
-	if (this_is_starfire != 0 ||
-	    bp->pil >= 10 || current->pid == 0)
+	if (unlikely(!(flags & IBF_ACTIVE))) {
+		bp->pending = 1;
 		goto out;
+	}
-
-	/* 'cpu' is the MID (ie. UPAID), calculate the MID
-	 * of our buddy.
-	 */
-	buddy = cpu + 1;
-	if (buddy >= NR_CPUS)
-		buddy = 0;
-
-	ticks = 0;
-	while (!cpu_isset(buddy, cpu_mask)) {
-		if (++buddy >= NR_CPUS)
-			buddy = 0;
-		if (++ticks > NR_CPUS) {
-			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
-			goto out;
-		}
-	}
 
-	if (buddy == cpu)
-		goto out;
+	if (desc->pre_handler)
+		desc->pre_handler(bp,
+				  desc->pre_handler_arg1,
+				  desc->pre_handler_arg2);
 
-	/* Voo-doo programming. */
-	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
-		goto out;
+	action_mask = desc->action_active_mask;
+	random = 0;
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];
+		u32 mask = (1 << i);
 
-	/* This just so happens to be correct on Cheetah
-	 * at the moment.
-	 */
-	buddy <<= 26;
+		if (!(action_mask & mask))
+			continue;
+
+		action_mask &= ~mask;
 
-	/* Push it to our buddy. */
-	upa_writel(buddy | IMAP_VALID, bp->imap);
+		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
+			random |= p->flags;
 
+		if (!action_mask)
+			break;
+	}
+	if (bp->pil != 0) {
+		upa_writel(ICLR_IDLE, bp->iclr);
+		/* Test and add entropy */
+		if (random & SA_SAMPLE_RANDOM)
+			add_interrupt_randomness(irq);
+	}
 out:
-	return;
+	bp->flags &= ~IBF_INPROGRESS;
 }
 
-#endif
-
 void handler_irq(int irq, struct pt_regs *regs)
 {
-	struct ino_bucket *bp, *nbp;
+	struct ino_bucket *bp;
 	int cpu = smp_processor_id();
 
 #ifndef CONFIG_SMP
@@ -757,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs)
 		clear_softint(clr_mask);
 	}
 #else
-	int should_forward = 0;
-
 	clear_softint(1 << irq);
 #endif
 
@@ -773,63 +634,12 @@ void handler_irq(int irq, struct pt_regs *regs)
 #else
 	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
 #endif
-	for ( ; bp != NULL; bp = nbp) {
-		unsigned char flags = bp->flags;
-		unsigned char random = 0;
+	while (bp) {
+		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
-		nbp = __bucket(bp->irq_chain);
 		bp->irq_chain = 0;
-
-		bp->flags |= IBF_INPROGRESS;
-
-		if ((flags & IBF_ACTIVE) != 0) {
-#ifdef CONFIG_PCI
-			if ((flags & IBF_DMA_SYNC) != 0) {
-				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
-				upa_readq(pci_dma_wsync);
-			}
-#endif
-			if ((flags & IBF_MULTI) == 0) {
-				struct irqaction *ap = bp->irq_info;
-				int ret;
-
-				ret = ap->handler(__irq(bp), ap->dev_id, regs);
-				if (ret == IRQ_HANDLED)
-					random |= ap->flags;
-			} else {
-				void **vector = (void **)bp->irq_info;
-				int ent;
-				for (ent = 0; ent < 4; ent++) {
-					struct irqaction *ap = vector[ent];
-					if (ap != NULL) {
-						int ret;
-
-						ret = ap->handler(__irq(bp),
-								  ap->dev_id,
-								  regs);
-						if (ret == IRQ_HANDLED)
-							random |= ap->flags;
-					}
-				}
-			}
-			/* Only the dummy bucket lacks IMAP/ICLR. */
-			if (bp->pil != 0) {
-#ifdef CONFIG_SMP
-				if (should_forward) {
-					redirect_intr(cpu, bp);
-					should_forward = 0;
-				}
-#endif
-				upa_writel(ICLR_IDLE, bp->iclr);
-
-				/* Test and add entropy */
-				if (random & SA_SAMPLE_RANDOM)
-					add_interrupt_randomness(irq);
-			}
-		} else
-			bp->pending = 1;
-
-		bp->flags &= ~IBF_INPROGRESS;
+		process_bucket(irq, bp, regs);
+		bp = nbp;
 	}
 	irq_exit();
 }
@@ -959,7 +769,10 @@ static void distribute_irqs(void)
 	 */
 	for (level = 1; level < NR_IRQS; level++) {
 		struct irqaction *p = irq_action[level];
-		if (level == 12) continue;
+
+		if (level == 12)
+			continue;
+
 		while(p) {
 			cpu = retarget_one_irq(p, cpu);
 			p = p->next;
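Note: taken together, the irq.c changes replace the old overloaded irq_info pointer (a single irqaction, or an IBF_MULTI array of four) with a fixed per-INO struct irq_desc, and fold the bus-specific DMA-sync special cases into the generic pre_handler hook that process_bucket() invokes before the actions. From a driver's point of view nothing changes; sharing a PCI interrupt still looks like this (hypothetical driver, sketch only, using the 2.6-era request_irq prototype above):

static irqreturn_t mydev_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct mydev *dp = dev_id;

	if (!mydev_irq_pending(dp))	/* not ours: another action[] slot owns it */
		return IRQ_NONE;
	mydev_ack_irq(dp);
	return IRQ_HANDLED;
}

	/* Lands in one of the MAX_IRQ_DESC_ACTION (4) slots of the INO's
	 * irq_desc; a fifth sharer now fails in get_action_slot() instead
	 * of going through the old void-pointer vector juggling.
	 */
	err = request_irq(dp->irq, mydev_intr, SA_SHIRQ, "mydev", dp);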
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 534320ef0db2..91ab466d6c66 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1303,8 +1303,7 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
 {
 	u64 tmp;
 
-	/* PROM sets the IRQ retry value too low, increase it. */
-	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);
+	psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5);
 
 	/* Enable arbiter for all PCI slots. */
 	tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 53d333b4a4e8..52bf3431a422 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 	return ret;
 }
 
+/* When a device lives behind a bridge deeper in the PCI bus topology
+ * than APB, a special sequence must run to make sure all pending DMA
+ * transfers at the time of IRQ delivery are visible in the coherency
+ * domain by the cpu.  This sequence is to perform a read on the far
+ * side of the non-APB bridge, then perform a read of Sabre's DMA
+ * write-sync register.
+ */
+static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
+{
+	struct pci_dev *pdev = _arg1;
+	unsigned long sync_reg = (unsigned long) _arg2;
+	u16 _unused;
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
+	sabre_read(sync_reg);
+}
+
 static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
 					   struct pci_dev *pdev,
 					   unsigned int ino)
@@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
 	if (pdev) {
 		struct pcidev_cookie *pcp = pdev->sysdata;
 
-		/* When a device lives behind a bridge deeper in the
-		 * PCI bus topology than APB, a special sequence must
-		 * run to make sure all pending DMA transfers at the
-		 * time of IRQ delivery are visible in the coherency
-		 * domain by the cpu.  This sequence is to perform
-		 * a read on the far side of the non-APB bridge, then
-		 * perform a read of Sabre's DMA write-sync register.
-		 *
-		 * Currently, the PCI_CONFIG register for the device
-		 * is used for this read from the far side of the bridge.
-		 */
 		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
-			bucket->flags |= IBF_DMA_SYNC;
-			bucket->synctab_ent = dma_sync_reg_table_entry++;
-			dma_sync_reg_table[bucket->synctab_ent] =
-				(unsigned long) sabre_pci_config_mkaddr(
-					pcp->pbm,
-					pdev->bus->number, pdev->devfn, PCI_COMMAND);
+			struct pci_controller_info *p = pcp->pbm->parent;
+			struct irq_desc *d = bucket->irq_info;
+
+			d->pre_handler = sabre_wsync_handler;
+			d->pre_handler_arg1 = pdev;
+			d->pre_handler_arg2 = (void *)
+				p->pbm_A.controller_regs + SABRE_WRSYNC;
 		}
 	}
 	return __irq(bucket);
@@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name)
 	 */
 	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
 	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
-	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
 
-	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
-	       p->pbm_A.controller_regs, pci_dma_wsync);
+	printk("PCI: Found SABRE, main regs at %016lx\n",
+	       p->pbm_A.controller_regs);
 
 	/* Clear interrupts */
 
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 5753175b94e6..6a182bb66281 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -15,6 +15,7 @@
 #include <asm/iommu.h>
 #include <asm/irq.h>
 #include <asm/upa.h>
+#include <asm/pstate.h>
 
 #include "pci_impl.h"
 #include "iommu_common.h"
@@ -326,6 +327,44 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 	return ret;
 }
 
+static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
+{
+	unsigned long sync_reg = (unsigned long) _arg2;
+	u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
+	u64 val;
+	int limit;
+
+	schizo_write(sync_reg, mask);
+
+	limit = 100000;
+	val = 0;
+	while (--limit) {
+		val = schizo_read(sync_reg);
+		if (!(val & mask))
+			break;
+	}
+	if (limit <= 0) {
+		printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
+		       val, mask);
+	}
+
+	if (_arg1) {
+		static unsigned char cacheline[64]
+			__attribute__ ((aligned (64)));
+
+		__asm__ __volatile__("rd %%fprs, %0\n\t"
+				     "or %0, %4, %1\n\t"
+				     "wr %1, 0x0, %%fprs\n\t"
+				     "stda %%f0, [%5] %6\n\t"
+				     "wr %0, 0x0, %%fprs\n\t"
+				     "membar #Sync"
+				     : "=&r" (mask), "=&r" (val)
+				     : "0" (mask), "1" (val),
+				       "i" (FPRS_FEF), "r" (&cacheline[0]),
+				       "i" (ASI_BLK_COMMIT_P));
+	}
+}
+
 static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
 				     struct pci_dev *pdev,
 				     unsigned int ino)
@@ -369,6 +408,15 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
 	bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
 	bucket->flags |= IBF_PCI;
 
+	if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
+		struct irq_desc *p = bucket->irq_info;
+
+		p->pre_handler = tomatillo_wsync_handler;
+		p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
+				       (void *) 1 : (void *) 0);
+		p->pre_handler_arg2 = (void *) pbm->sync_reg;
+	}
+
 	return __irq(bucket);
 }
374 422
@@ -885,6 +933,7 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
 
 #define SCHIZO_PCI_CTRL		(0x2000UL)
 #define SCHIZO_PCICTRL_BUS_UNUS	(1UL << 63UL) /* Safari */
+#define SCHIZO_PCICTRL_DTO_INT	(1UL << 61UL) /* Tomatillo */
 #define SCHIZO_PCICTRL_ARB_PRIO	(0x1ff << 52UL) /* Tomatillo */
 #define SCHIZO_PCICTRL_ESLCK	(1UL << 51UL) /* Safari */
 #define SCHIZO_PCICTRL_ERRSLOT	(7UL << 48UL) /* Safari */
@@ -1887,37 +1936,27 @@ static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
 {
 	u64 tmp;
 
-	/* Set IRQ retry to infinity. */
-	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
-		     SCHIZO_IRQ_RETRY_INF);
+	schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5);
 
-	/* Enable arbiter for all PCI slots.  Also, disable PCI interval
-	 * timer so that DTO (Discard TimeOuts) are not reported because
-	 * some Schizo revisions report them erroneously.
-	 */
 	tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
-	if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
-	    pbm->chip_version == 0x5 &&
-	    pbm->chip_revision == 0x1)
-		tmp |= 0x0f;
-	else
-		tmp |= 0xff;
 
-	tmp &= ~SCHIZO_PCICTRL_PTO;
+	/* Enable arbiter for all PCI slots. */
+	tmp |= 0xff;
+
 	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
 	    pbm->chip_version >= 0x2)
 		tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
-	else
-		tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;
 
 	if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
 		tmp |= SCHIZO_PCICTRL_PARK;
+	else
+		tmp &= ~SCHIZO_PCICTRL_PARK;
 
 	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
 	    pbm->chip_version <= 0x1)
-		tmp |= (1UL << 61);
+		tmp |= SCHIZO_PCICTRL_DTO_INT;
 	else
-		tmp &= ~(1UL << 61);
+		tmp &= ~SCHIZO_PCICTRL_DTO_INT;
 
 	if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
 		tmp |= (SCHIZO_PCICTRL_MRM_PREF |
@@ -2015,6 +2054,9 @@ static void __init schizo_pbm_init(struct pci_controller_info *p,
 	pbm->pbm_regs = pr_regs[0].phys_addr;
 	pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
 
+	if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
+		pbm->sync_reg = pr_regs[3].phys_addr + 0x1a18UL;
+
 	sprintf(pbm->name,
 		(chip_type == PBM_CHIP_TYPE_TOMATILLO ?
 		 "TOMATILLO%d PBM%c" :
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 71b4e3807694..b40db389f90b 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -973,7 +973,7 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg
 	int err;
 
 	/* Register IRQ handler. */
-	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
+	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
 			  "timer", NULL);
 
 	if (err) {
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 8f0f46907a81..87302fb14885 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -79,10 +79,6 @@ struct inst {
 
 	unsigned char run_length;
 	unsigned char repeat_byte;
-
-	/* These members manage timeouts for programmed delays */
-	wait_queue_head_t wait_queue;
-	struct timer_list timer_list;
 };
 
 static struct inst instances[BPP_NO];
@@ -297,16 +293,10 @@ static unsigned short get_pins(unsigned minor)
 
 #endif /* __sparc__ */
 
-static void bpp_wake_up(unsigned long val)
-{ wake_up(&instances[val].wait_queue); }
-
 static void snooze(unsigned long snooze_time, unsigned minor)
 {
-	init_timer(&instances[minor].timer_list);
-	instances[minor].timer_list.expires = jiffies + snooze_time + 1;
-	instances[minor].timer_list.data = minor;
-	add_timer(&instances[minor].timer_list);
-	sleep_on (&instances[minor].wait_queue);
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(snooze_time + 1);
 }
 
 static int wait_for(unsigned short set, unsigned short clr,
@@ -880,11 +870,8 @@ static void probeLptPort(unsigned idx)
 	instances[idx].enhanced = 0;
 	instances[idx].direction = 0;
 	instances[idx].mode = COMPATIBILITY;
-	instances[idx].wait_queue = 0;
 	instances[idx].run_length = 0;
 	instances[idx].run_flag = 0;
-	init_timer(&instances[idx].timer_list);
-	instances[idx].timer_list.function = bpp_wake_up;
 	if (!request_region(lpAddr,3, dev_name)) return;
 
 	/*
@@ -977,11 +964,8 @@ static void probeLptPort(unsigned idx)
 	instances[idx].enhanced = 0;
 	instances[idx].direction = 0;
 	instances[idx].mode = COMPATIBILITY;
-	init_waitqueue_head(&instances[idx].wait_queue);
 	instances[idx].run_length = 0;
 	instances[idx].run_flag = 0;
-	init_timer(&instances[idx].timer_list);
-	instances[idx].timer_list.function = bpp_wake_up;
 
 	if (!rp) return;
 
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 018e2e46082b..8b70edcb80dc 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -16,6 +16,18 @@
 #include <asm/pil.h>
 #include <asm/ptrace.h>
 
+struct ino_bucket;
+
+#define MAX_IRQ_DESC_ACTION	4
+
+struct irq_desc {
+	void			(*pre_handler)(struct ino_bucket *, void *, void *);
+	void			*pre_handler_arg1;
+	void			*pre_handler_arg2;
+	u32			action_active_mask;
+	struct irqaction	action[MAX_IRQ_DESC_ACTION];
+};
+
 /* You should not mess with this directly.  That's the job of irq.c.
  *
  * If you make changes here, please update hand coded assembler of
@@ -42,24 +54,11 @@ struct ino_bucket {
 	/* Miscellaneous flags. */
 /*0x06*/unsigned char flags;
 
-	/* This is used to deal with IBF_DMA_SYNC on
-	 * Sabre systems.
-	 */
-/*0x07*/unsigned char synctab_ent;
-
-	/* Reference to handler for this IRQ.  If this is
-	 * non-NULL this means it is active and should be
-	 * serviced.  Else the pending member is set to one
-	 * and later registry of the interrupt checks for
-	 * this condition.
-	 *
-	 * Normally this is just an irq_action structure.
-	 * But, on PCI, if multiple interrupt sources behind
-	 * a bridge have multiple interrupt sources that share
-	 * the same INO bucket, this points to an array of
-	 * pointers to four IRQ action structures.
-	 */
-/*0x08*/void *irq_info;
+	/* Currently unused. */
+/*0x07*/unsigned char __pad;
+
+	/* Reference to IRQ descriptor for this bucket. */
+/*0x08*/struct irq_desc *irq_info;
 
 	/* Sun5 Interrupt Clear Register. */
 /*0x10*/unsigned long iclr;
@@ -69,12 +68,6 @@ struct ino_bucket {
 
 };
 
-#ifdef CONFIG_PCI
-extern unsigned long pci_dma_wsync;
-extern unsigned long dma_sync_reg_table[256];
-extern unsigned char dma_sync_reg_table_entry;
-#endif
-
 /* IMAP/ICLR register defines */
 #define IMAP_VALID	0x80000000	/* IRQ Enabled		*/
 #define IMAP_TID_UPA	0x7c000000	/* UPA TargetID		*/
@@ -90,11 +83,9 @@ extern unsigned char dma_sync_reg_table_entry;
 #define ICLR_PENDING	0x00000003	/* Pending state	*/
 
 /* Only 8-bits are available, be careful. -DaveM */
-#define IBF_DMA_SYNC	0x01	/* DMA synchronization behind PCI bridge needed. */
-#define IBF_PCI		0x02	/* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt.	 */
-#define IBF_ACTIVE	0x04	/* This interrupt is active and has a handler.	 */
-#define IBF_MULTI	0x08	/* On PCI, indicates shared bucket.		 */
-#define IBF_INPROGRESS	0x10	/* IRQ is being serviced.			 */
+#define IBF_PCI		0x02	/* PSYCHO/SABRE/SCHIZO PCI interrupt.	 */
+#define IBF_ACTIVE	0x04	/* Interrupt is active and has a handler.*/
+#define IBF_INPROGRESS	0x10	/* IRQ is being serviced.		 */
 
 #define NUM_IVECS	(IMAP_INR + 1)
 extern struct ino_bucket ivector_table[NUM_IVECS];
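Note: the /*0x00*/-style offset comments in ino_bucket are load-bearing; the hand-coded assembler in entry.S above addresses these fields by literal offset (irq_chain at +0x00, pil at +0x04, irq_info at +0x08, iclr at +0x10). A compile-time guard along these lines would catch layout drift (hypothetical sketch, not part of the patch):

/* Mirrors the offsets hard-coded in arch/sparc64/kernel/entry.S. */
static inline void ino_bucket_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct ino_bucket, pil) != 0x04);
	BUILD_BUG_ON(offsetof(struct ino_bucket, irq_info) != 0x08);
	BUILD_BUG_ON(offsetof(struct ino_bucket, iclr) != 0x10);
}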
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index 4c15610a2bac..38bbbccb4068 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -145,6 +145,9 @@ struct pci_pbm_info {
 	/* Physical address base of PBM registers. */
 	unsigned long			pbm_regs;
 
+	/* Physical address of DMA sync register, if any. */
+	unsigned long			sync_reg;
+
 	/* Opaque 32-bit system bus Port ID. */
 	u32				portid;
 
diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h
index becdf1bc5924..e3059bb4a465 100644
--- a/include/asm-sparc64/signal.h
+++ b/include/asm-sparc64/signal.h
@@ -162,21 +162,6 @@ struct sigstack {
 #define MINSIGSTKSZ	4096
 #define SIGSTKSZ	16384
 
-#ifdef __KERNEL__
-/*
- * DJHR
- * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
- * interrupt handler's irq structure should be statically allocated
- * by the request_irq routine.
- * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
- * of interrupt usage and that sucks.  Also without a flag like this
- * it may be possible for the free_irq routine to attempt to free
- * statically allocated data.. which is NOT GOOD.
- *
- */
-#define SA_STATIC_ALLOC		0x80
-#endif
-
 #include <asm-generic/signal.h>
 
 struct __new_sigaction {
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 70a4ebb5d964..ecb0d39c0798 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -346,10 +346,27 @@ COMPATIBLE_IOCTL(PPPOEIOCDFWD)
 /* LP */
 COMPATIBLE_IOCTL(LPGETSTATUS)
 /* ppdev */
+COMPATIBLE_IOCTL(PPSETMODE)
+COMPATIBLE_IOCTL(PPRSTATUS)
+COMPATIBLE_IOCTL(PPRCONTROL)
+COMPATIBLE_IOCTL(PPWCONTROL)
+COMPATIBLE_IOCTL(PPFCONTROL)
+COMPATIBLE_IOCTL(PPRDATA)
+COMPATIBLE_IOCTL(PPWDATA)
 COMPATIBLE_IOCTL(PPCLAIM)
 COMPATIBLE_IOCTL(PPRELEASE)
-COMPATIBLE_IOCTL(PPEXCL)
 COMPATIBLE_IOCTL(PPYIELD)
+COMPATIBLE_IOCTL(PPEXCL)
+COMPATIBLE_IOCTL(PPDATADIR)
+COMPATIBLE_IOCTL(PPNEGOT)
+COMPATIBLE_IOCTL(PPWCTLONIRQ)
+COMPATIBLE_IOCTL(PPCLRIRQ)
+COMPATIBLE_IOCTL(PPSETPHASE)
+COMPATIBLE_IOCTL(PPGETMODES)
+COMPATIBLE_IOCTL(PPGETMODE)
+COMPATIBLE_IOCTL(PPGETPHASE)
+COMPATIBLE_IOCTL(PPGETFLAGS)
+COMPATIBLE_IOCTL(PPSETFLAGS)
 /* CDROM stuff */
 COMPATIBLE_IOCTL(CDROMPAUSE)
 COMPATIBLE_IOCTL(CDROMRESUME)
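Note: COMPATIBLE_IOCTL() declares that a command needs no 32-to-64-bit translation: the ioctl number and its argument layout are identical for 32-bit and 64-bit callers. The ppdev commands added above qualify because they encode fixed-width scalars. For reference, a representative subset of their definitions, which live in include/linux/ppdev.h (quoted from memory; consult that header for the authoritative list):

#define PP_IOCTL	'p'
#define PPSETMODE	_IOW(PP_IOCTL, 0x80, int)
#define PPRSTATUS	_IOR(PP_IOCTL, 0x81, unsigned char)
#define PPRDATA		_IOR(PP_IOCTL, 0x85, unsigned char)
#define PPWDATA		_IOW(PP_IOCTL, 0x86, unsigned char)
#define PPCLAIM		_IO(PP_IOCTL, 0x8b)
#define PPRELEASE	_IO(PP_IOCTL, 0x8c)

An ioctl carrying a pointer-bearing struct, whose size differs per ABI, would instead need a HANDLE_IOCTL() translation entry.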