path: root/arch/sparc64/kernel/irq.c
Diffstat (limited to 'arch/sparc64/kernel/irq.c')
-rw-r--r--  arch/sparc64/kernel/irq.c  779
1 file changed, 259 insertions(+), 520 deletions(-)
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 4dcb8af94090..c9b69167632a 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -37,6 +37,7 @@
 #include <asm/uaccess.h>
 #include <asm/cache.h>
 #include <asm/cpudata.h>
+#include <asm/auxio.h>
 
 #ifdef CONFIG_SMP
 static void distribute_irqs(void);
@@ -70,31 +71,7 @@ struct irq_work_struct {
 struct irq_work_struct __irq_work[NR_CPUS];
 #define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])
 
-#ifdef CONFIG_PCI
-/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
- * It is used for PCI only to synchronize DMA transfers with IRQ delivery
- * for devices behind busses other than APB on Sabre systems.
- *
- * Currently these physical addresses are just config space accesses
- * to the command register for that device.
- */
-unsigned long pci_dma_wsync;
-unsigned long dma_sync_reg_table[256];
-unsigned char dma_sync_reg_table_entry = 0;
-#endif
-
-/* This is based upon code in the 32-bit Sparc kernel written mostly by
- * David Redman (djhr@tadpole.co.uk).
- */
-#define MAX_STATIC_ALLOC 4
-static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
-static int static_irq_count;
-
-/* This is exported so that fast IRQ handlers can get at it... -DaveM */
-struct irqaction *irq_action[NR_IRQS+1] = {
-	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
-	NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
-};
+static struct irqaction *irq_action[NR_IRQS+1];
 
 /* This only synchronizes entities which modify IRQ handler
  * state and some selected user-level spots that want to
@@ -240,17 +217,22 @@ void disable_irq(unsigned int irq)
  * the CPU %tick register and not by some normal vectored interrupt
  * source. To handle this special case, we use this dummy INO bucket.
  */
+static struct irq_desc pil0_dummy_desc;
 static struct ino_bucket pil0_dummy_bucket = {
-	0,	/* irq_chain */
-	0,	/* pil */
-	0,	/* pending */
-	0,	/* flags */
-	0,	/* __unused */
-	NULL,	/* irq_info */
-	0UL,	/* iclr */
-	0UL,	/* imap */
+	.irq_info = &pil0_dummy_desc,
 };
 
+static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
+			    unsigned long iclr, unsigned long imap,
+			    struct ino_bucket *bucket)
+{
+	prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
+		    "(%d:%d:%016lx:%016lx), halting...\n",
+		    ino, bucket->pil, bucket->iclr, bucket->imap,
+		    pil, inofixup, iclr, imap);
+	prom_halt();
+}
+
 unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
 {
 	struct ino_bucket *bucket;
@@ -279,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 		prom_halt();
 	}
 
-	/* Ok, looks good, set it up. Don't touch the irq_chain or
-	 * the pending flag.
-	 */
 	bucket = &ivector_table[ino];
-	if ((bucket->flags & IBF_ACTIVE) ||
-	    (bucket->irq_info != NULL)) {
-		/* This is a gross fatal error if it happens here. */
-		prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n");
-		prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n",
-			    ino, pil, inofixup, iclr, imap);
-		prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n",
-			    bucket->pil, bucket->iclr, bucket->imap);
-		prom_printf("IRQ: Cannot continue, halting...\n");
+	if (bucket->flags & IBF_ACTIVE)
+		build_irq_error("IRQ: Trying to build active INO bucket.\n",
+				ino, pil, inofixup, iclr, imap, bucket);
+
+	if (bucket->irq_info) {
+		if (bucket->imap != imap || bucket->iclr != iclr)
+			build_irq_error("IRQ: Trying to reinit INO bucket.\n",
+					ino, pil, inofixup, iclr, imap, bucket);
+
+		goto out;
+	}
+
+	bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
+	if (!bucket->irq_info) {
+		prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
 		prom_halt();
 	}
+	memset(bucket->irq_info, 0, sizeof(struct irq_desc));
+
+	/* Ok, looks good, set it up. Don't touch the irq_chain or
+	 * the pending flag.
+	 */
 	bucket->imap = imap;
 	bucket->iclr = iclr;
 	bucket->pil = pil;
 	bucket->flags = 0;
 
-	bucket->irq_info = NULL;
-
+out:
 	return __irq(bucket);
 }
 
@@ -318,26 +307,65 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
 }
 
+static int check_irq_sharing(int pil, unsigned long irqflags)
+{
+	struct irqaction *action, *tmp;
+
+	action = *(irq_action + pil);
+	if (action) {
+		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
+			for (tmp = action; tmp->next; tmp = tmp->next)
+				;
+		} else {
+			return -EBUSY;
+		}
+	}
+	return 0;
+}
+
+static void append_irq_action(int pil, struct irqaction *action)
+{
+	struct irqaction **pp = irq_action + pil;
+
+	while (*pp)
+		pp = &((*pp)->next);
+	*pp = action;
+}
+
+static struct irqaction *get_action_slot(struct ino_bucket *bucket)
+{
+	struct irq_desc *desc = bucket->irq_info;
+	int max_irq, i;
+
+	max_irq = 1;
+	if (bucket->flags & IBF_PCI)
+		max_irq = MAX_IRQ_DESC_ACTION;
+	for (i = 0; i < max_irq; i++) {
+		struct irqaction *p = &desc->action[i];
+		u32 mask = (1 << i);
+
+		if (desc->action_active_mask & mask)
+			continue;
+
+		desc->action_active_mask |= mask;
+		return p;
+	}
+	return NULL;
+}
+
 int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
 		unsigned long irqflags, const char *name, void *dev_id)
 {
-	struct irqaction *action, *tmp = NULL;
+	struct irqaction *action;
 	struct ino_bucket *bucket = __bucket(irq);
 	unsigned long flags;
 	int pending = 0;
 
-	if ((bucket != &pil0_dummy_bucket) &&
-	    (bucket < &ivector_table[0] ||
-	     bucket >= &ivector_table[NUM_IVECS])) {
-		unsigned int *caller;
-
-		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
-		printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
-		       "from %p, irq %08x.\n", caller, irq);
+	if (unlikely(!handler))
 		return -EINVAL;
-	}
-	if (!handler)
-		return -EINVAL;
+
+	if (unlikely(!bucket->irq_info))
+		return -ENODEV;
 
 	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
 		/*
@@ -355,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(bucket->pil + irq_action);
-	if (action) {
-		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ))
-			for (tmp = action; tmp->next; tmp = tmp->next)
-				;
-		else {
-			spin_unlock_irqrestore(&irq_action_lock, flags);
-			return -EBUSY;
-		}
-		action = NULL; /* Or else! */
+	if (check_irq_sharing(bucket->pil, irqflags)) {
+		spin_unlock_irqrestore(&irq_action_lock, flags);
+		return -EBUSY;
 	}
 
-	/* If this is flagged as statically allocated then we use our
-	 * private struct which is never freed.
-	 */
-	if (irqflags & SA_STATIC_ALLOC) {
-		if (static_irq_count < MAX_STATIC_ALLOC)
-			action = &static_irqaction[static_irq_count++];
-		else
-			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
-			       "using kmalloc\n", irq, name);
-	}
-	if (action == NULL)
-		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
-						     GFP_ATOMIC);
-
+	action = get_action_slot(bucket);
 	if (!action) {
 		spin_unlock_irqrestore(&irq_action_lock, flags);
 		return -ENOMEM;
 	}
 
-	if (bucket == &pil0_dummy_bucket) {
-		bucket->irq_info = action;
-		bucket->flags |= IBF_ACTIVE;
-	} else {
-		if ((bucket->flags & IBF_ACTIVE) != 0) {
-			void *orig = bucket->irq_info;
-			void **vector = NULL;
-
-			if ((bucket->flags & IBF_PCI) == 0) {
-				printk("IRQ: Trying to share non-PCI bucket.\n");
-				goto free_and_ebusy;
-			}
-			if ((bucket->flags & IBF_MULTI) == 0) {
-				vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
-				if (vector == NULL)
-					goto free_and_enomem;
-
-				/* We might have slept. */
-				if ((bucket->flags & IBF_MULTI) != 0) {
-					int ent;
-
-					kfree(vector);
-					vector = (void **)bucket->irq_info;
-					for(ent = 0; ent < 4; ent++) {
-						if (vector[ent] == NULL) {
-							vector[ent] = action;
-							break;
-						}
-					}
-					if (ent == 4)
-						goto free_and_ebusy;
-				} else {
-					vector[0] = orig;
-					vector[1] = action;
-					vector[2] = NULL;
-					vector[3] = NULL;
-					bucket->irq_info = vector;
-					bucket->flags |= IBF_MULTI;
-				}
-			} else {
-				int ent;
-
-				vector = (void **)orig;
-				for (ent = 0; ent < 4; ent++) {
-					if (vector[ent] == NULL) {
-						vector[ent] = action;
-						break;
-					}
-				}
-				if (ent == 4)
-					goto free_and_ebusy;
-			}
-		} else {
-			bucket->irq_info = action;
-			bucket->flags |= IBF_ACTIVE;
-		}
+	bucket->flags |= IBF_ACTIVE;
+	pending = 0;
+	if (bucket != &pil0_dummy_bucket) {
 		pending = bucket->pending;
 		if (pending)
 			bucket->pending = 0;
@@ -455,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	put_ino_in_irqaction(action, irq);
 	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
 
-	if (tmp)
-		tmp->next = action;
-	else
-		*(bucket->pil + irq_action) = action;
+	append_irq_action(bucket->pil, action);
 
 	enable_irq(irq);
 
@@ -467,147 +419,103 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 		atomic_bucket_insert(bucket);
 		set_softint(1 << bucket->pil);
 	}
+
 	spin_unlock_irqrestore(&irq_action_lock, flags);
-	if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC)))
+
+	if (bucket != &pil0_dummy_bucket)
 		register_irq_proc(__irq_ino(irq));
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
 #endif
 	return 0;
-
-free_and_ebusy:
-	kfree(action);
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-	return -EBUSY;
-
-free_and_enomem:
-	kfree(action);
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-	return -ENOMEM;
 }
 
 EXPORT_SYMBOL(request_irq);
 
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
 {
-	struct irqaction *action;
-	struct irqaction *tmp = NULL;
-	unsigned long flags;
-	struct ino_bucket *bucket = __bucket(irq), *bp;
-
-	if ((bucket != &pil0_dummy_bucket) &&
-	    (bucket < &ivector_table[0] ||
-	     bucket >= &ivector_table[NUM_IVECS])) {
-		unsigned int *caller;
+	struct ino_bucket *bucket = __bucket(irq);
+	struct irqaction *action, **pp;
 
-		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
-		printk(KERN_CRIT "free_irq: Old style IRQ removal attempt "
-		       "from %p, irq %08x.\n", caller, irq);
-		return;
-	}
-
-	spin_lock_irqsave(&irq_action_lock, flags);
+	pp = irq_action + bucket->pil;
+	action = *pp;
+	if (unlikely(!action))
+		return NULL;
 
-	action = *(bucket->pil + irq_action);
-	if (!action->handler) {
+	if (unlikely(!action->handler)) {
 		printk("Freeing free IRQ %d\n", bucket->pil);
-		return;
-	}
-	if (dev_id) {
-		for ( ; action; action = action->next) {
-			if (action->dev_id == dev_id)
-				break;
-			tmp = action;
-		}
-		if (!action) {
-			printk("Trying to free free shared IRQ %d\n", bucket->pil);
-			spin_unlock_irqrestore(&irq_action_lock, flags);
-			return;
-		}
-	} else if (action->flags & SA_SHIRQ) {
-		printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return;
+		return NULL;
 	}
 
-	if (action->flags & SA_STATIC_ALLOC) {
-		printk("Attempt to free statically allocated IRQ %d (%s)\n",
-		       bucket->pil, action->name);
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return;
+	while (action && action->dev_id != dev_id) {
+		pp = &action->next;
+		action = *pp;
 	}
 
-	if (action && tmp)
-		tmp->next = action->next;
-	else
-		*(bucket->pil + irq_action) = action->next;
+	if (likely(action))
+		*pp = action->next;
+
+	return action;
+}
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+	struct irqaction *action;
+	struct ino_bucket *bucket;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irq_action_lock, flags);
+
+	action = unlink_irq_action(irq, dev_id);
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
+	if (unlikely(!action))
+		return;
+
 	synchronize_irq(irq);
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
+	bucket = __bucket(irq);
 	if (bucket != &pil0_dummy_bucket) {
+		struct irq_desc *desc = bucket->irq_info;
 		unsigned long imap = bucket->imap;
-		void **vector, *orig;
-		int ent;
-
-		orig = bucket->irq_info;
-		vector = (void **)orig;
-
-		if ((bucket->flags & IBF_MULTI) != 0) {
-			int other = 0;
-			void *orphan = NULL;
-			for (ent = 0; ent < 4; ent++) {
-				if (vector[ent] == action)
-					vector[ent] = NULL;
-				else if (vector[ent] != NULL) {
-					orphan = vector[ent];
-					other++;
-				}
-			}
+		int ent, i;
 
-			/* Only free when no other shared irq
-			 * uses this bucket.
-			 */
-			if (other) {
-				if (other == 1) {
-					/* Convert back to non-shared bucket. */
-					bucket->irq_info = orphan;
-					bucket->flags &= ~(IBF_MULTI);
-					kfree(vector);
-				}
-				goto out;
+		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+			struct irqaction *p = &desc->action[i];
+
+			if (p == action) {
+				desc->action_active_mask &= ~(1 << i);
+				break;
 			}
-		} else {
-			bucket->irq_info = NULL;
 		}
 
-		/* This unique interrupt source is now inactive. */
-		bucket->flags &= ~IBF_ACTIVE;
+		if (!desc->action_active_mask) {
+			/* This unique interrupt source is now inactive. */
+			bucket->flags &= ~IBF_ACTIVE;
 
-		/* See if any other buckets share this bucket's IMAP
-		 * and are still active.
-		 */
-		for (ent = 0; ent < NUM_IVECS; ent++) {
-			bp = &ivector_table[ent];
-			if (bp != bucket &&
-			    bp->imap == imap &&
-			    (bp->flags & IBF_ACTIVE) != 0)
-				break;
-		}
+			/* See if any other buckets share this bucket's IMAP
+			 * and are still active.
+			 */
+			for (ent = 0; ent < NUM_IVECS; ent++) {
+				struct ino_bucket *bp = &ivector_table[ent];
+				if (bp != bucket &&
+				    bp->imap == imap &&
+				    (bp->flags & IBF_ACTIVE) != 0)
+					break;
+			}
 
-		/* Only disable when no other sub-irq levels of
-		 * the same IMAP are active.
-		 */
-		if (ent == NUM_IVECS)
-			disable_irq(irq);
+			/* Only disable when no other sub-irq levels of
+			 * the same IMAP are active.
+			 */
+			if (ent == NUM_IVECS)
+				disable_irq(irq);
+		}
 	}
 
-out:
-	kfree(action);
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 }
 
@@ -646,99 +554,55 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */
 
-void catch_disabled_ivec(struct pt_regs *regs)
+static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
-	struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
+	struct irq_desc *desc = bp->irq_info;
+	unsigned char flags = bp->flags;
+	u32 action_mask, i;
+	int random;
 
-	/* We can actually see this on Ultra/PCI PCI cards, which are bridges
-	 * to other devices. Here a single IMAP enabled potentially multiple
-	 * unique interrupt sources (which each do have a unique ICLR register.
-	 *
-	 * So what we do is just register that the IVEC arrived, when registered
-	 * for real the request_irq() code will check the bit and signal
-	 * a local CPU interrupt for it.
-	 */
-#if 0
-	printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
-	       bucket - &ivector_table[0], regs->tpc);
-#endif
-	*irq_work(cpu, 0) = 0;
-	bucket->pending = 1;
-}
+	bp->flags |= IBF_INPROGRESS;
 
-/* Tune this... */
-#define FORWARD_VOLUME		12
-
-#ifdef CONFIG_SMP
-
-static inline void redirect_intr(int cpu, struct ino_bucket *bp)
-{
-	/* Ok, here is what is going on:
-	 * 1) Retargeting IRQs on Starfire is very
-	 *    expensive so just forget about it on them.
-	 * 2) Moving around very high priority interrupts
-	 *    is a losing game.
-	 * 3) If the current cpu is idle, interrupts are
-	 *    useful work, so keep them here. But do not
-	 *    pass to our neighbour if he is not very idle.
-	 * 4) If sysadmin explicitly asks for directed intrs,
-	 *    Just Do It.
-	 */
-	struct irqaction *ap = bp->irq_info;
-	cpumask_t cpu_mask;
-	unsigned int buddy, ticks;
-
-	cpu_mask = get_smpaff_in_irqaction(ap);
-	cpus_and(cpu_mask, cpu_mask, cpu_online_map);
-	if (cpus_empty(cpu_mask))
-		cpu_mask = cpu_online_map;
-
-	if (this_is_starfire != 0 ||
-	    bp->pil >= 10 || current->pid == 0)
+	if (unlikely(!(flags & IBF_ACTIVE))) {
+		bp->pending = 1;
 		goto out;
-
-	/* 'cpu' is the MID (ie. UPAID), calculate the MID
-	 * of our buddy.
-	 */
-	buddy = cpu + 1;
-	if (buddy >= NR_CPUS)
-		buddy = 0;
-
-	ticks = 0;
-	while (!cpu_isset(buddy, cpu_mask)) {
-		if (++buddy >= NR_CPUS)
-			buddy = 0;
-		if (++ticks > NR_CPUS) {
-			put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
-			goto out;
-		}
 	}
 
-	if (buddy == cpu)
-		goto out;
+	if (desc->pre_handler)
+		desc->pre_handler(bp,
+				  desc->pre_handler_arg1,
+				  desc->pre_handler_arg2);
 
-	/* Voo-doo programming. */
-	if (cpu_data(buddy).idle_volume < FORWARD_VOLUME)
-		goto out;
+	action_mask = desc->action_active_mask;
+	random = 0;
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];
+		u32 mask = (1 << i);
 
-	/* This just so happens to be correct on Cheetah
-	 * at the moment.
-	 */
-	buddy <<= 26;
+		if (!(action_mask & mask))
+			continue;
 
-	/* Push it to our buddy. */
-	upa_writel(buddy | IMAP_VALID, bp->imap);
+		action_mask &= ~mask;
 
+		if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
+			random |= p->flags;
+
+		if (!action_mask)
+			break;
+	}
+	if (bp->pil != 0) {
+		upa_writel(ICLR_IDLE, bp->iclr);
+		/* Test and add entropy */
+		if (random & SA_SAMPLE_RANDOM)
+			add_interrupt_randomness(irq);
+	}
 out:
-	return;
+	bp->flags &= ~IBF_INPROGRESS;
 }
 
-#endif
-
 void handler_irq(int irq, struct pt_regs *regs)
 {
-	struct ino_bucket *bp, *nbp;
+	struct ino_bucket *bp;
 	int cpu = smp_processor_id();
 
 #ifndef CONFIG_SMP
@@ -756,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs)
 		clear_softint(clr_mask);
 	}
 #else
-	int should_forward = 0;
-
 	clear_softint(1 << irq);
 #endif
 
@@ -772,199 +634,76 @@ void handler_irq(int irq, struct pt_regs *regs)
 #else
 	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
 #endif
-	for ( ; bp != NULL; bp = nbp) {
-		unsigned char flags = bp->flags;
-		unsigned char random = 0;
+	while (bp) {
+		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
-		nbp = __bucket(bp->irq_chain);
 		bp->irq_chain = 0;
-
-		bp->flags |= IBF_INPROGRESS;
-
-		if ((flags & IBF_ACTIVE) != 0) {
-#ifdef CONFIG_PCI
-			if ((flags & IBF_DMA_SYNC) != 0) {
-				upa_readl(dma_sync_reg_table[bp->synctab_ent]);
-				upa_readq(pci_dma_wsync);
-			}
-#endif
-			if ((flags & IBF_MULTI) == 0) {
-				struct irqaction *ap = bp->irq_info;
-				int ret;
-
-				ret = ap->handler(__irq(bp), ap->dev_id, regs);
-				if (ret == IRQ_HANDLED)
-					random |= ap->flags;
-			} else {
-				void **vector = (void **)bp->irq_info;
-				int ent;
-				for (ent = 0; ent < 4; ent++) {
-					struct irqaction *ap = vector[ent];
-					if (ap != NULL) {
-						int ret;
-
-						ret = ap->handler(__irq(bp),
-								  ap->dev_id,
-								  regs);
-						if (ret == IRQ_HANDLED)
-							random |= ap->flags;
-					}
-				}
-			}
-			/* Only the dummy bucket lacks IMAP/ICLR. */
-			if (bp->pil != 0) {
-#ifdef CONFIG_SMP
-				if (should_forward) {
-					redirect_intr(cpu, bp);
-					should_forward = 0;
-				}
-#endif
-				upa_writel(ICLR_IDLE, bp->iclr);
-
-				/* Test and add entropy */
-				if (random & SA_SAMPLE_RANDOM)
-					add_interrupt_randomness(irq);
-			}
-		} else
-			bp->pending = 1;
-
-		bp->flags &= ~IBF_INPROGRESS;
+		process_bucket(irq, bp, regs);
+		bp = nbp;
 	}
 	irq_exit();
 }
 
 #ifdef CONFIG_BLK_DEV_FD
-extern void floppy_interrupt(int irq, void *dev_cookie, struct pt_regs *regs);
+extern irqreturn_t floppy_interrupt(int, void *, struct pt_regs *);;
 
-void sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
-{
-	struct irqaction *action = *(irq + irq_action);
-	struct ino_bucket *bucket;
-	int cpu = smp_processor_id();
-
-	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
-
-	*(irq_work(cpu, irq)) = 0;
-	bucket = get_ino_in_irqaction(action) + ivector_table;
-
-	bucket->flags |= IBF_INPROGRESS;
-
-	floppy_interrupt(irq, dev_cookie, regs);
-	upa_writel(ICLR_IDLE, bucket->iclr);
-
-	bucket->flags &= ~IBF_INPROGRESS;
-
-	irq_exit();
-}
-#endif
-
-/* The following assumes that the branch lies before the place we
- * are branching to. This is the case for a trap vector...
- * You have been warned.
- */
-#define SPARC_BRANCH(dest_addr, inst_addr) \
-          (0x10800000 | ((((dest_addr)-(inst_addr))>>2)&0x3fffff))
+/* XXX No easy way to include asm/floppy.h XXX */
+extern unsigned char *pdma_vaddr;
+extern unsigned long pdma_size;
+extern volatile int doing_pdma;
+extern unsigned long fdc_status;
 
-#define SPARC_NOP (0x01000000)
-
-static void install_fast_irq(unsigned int cpu_irq,
-			     irqreturn_t (*handler)(int, void *, struct pt_regs *))
-{
-	extern unsigned long sparc64_ttable_tl0;
-	unsigned long ttent = (unsigned long) &sparc64_ttable_tl0;
-	unsigned int *insns;
-
-	ttent += 0x820;
-	ttent += (cpu_irq - 1) << 5;
-	insns = (unsigned int *) ttent;
-	insns[0] = SPARC_BRANCH(((unsigned long) handler),
-				((unsigned long)&insns[0]));
-	insns[1] = SPARC_NOP;
-	__asm__ __volatile__("membar #StoreStore; flush %0" : : "r" (ttent));
-}
-
-int request_fast_irq(unsigned int irq,
-		     irqreturn_t (*handler)(int, void *, struct pt_regs *),
-		     unsigned long irqflags, const char *name, void *dev_id)
+irqreturn_t sparc_floppy_irq(int irq, void *dev_cookie, struct pt_regs *regs)
 {
-	struct irqaction *action;
-	struct ino_bucket *bucket = __bucket(irq);
-	unsigned long flags;
-
-	/* No pil0 dummy buckets allowed here. */
-	if (bucket < &ivector_table[0] ||
-	    bucket >= &ivector_table[NUM_IVECS]) {
-		unsigned int *caller;
-
-		__asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
-		printk(KERN_CRIT "request_fast_irq: Old style IRQ registry attempt "
-		       "from %p, irq %08x.\n", caller, irq);
-		return -EINVAL;
-	}
-
-	if (!handler)
-		return -EINVAL;
+	if (likely(doing_pdma)) {
+		void __iomem *stat = (void __iomem *) fdc_status;
+		unsigned char *vaddr = pdma_vaddr;
+		unsigned long size = pdma_size;
+		u8 val;
+
+		while (size) {
+			val = readb(stat);
+			if (unlikely(!(val & 0x80))) {
+				pdma_vaddr = vaddr;
+				pdma_size = size;
+				return IRQ_HANDLED;
+			}
+			if (unlikely(!(val & 0x20))) {
+				pdma_vaddr = vaddr;
+				pdma_size = size;
+				doing_pdma = 0;
+				goto main_interrupt;
+			}
+			if (val & 0x40) {
+				/* read */
+				*vaddr++ = readb(stat + 1);
+			} else {
+				unsigned char data = *vaddr++;
 
-	if ((bucket->pil == 0) || (bucket->pil == 14)) {
-		printk("request_fast_irq: Trying to register shared IRQ 0 or 14.\n");
-		return -EBUSY;
-	}
+				/* write */
+				writeb(data, stat + 1);
+			}
+			size--;
+		}
 
-	spin_lock_irqsave(&irq_action_lock, flags);
+		pdma_vaddr = vaddr;
+		pdma_size = size;
 
-	action = *(bucket->pil + irq_action);
-	if (action) {
-		if (action->flags & SA_SHIRQ)
-			panic("Trying to register fast irq when already shared.\n");
-		if (irqflags & SA_SHIRQ)
-			panic("Trying to register fast irq as shared.\n");
-		printk("request_fast_irq: Trying to register yet already owned.\n");
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return -EBUSY;
-	}
+		/* Send Terminal Count pulse to floppy controller. */
+		val = readb(auxio_register);
+		val |= AUXIO_AUX1_FTCNT;
+		writeb(val, auxio_register);
+		val &= AUXIO_AUX1_FTCNT;
+		writeb(val, auxio_register);
 
-	/*
-	 * We do not check for SA_SAMPLE_RANDOM in this path. Neither do we
-	 * support smp intr affinity in this path.
-	 */
-	if (irqflags & SA_STATIC_ALLOC) {
-		if (static_irq_count < MAX_STATIC_ALLOC)
-			action = &static_irqaction[static_irq_count++];
-		else
-			printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
-			       "using kmalloc\n", bucket->pil, name);
-	}
-	if (action == NULL)
-		action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
-						     GFP_ATOMIC);
-	if (!action) {
-		spin_unlock_irqrestore(&irq_action_lock, flags);
-		return -ENOMEM;
+		doing_pdma = 0;
 	}
-	install_fast_irq(bucket->pil, handler);
 
-	bucket->irq_info = action;
-	bucket->flags |= IBF_ACTIVE;
-
-	action->handler = handler;
-	action->flags = irqflags;
-	action->dev_id = NULL;
-	action->name = name;
-	action->next = NULL;
-	put_ino_in_irqaction(action, irq);
-	put_smpaff_in_irqaction(action, CPU_MASK_NONE);
-
-	*(bucket->pil + irq_action) = action;
-	enable_irq(irq);
-
-	spin_unlock_irqrestore(&irq_action_lock, flags);
-
-#ifdef CONFIG_SMP
-	distribute_irqs();
-#endif
-	return 0;
+main_interrupt:
+	return floppy_interrupt(irq, dev_cookie, regs);
 }
+EXPORT_SYMBOL(sparc_floppy_irq);
+#endif
 
 /* We really don't need these at all on the Sparc. We only have
  * stubs here because they are exported to modules.
@@ -1030,7 +769,10 @@ static void distribute_irqs(void)
 	 */
 	for (level = 1; level < NR_IRQS; level++) {
 		struct irqaction *p = irq_action[level];
-		if (level == 12) continue;
+
+		if (level == 12)
+			continue;
+
 		while(p) {
 			cpu = retarget_one_irq(p, cpu);
 			p = p->next;
@@ -1040,8 +782,14 @@ static void distribute_irqs(void)
 }
 #endif
 
+struct sun5_timer {
+	u64 count0;
+	u64 limit0;
+	u64 count1;
+	u64 limit1;
+};
 
-struct sun5_timer *prom_timers;
+static struct sun5_timer *prom_timers;
 static u64 prom_limit0, prom_limit1;
 
 static void map_prom_timers(void)
@@ -1097,18 +845,6 @@ static void kill_prom_timer(void)
1097 : "g1", "g2"); 845 : "g1", "g2");
1098} 846}
1099 847
1100void enable_prom_timer(void)
1101{
1102 if (!prom_timers)
1103 return;
1104
1105 /* Set it to whatever was there before. */
1106 prom_timers->limit1 = prom_limit1;
1107 prom_timers->count1 = 0;
1108 prom_timers->limit0 = prom_limit0;
1109 prom_timers->count0 = 0;
1110}
1111
1112void init_irqwork_curcpu(void) 848void init_irqwork_curcpu(void)
1113{ 849{
1114 register struct irq_work_struct *workp asm("o2"); 850 register struct irq_work_struct *workp asm("o2");
@@ -1175,7 +911,8 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
 			int count, int *eof, void *data)
 {
 	struct ino_bucket *bp = ivector_table + (long)data;
-	struct irqaction *ap = bp->irq_info;
+	struct irq_desc *desc = bp->irq_info;
+	struct irqaction *ap = desc->action;
 	cpumask_t mask;
 	int len;
 
@@ -1193,11 +930,13 @@ static int irq_affinity_read_proc (char *page, char **start, off_t off,
 static inline void set_intr_affinity(int irq, cpumask_t hw_aff)
 {
 	struct ino_bucket *bp = ivector_table + irq;
+	struct irq_desc *desc = bp->irq_info;
+	struct irqaction *ap = desc->action;
 
 	/* Users specify affinity in terms of hw cpu ids.
 	 * As soon as we do this, handler_irq() might see and take action.
 	 */
-	put_smpaff_in_irqaction((struct irqaction *)bp->irq_info, hw_aff);
+	put_smpaff_in_irqaction(ap, hw_aff);
 
 	/* Migration is simply done by the next cpu to service this
 	 * interrupt.
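
Note: the core of this patch is replacing the old kmalloc'd irqaction chains and IBF_MULTI pointer vectors with a fixed array of MAX_IRQ_DESC_ACTION slots per ino_bucket, tracked by the action_active_mask bitmap (see get_action_slot() in the diff above). The stand-alone C sketch below models only that slot-allocation idea outside the kernel; the demo_* types and the main() harness are illustrative and not part of the patch.

#include <stdio.h>

/* Mirrors the patch: a PCI bucket may hold up to four actions,
 * tracked by a bitmask instead of a kmalloc'd pointer vector.
 */
#define MAX_IRQ_DESC_ACTION	4

struct demo_action { const char *name; };

struct demo_irq_desc {
	unsigned int action_active_mask;
	struct demo_action action[MAX_IRQ_DESC_ACTION];
};

/* Same idea as get_action_slot(): find a free bit, mark it busy,
 * hand back the corresponding pre-allocated slot.
 */
static struct demo_action *demo_get_action_slot(struct demo_irq_desc *desc)
{
	int i;

	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
		unsigned int mask = 1U << i;

		if (desc->action_active_mask & mask)
			continue;
		desc->action_active_mask |= mask;
		return &desc->action[i];
	}
	return NULL;	/* all slots busy: the real code returns -EBUSY/-ENOMEM */
}

/* Same idea as the free_irq() side: clear the bit that owns the slot. */
static void demo_put_action_slot(struct demo_irq_desc *desc, struct demo_action *p)
{
	int i = (int)(p - desc->action);

	desc->action_active_mask &= ~(1U << i);
}

int main(void)
{
	struct demo_irq_desc desc = { 0 };
	struct demo_action *a = demo_get_action_slot(&desc);
	struct demo_action *b = demo_get_action_slot(&desc);

	a->name = "eth0";
	b->name = "scsi";
	printf("mask after two requests: 0x%x\n", desc.action_active_mask);
	demo_put_action_slot(&desc, a);
	printf("mask after freeing one:  0x%x\n", desc.action_active_mask);
	return 0;
}

With this scheme, sharing stays bounded (at most four actions per PCI INO) and teardown in free_irq() becomes a single bit clear instead of converting a shared pointer vector back to a single-action bucket.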