-rw-r--r--   arch/sparc64/kernel/entry.S      |  11
-rw-r--r--   arch/sparc64/kernel/irq.c        | 163
-rw-r--r--   arch/sparc64/kernel/sun4v_ivec.S |  13
-rw-r--r--   arch/sparc64/kernel/time.c       |  16
-rw-r--r--   arch/sparc64/kernel/traps.c      |   4
-rw-r--r--   arch/sparc64/kernel/ttable.S     |  10
-rw-r--r--   include/asm-sparc64/cpudata.h    |  19
-rw-r--r--   include/asm-sparc64/pil.h        |   7
8 files changed, 101 insertions, 142 deletions
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 6d0b3ed77a02..c87365e59e71 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -22,6 +22,7 @@
 #include <asm/estate.h>
 #include <asm/auxio.h>
 #include <asm/sfafsr.h>
+#include <asm/pil.h>
 
 #define curptr	g6
 
@@ -434,17 +435,13 @@ do_ivec:
 	sllx	%g3, 5, %g3
 	or	%g2, %lo(ivector_table), %g2
 	add	%g2, %g3, %g3
-	ldub	[%g3 + 0x04], %g4	/* pil */
-	mov	1, %g2
-	sllx	%g2, %g4, %g2
-	sllx	%g4, 2, %g4
 
 	TRAP_LOAD_IRQ_WORK(%g6, %g1)
 
-	lduw	[%g6 + %g4], %g5	/* g5 = irq_work(cpu, pil) */
+	lduw	[%g6], %g5		/* g5 = irq_work(cpu) */
 	stw	%g5, [%g3 + 0x00]	/* bucket->irq_chain = g5 */
-	stw	%g3, [%g6 + %g4]	/* irq_work(cpu, pil) = bucket */
-	wr	%g2, 0x0, %set_softint
+	stw	%g3, [%g6]		/* irq_work(cpu) = bucket */
+	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
 	retry
 do_ivec_xcall:
 	mov	0x50, %g1
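
In C terms, the rewritten do_ivec fast path above is a push onto a single per-cpu list followed by a write of one fixed softint bit; the old per-PIL load, index scaling, and variable shift are gone. A minimal sketch of this reading of the assembly, with purely illustrative names:

    /* Hypothetical model of the new do_ivec enqueue; no name below is a
     * kernel symbol.  do_ivec runs at trap level with interrupts
     * disabled, so plain stores suffice here.
     */
    struct bucket_model { unsigned int irq_chain; };

    static void ivec_enqueue_model(unsigned int *worklist_head,
                                   struct bucket_model *bucket,
                                   unsigned int bucket_cookie)
    {
        bucket->irq_chain = *worklist_head; /* stw %g5, [%g3 + 0x00] */
        *worklist_head = bucket_cookie;     /* stw %g3, [%g6] */
        /* then: wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint */
    }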
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 11e645c9ec50..0fb1738a4f3f 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -68,11 +68,7 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  * access to this structure takes a TLB miss it could cause
  * the 5-level sparc v9 trap stack to overflow.
  */
-struct irq_work_struct {
-	unsigned int	irq_worklists[16];
-};
-struct irq_work_struct __irq_work[NR_CPUS];
-#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
+#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
 
 static struct irqaction *irq_action[NR_IRQS+1];
 
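
The hunk above is the heart of the data-structure change: sixteen per-PIL worklist heads per cpu in a standalone __irq_work[] array collapse into one 32-bit head embedded in each cpu's trap_block entry, reachable straight from the trap block pointer. A stand-alone sketch of before and after (sizes and names are assumptions for illustration):

    #define NR_CPUS_MODEL 64

    /* Before: one list head per (cpu, pil) pair. */
    struct irq_work_struct_model {
        unsigned int irq_worklists[16];
    };
    static struct irq_work_struct_model old_irq_work[NR_CPUS_MODEL];

    static unsigned int *old_head(int cpu, int pil)
    {
        return &old_irq_work[cpu].irq_worklists[pil];
    }

    /* After: a single head per cpu; it lives inside trap_block[cpu]
     * in the real kernel, modeled as a bare array here.
     */
    static unsigned int new_worklist_model[NR_CPUS_MODEL];

    static unsigned int *new_head(int cpu)
    {
        return &new_worklist_model[cpu];
    }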
@@ -91,10 +87,8 @@ static void register_irq_proc (unsigned int irq);
  */
 #define put_ino_in_irqaction(action, irq) \
 	action->flags &= 0xffffffffffffUL; \
-	if (__bucket(irq) == &pil0_dummy_bucket) \
-		action->flags |= 0xdeadUL << 48; \
-	else \
-		action->flags |= __irq_ino(irq) << 48;
+	action->flags |= __irq_ino(irq) << 48;
+
 #define get_ino_in_irqaction(action)	(action->flags >> 48)
 
 #define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
@@ -251,15 +245,6 @@ void disable_irq(unsigned int irq)
 	}
 }
 
-/* The timer is the one "weird" interrupt which is generated by
- * the CPU %tick register and not by some normal vectored interrupt
- * source.  To handle this special case, we use this dummy INO bucket.
- */
-static struct irq_desc pil0_dummy_desc;
-static struct ino_bucket pil0_dummy_bucket = {
-	.irq_info = &pil0_dummy_desc,
-};
-
 static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
 			    unsigned long iclr, unsigned long imap,
 			    struct ino_bucket *bucket)
@@ -276,15 +261,7 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 	struct ino_bucket *bucket;
 	int ino;
 
-	if (pil == 0) {
-		if (iclr != 0UL || imap != 0UL) {
-			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
-				    iclr, imap);
-			prom_halt();
-		}
-		return __irq(&pil0_dummy_bucket);
-	}
-
+	BUG_ON(pil == 0);
 	BUG_ON(tlb_type == hypervisor);
 
 	/* RULE: Both must be specified in all other cases. */
@@ -371,7 +348,7 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 	__asm__ __volatile__("wrpr %0, %1, %%pstate"
 			     : : "r" (pstate), "i" (PSTATE_IE));
-	ent = irq_work(smp_processor_id(), bucket->pil);
+	ent = irq_work(smp_processor_id());
 	bucket->irq_chain = *ent;
 	*ent = __irq(bucket);
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
@@ -437,7 +414,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	if (unlikely(!bucket->irq_info))
 		return -ENODEV;
 
-	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
+	if (irqflags & SA_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
 		 * outside of the atomic block.  In SA_STATIC_ALLOC case,
@@ -465,12 +442,9 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	}
 
 	bucket->flags |= IBF_ACTIVE;
-	pending = 0;
-	if (bucket != &pil0_dummy_bucket) {
-		pending = bucket->pending;
-		if (pending)
-			bucket->pending = 0;
-	}
+	pending = bucket->pending;
+	if (pending)
+		bucket->pending = 0;
 
 	action->handler = handler;
 	action->flags = irqflags;
@@ -487,13 +461,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	/* We ate the IVEC already, this makes sure it does not get lost. */
 	if (pending) {
 		atomic_bucket_insert(bucket);
-		set_softint(1 << bucket->pil);
+		set_softint(1 << PIL_DEVICE_IRQ);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
-	if (bucket != &pil0_dummy_bucket)
-		register_irq_proc(__irq_ino(irq));
+	register_irq_proc(__irq_ino(irq));
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
@@ -533,7 +506,9 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irqaction *action;
 	struct ino_bucket *bucket;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int ent, i;
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
@@ -549,42 +524,39 @@ void free_irq(unsigned int irq, void *dev_id)
 	spin_lock_irqsave(&irq_action_lock, flags);
 
 	bucket = __bucket(irq);
-	if (bucket != &pil0_dummy_bucket) {
-		struct irq_desc *desc = bucket->irq_info;
-		int ent, i;
-
-		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-			struct irqaction *p = &desc->action[i];
-
-			if (p == action) {
-				desc->action_active_mask &= ~(1 << i);
-				break;
-			}
-		}
-
-		if (!desc->action_active_mask) {
-			unsigned long imap = bucket->imap;
-
-			/* This unique interrupt source is now inactive. */
-			bucket->flags &= ~IBF_ACTIVE;
-
-			/* See if any other buckets share this bucket's IMAP
-			 * and are still active.
-			 */
-			for (ent = 0; ent < NUM_IVECS; ent++) {
-				struct ino_bucket *bp = &ivector_table[ent];
-				if (bp != bucket &&
-				    bp->imap == imap &&
-				    (bp->flags & IBF_ACTIVE) != 0)
-					break;
-			}
-
-			/* Only disable when no other sub-irq levels of
-			 * the same IMAP are active.
-			 */
-			if (ent == NUM_IVECS)
-				disable_irq(irq);
-		}
-	}
+	desc = bucket->irq_info;
+
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];
+
+		if (p == action) {
+			desc->action_active_mask &= ~(1 << i);
+			break;
+		}
+	}
+
+	if (!desc->action_active_mask) {
+		unsigned long imap = bucket->imap;
+
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
+
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			struct ino_bucket *bp = &ivector_table[ent];
+			if (bp != bucket &&
+			    bp->imap == imap &&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
+		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
+	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
@@ -625,7 +597,7 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */
 
-static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
+static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 {
 	struct irq_desc *desc = bp->irq_info;
 	unsigned char flags = bp->flags;
@@ -676,51 +648,54 @@ static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 
 		/* Test and add entropy */
 		if (random & SA_SAMPLE_RANDOM)
-			add_interrupt_randomness(irq);
+			add_interrupt_randomness(bp->pil);
 	}
 out:
 	bp->flags &= ~IBF_INPROGRESS;
 }
 
+#ifndef CONFIG_SMP
+extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
+
+void timer_irq(int irq, struct pt_regs *regs)
+{
+	unsigned long clr_mask = 1 << irq;
+	unsigned long tick_mask = tick_ops->softint_mask;
+
+	if (get_softint() & tick_mask) {
+		irq = 0;
+		clr_mask = tick_mask;
+	}
+	clear_softint(clr_mask);
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+	timer_interrupt(irq, NULL, regs);
+	irq_exit();
+}
+#endif
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	struct ino_bucket *bp;
 	int cpu = smp_processor_id();
 
-#ifndef CONFIG_SMP
-	/*
-	 * Check for TICK_INT on level 14 softint.
+	/* XXX at this point we should be able to assert that
+	 * XXX irq is PIL_DEVICE_IRQ...
 	 */
-	{
-		unsigned long clr_mask = 1 << irq;
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if ((irq == 14) && (get_softint() & tick_mask)) {
-			irq = 0;
-			clr_mask = tick_mask;
-		}
-		clear_softint(clr_mask);
-	}
-#else
 	clear_softint(1 << irq);
-#endif
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
 
 	/* Sliiiick... */
-#ifndef CONFIG_SMP
-	bp = ((irq != 0) ?
-	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
-	      &pil0_dummy_bucket);
-#else
-	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
-#endif
+	bp = __bucket(xchg32(irq_work(cpu), 0));
 	while (bp) {
 		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
+		kstat_this_cpu.irqs[bp->pil]++;
+
 		bp->irq_chain = 0;
-		process_bucket(irq, bp, regs);
+		process_bucket(bp, regs);
 		bp = nbp;
 	}
 	irq_exit();
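
The new handler_irq drains that single list with an atomic swap: the head is exchanged with zero, detaching the whole chain in one step, and the per-PIL statistics now come from each bucket's stored pil. A compilable user-space model of the idiom (illustrative types; the kernel uses xchg32 on a 32-bit cookie rather than a pointer):

    #include <stdatomic.h>
    #include <stdio.h>

    struct bucket { struct bucket *irq_chain; int pil; };

    static _Atomic(struct bucket *) worklist;

    /* Enqueue side (do_ivec): in the kernel this runs with interrupts
     * disabled, so a load/store pair is enough there.
     */
    static void push(struct bucket *bp)
    {
        bp->irq_chain = atomic_load(&worklist);
        atomic_store(&worklist, bp);
    }

    /* Drain side (handler_irq): grab the whole chain atomically. */
    static void drain(void)
    {
        struct bucket *bp = atomic_exchange(&worklist, NULL);

        while (bp) {
            struct bucket *nbp = bp->irq_chain;

            bp->irq_chain = NULL;
            printf("process bucket, source PIL %d\n", bp->pil);
            bp = nbp;
        }
    }

    int main(void)
    {
        struct bucket a = { .pil = 7 }, b = { .pil = 9 };

        push(&a);
        push(&b);
        drain(); /* both arrive via the one PIL_DEVICE_IRQ softint */
        return 0;
    }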
@@ -929,7 +904,7 @@ void init_irqwork_curcpu(void)
 {
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
+	trap_block[cpu].irq_worklist = 0;
 }
 
 static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index b49a68bdda43..f70e4774649d 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -5,6 +5,7 @@
 
 #include <asm/cpudata.h>
 #include <asm/intr_queue.h>
+#include <asm/pil.h>
 
 	.text
 	.align	32
@@ -106,19 +107,13 @@ sun4v_dev_mondo:
 	or	%g4, %lo(ivector_table), %g4
 	add	%g4, %g3, %g4
 
-	/* Load IRQ %pil into %g5. */
-	ldub	[%g4 + 0x04], %g5
-
 	/* Insert ivector_table[] entry into __irq_work[] queue. */
-	sllx	%g5, 2, %g3
-	lduw	[%g1 + %g3], %g2	/* g2 = irq_work(cpu, pil) */
+	lduw	[%g1], %g2		/* g2 = irq_work(cpu) */
 	stw	%g2, [%g4 + 0x00]	/* bucket->irq_chain = g2 */
-	stw	%g4, [%g1 + %g3]	/* irq_work(cpu, pil) = bucket */
+	stw	%g4, [%g1]		/* irq_work(cpu) = bucket */
 
 	/* Signal the interrupt by setting (1 << pil) in %softint. */
-	mov	1, %g2
-	sllx	%g2, %g5, %g2
-	wr	%g2, 0x0, %set_softint
+	wr	%g0, 1 << PIL_DEVICE_IRQ, %set_softint
 
 sun4v_dev_mondo_queue_empty:
 	retry
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index e55b5c6ece02..0f00a99927e9 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -457,7 +457,7 @@ static inline void timer_check_rtc(void)
 	}
 }
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
+irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 {
 	unsigned long ticks, compare, pstate;
 
@@ -1020,19 +1020,9 @@ static unsigned long sparc64_init_timers(void)
 	return clock;
 }
 
-static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
+static void sparc64_start_timers(void)
 {
 	unsigned long pstate;
-	int err;
-
-	/* Register IRQ handler. */
-	err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
-			  "timer", NULL);
-
-	if (err) {
-		prom_printf("Serious problem, cannot register TICK_INT\n");
-		prom_halt();
-	}
 
 	/* Guarantee that the following sequences execute
 	 * uninterrupted.
@@ -1116,7 +1106,7 @@ void __init time_init(void)
 	/* Now that the interpolator is registered, it is
 	 * safe to start the timer ticking.
 	 */
-	sparc64_start_timers(timer_interrupt);
+	sparc64_start_timers();
 
 	timer_ticks_per_nsec_quotient =
 		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 563db528e031..5059cbd4feee 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2544,7 +2544,9 @@ void __init trap_init(void)
 	    (TRAP_PER_CPU_TSB_HUGE !=
 	     offsetof(struct trap_per_cpu, tsb_huge)) ||
 	    (TRAP_PER_CPU_TSB_HUGE_TEMP !=
-	     offsetof(struct trap_per_cpu, tsb_huge_temp)))
+	     offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
+	    (TRAP_PER_CPU_IRQ_WORKLIST !=
+	     offsetof(struct trap_per_cpu, irq_worklist)))
 		trap_per_cpu_offsets_are_bolixed_dave();
 
 	if ((TSB_CONFIG_TSB !=
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 5d901519db55..ee45ca2d7a04 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -58,13 +58,11 @@ tl0_irq2:	BTRAP(0x42)
 tl0_irq3:	BTRAP(0x43)
 tl0_irq4:	BTRAP(0x44)
 #endif
-tl0_irq5:	TRAP_IRQ(handler_irq, 5)  TRAP_IRQ(handler_irq, 6)
-tl0_irq7:	TRAP_IRQ(handler_irq, 7)  TRAP_IRQ(handler_irq, 8)
-tl0_irq9:	TRAP_IRQ(handler_irq, 9)  TRAP_IRQ(handler_irq, 10)
-tl0_irq11:	TRAP_IRQ(handler_irq, 11) TRAP_IRQ(handler_irq, 12)
-tl0_irq13:	TRAP_IRQ(handler_irq, 13)
+tl0_irq5:	TRAP_IRQ(handler_irq, 5)
+tl0_irq6:	BTRAP(0x46) BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
+tl0_irq10:	BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
 #ifndef CONFIG_SMP
-tl0_irq14:	TRAP_IRQ(handler_irq, 14)
+tl0_irq14:	TRAP_IRQ(timer_irq, 14)
 #else
 tl0_irq14:	TICK_SMP_IRQ
 #endif
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 9d6a6dbaf126..f2cc9411b4c7 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -74,8 +74,10 @@ struct trap_per_cpu {
 	unsigned long	tsb_huge;
 	unsigned long	tsb_huge_temp;
 
-/* Dcache line 8: Unused, needed to keep trap_block a power-of-2 in size. */
-	unsigned long	__pad2[4];
+/* Dcache line 8: IRQ work list, and keep trap_block a power-of-2 in size. */
+	unsigned int	irq_worklist;
+	unsigned int	__pad1;
+	unsigned long	__pad2[3];
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(struct thread_info *);
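
Since irq_worklist replaces half of the old padding, the trap block keeps its power-of-2 size and the new field lands at the 0xe0 offset used by the assembly macros below. A quick stand-alone layout check (the 28-word prefix is a placeholder for the real preceding fields, assuming 64-bit longs as on sparc64):

    #include <assert.h>
    #include <stddef.h>

    struct trap_per_cpu_model {
        unsigned long prefix[28];  /* stands in for the 224 bytes of
                                    * real fields before irq_worklist */
        unsigned int irq_worklist; /* offset 0xe0 */
        unsigned int __pad1;
        unsigned long __pad2[3];
    };

    int main(void)
    {
        assert(offsetof(struct trap_per_cpu_model, irq_worklist) == 0xe0);
        assert(sizeof(struct trap_per_cpu_model) == (1 << 8)); /* TRAP_BLOCK_SZ_SHIFT */
        return 0;
    }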
@@ -119,6 +121,7 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 #define TRAP_PER_CPU_CPU_LIST_PA	0xc8
 #define TRAP_PER_CPU_TSB_HUGE		0xd0
 #define TRAP_PER_CPU_TSB_HUGE_TEMP	0xd8
+#define TRAP_PER_CPU_IRQ_WORKLIST	0xe0
 
 #define TRAP_BLOCK_SZ_SHIFT		8
 
@@ -171,11 +174,8 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 
 /* Clobbers TMP, loads local processor's IRQ work area into DEST. */
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
-	__GET_CPUID(TMP)			\
-	sethi	%hi(__irq_work), DEST;		\
-	sllx	TMP, 6, TMP;			\
-	or	DEST, %lo(__irq_work), DEST;	\
-	add	DEST, TMP, DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
 
 /* Clobbers TMP, loads DEST with current thread info pointer. */
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
@@ -211,9 +211,10 @@ extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
 	ldx	[DEST + TRAP_PER_CPU_PGD_PADDR], DEST;
 
+/* Clobbers TMP, loads local processor's IRQ work area into DEST. */
 #define TRAP_LOAD_IRQ_WORK(DEST, TMP)		\
-	sethi	%hi(__irq_work), DEST;		\
-	or	DEST, %lo(__irq_work), DEST;
+	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
+	add	DEST, TRAP_PER_CPU_IRQ_WORKLIST, DEST;
 
 #define TRAP_LOAD_THREAD_REG(DEST, TMP)		\
 	TRAP_LOAD_TRAP_BLOCK(DEST, TMP)		\
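
Both TRAP_LOAD_IRQ_WORK variants above now compute the same address: the trap block base plus a fixed byte offset, rather than indexing a global array by CPU id. An illustrative C analog of that address computation (names are not kernel symbols):

    #define TRAP_PER_CPU_IRQ_WORKLIST_MODEL 0xe0

    /* DEST = trap block base + TRAP_PER_CPU_IRQ_WORKLIST, in C. */
    static unsigned int *trap_load_irq_work_model(void *trap_block_base)
    {
        return (unsigned int *)((char *)trap_block_base +
                                TRAP_PER_CPU_IRQ_WORKLIST_MODEL);
    }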
diff --git a/include/asm-sparc64/pil.h b/include/asm-sparc64/pil.h
index 79f827eb3f5d..72927749aebf 100644
--- a/include/asm-sparc64/pil.h
+++ b/include/asm-sparc64/pil.h
@@ -5,9 +5,9 @@
 /* To avoid some locking problems, we hard allocate certain PILs
  * for SMP cross call messages that must do a etrap/rtrap.
  *
- * A cli() does not block the cross call delivery, so when SMP
- * locking is an issue we reschedule the event into a PIL interrupt
- * which is blocked by cli().
+ * A local_irq_disable() does not block the cross call delivery, so
+ * when SMP locking is an issue we reschedule the event into a PIL
+ * interrupt which is blocked by local_irq_disable().
 *
 * In fact any XCALL which has to etrap/rtrap has a problem because
 * it is difficult to prevent rtrap from running BH's, and that would
@@ -17,6 +17,7 @@
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
 #define PIL_SMP_CTX_NEW_VERSION	4
+#define PIL_DEVICE_IRQ		5
 
 #ifndef __ASSEMBLY__
 #define PIL_RESERVED(PIL)	((PIL) == PIL_SMP_CALL_FUNC || \
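
For reference, a trivial stand-alone check of the new constant: every device vector now raises softint bit 5, whatever the source's own PIL is. The PIL_SMP_CALL_FUNC value is an assumption here, since that define is referenced but not shown in this hunk:

    #include <assert.h>

    #define PIL_SMP_CALL_FUNC       1 /* assumed; not shown in the hunk */
    #define PIL_SMP_RECEIVE_SIGNAL  2
    #define PIL_SMP_CAPTURE         3
    #define PIL_SMP_CTX_NEW_VERSION 4
    #define PIL_DEVICE_IRQ          5

    int main(void)
    {
        /* The mask written to %set_softint by do_ivec and sun4v_dev_mondo. */
        assert((1 << PIL_DEVICE_IRQ) == 0x20);
        return 0;
    }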