Diffstat (limited to 'arch/sparc64')
 arch/sparc64/kernel/entry.S         | 293
 arch/sparc64/kernel/pci_iommu.c     |   2
 arch/sparc64/kernel/process.c       |   2
 arch/sparc64/kernel/sbus.c          |   2
 arch/sparc64/kernel/setup.c         |   1
 arch/sparc64/kernel/signal.c        |  11
 arch/sparc64/kernel/signal32.c      |  33
 arch/sparc64/kernel/smp.c           |  30
 arch/sparc64/kernel/sparc64_ksyms.c |  32
 arch/sparc64/kernel/traps.c         | 269
 arch/sparc64/kernel/ttable.S        |  27
 arch/sparc64/kernel/unaligned.c     |  18
 arch/sparc64/kernel/winfixup.S      |   6
 arch/sparc64/lib/Makefile           |   2
 arch/sparc64/lib/debuglocks.c       |  56
 arch/sparc64/lib/mb.S               |  73
 arch/sparc64/solaris/misc.c         |   6
 17 files changed, 511 insertions(+), 352 deletions(-)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 88332f00094a..cecdc0a7521f 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -21,6 +21,7 @@
 #include <asm/visasm.h>
 #include <asm/estate.h>
 #include <asm/auxio.h>
+#include <asm/sfafsr.h>
 
 #define curptr g6
 
@@ -690,14 +691,159 @@ netbsd_syscall:
 	retl
 	 nop
 
-	/* These next few routines must be sure to clear the
-	 * SFSR FaultValid bit so that the fast tlb data protection
-	 * handler does not flush the wrong context and lock up the
-	 * box.
+	/* We need to carefully read the error status, ACK
+	 * the errors, prevent recursive traps, and pass the
+	 * information on to C code for logging.
+	 *
+	 * We pass the AFAR in as-is, and we encode the status
+	 * information as described in asm-sparc64/sfafsr.h
+	 */
+	.globl	__spitfire_access_error
+__spitfire_access_error:
+	/* Disable ESTATE error reporting so that we do not
+	 * take recursive traps and RED state the processor.
+	 */
+	stxa	%g0, [%g0] ASI_ESTATE_ERROR_EN
+	membar	#Sync
+
+	mov	UDBE_UE, %g1
+	ldxa	[%g0] ASI_AFSR, %g4	! Get AFSR
+
+	/* __spitfire_cee_trap branches here with AFSR in %g4 and
+	 * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the
+	 * ESTATE Error Enable register.
+	 */
+__spitfire_cee_trap_continue:
+	ldxa	[%g0] ASI_AFAR, %g5	! Get AFAR
+
+	rdpr	%tt, %g3
+	and	%g3, 0x1ff, %g3		! Paranoia
+	sllx	%g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
+	or	%g4, %g3, %g4
+	rdpr	%tl, %g3
+	cmp	%g3, 1
+	mov	1, %g3
+	bleu	%xcc, 1f
+	 sllx	%g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
+
+	or	%g4, %g3, %g4
+
+	/* Read in the UDB error register state, clearing the
+	 * sticky error bits as-needed.  We only clear them if
+	 * the UE bit is set.  Likewise, __spitfire_cee_trap
+	 * below will only do so if the CE bit is set.
+	 *
+	 * NOTE: UltraSparc-I/II have high and low UDB error
+	 *       registers, corresponding to the two UDB units
+	 *       present on those chips.  UltraSparc-IIi only
+	 *       has a single UDB, called "SDB" in the manual.
+	 *       For IIi the upper UDB register always reads
+	 *       as zero so for our purposes things will just
+	 *       work with the checks below.
 	 */
-	.globl __do_data_access_exception
-	.globl __do_data_access_exception_tl1
-__do_data_access_exception_tl1:
+1:	ldxa	[%g0] ASI_UDBH_ERROR_R, %g3
+	and	%g3, 0x3ff, %g7		! Paranoia
+	sllx	%g7, SFSTAT_UDBH_SHIFT, %g7
+	or	%g4, %g7, %g4
+	andcc	%g3, %g1, %g3		! UDBE_UE or UDBE_CE
+	be,pn	%xcc, 1f
+	 nop
+	stxa	%g3, [%g0] ASI_UDB_ERROR_W
+	membar	#Sync
+
+1:	mov	0x18, %g3
+	ldxa	[%g3] ASI_UDBL_ERROR_R, %g3
+	and	%g3, 0x3ff, %g7		! Paranoia
+	sllx	%g7, SFSTAT_UDBL_SHIFT, %g7
+	or	%g4, %g7, %g4
+	andcc	%g3, %g1, %g3		! UDBE_UE or UDBE_CE
+	be,pn	%xcc, 1f
+	 nop
+	mov	0x18, %g7
+	stxa	%g3, [%g7] ASI_UDB_ERROR_W
+	membar	#Sync
+
+1:	/* Ok, now that we've latched the error state,
+	 * clear the sticky bits in the AFSR.
+	 */
+	stxa	%g4, [%g0] ASI_AFSR
+	membar	#Sync
+
+	rdpr	%tl, %g2
+	cmp	%g2, 1
+	rdpr	%pil, %g2
+	bleu,pt	%xcc, 1f
+	 wrpr	%g0, 15, %pil
+
+	ba,pt	%xcc, etraptl1
+	 rd	%pc, %g7
+
+	ba,pt	%xcc, 2f
+	 nop
+
+1:	ba,pt	%xcc, etrap_irq
+	 rd	%pc, %g7
+
+2:	mov	%l4, %o1
+	mov	%l5, %o2
+	call	spitfire_access_error
+	 add	%sp, PTREGS_OFF, %o0
+	ba,pt	%xcc, rtrap
+	 clr	%l6
+
+	/* This is the trap handler entry point for ECC correctable
+	 * errors.  They are corrected, but we listen for the trap
+	 * so that the event can be logged.
+	 *
+	 * Disrupting errors are either:
+	 * 1) single-bit ECC errors during UDB reads to system
+	 *    memory
+	 * 2) data parity errors during write-back events
+	 *
+	 * As far as I can make out from the manual, the CEE trap
+	 * is only for correctable errors during memory read
+	 * accesses by the front-end of the processor.
+	 *
+	 * The code below is only for trap level 1 CEE events,
+	 * as it is the only situation where we can safely record
+	 * and log.  For trap level >1 we just clear the CE bit
+	 * in the AFSR and return.
+	 *
+	 * This is just like __spitfire_access_error above, but it
+	 * specifically handles correctable errors.  If an
+	 * uncorrectable error is indicated in the AFSR we
+	 * will branch directly above to __spitfire_access_error
+	 * to handle it instead.  Uncorrectable therefore takes
+	 * priority over correctable, and the error logging
+	 * C code will notice this case by inspecting the
+	 * trap type.
+	 */
+	.globl	__spitfire_cee_trap
+__spitfire_cee_trap:
+	ldxa	[%g0] ASI_AFSR, %g4	! Get AFSR
+	mov	1, %g3
+	sllx	%g3, SFAFSR_UE_SHIFT, %g3
+	andcc	%g4, %g3, %g0		! Check for UE
+	bne,pn	%xcc, __spitfire_access_error
+	 nop
+
+	/* Ok, in this case we only have a correctable error.
+	 * Indicate we only wish to capture that state in register
+	 * %g1, and we only disable CE error reporting unlike UE
+	 * handling which disables all errors.
+	 */
+	ldxa	[%g0] ASI_ESTATE_ERROR_EN, %g3
+	andn	%g3, ESTATE_ERR_CE, %g3
+	stxa	%g3, [%g0] ASI_ESTATE_ERROR_EN
+	membar	#Sync
+
+	/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
+	ba,pt	%xcc, __spitfire_cee_trap_continue
+	 mov	UDBE_CE, %g1
+
+	.globl	__spitfire_data_access_exception
+	.globl	__spitfire_data_access_exception_tl1
+__spitfire_data_access_exception_tl1:
 	rdpr	%pstate, %g4
 	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
 	mov	TLB_SFSR, %g3
@@ -706,9 +852,25 @@ __do_data_access_exception_tl1:
 	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
 	stxa	%g0, [%g3] ASI_DMMU	! Clear SFSR.FaultValid bit
 	membar	#Sync
+	rdpr	%tt, %g3
+	cmp	%g3, 0x80		! first win spill/fill trap
+	blu,pn	%xcc, 1f
+	 cmp	%g3, 0xff		! last win spill/fill trap
+	bgu,pn	%xcc, 1f
+	 nop
 	ba,pt	%xcc, winfix_dax
 	 rdpr	%tpc, %g3
-__do_data_access_exception:
+1:	sethi	%hi(109f), %g7
+	ba,pt	%xcc, etraptl1
+109:	 or	%g7, %lo(109b), %g7
+	mov	%l4, %o1
+	mov	%l5, %o2
+	call	spitfire_data_access_exception_tl1
+	 add	%sp, PTREGS_OFF, %o0
+	ba,pt	%xcc, rtrap
+	 clr	%l6
+
+__spitfire_data_access_exception:
 	rdpr	%pstate, %g4
 	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
 	mov	TLB_SFSR, %g3
@@ -722,20 +884,19 @@ __do_data_access_exception:
 109:	 or	%g7, %lo(109b), %g7
 	mov	%l4, %o1
 	mov	%l5, %o2
-	call	data_access_exception
+	call	spitfire_data_access_exception
 	 add	%sp, PTREGS_OFF, %o0
 	ba,pt	%xcc, rtrap
 	 clr	%l6
 
-	.globl __do_instruction_access_exception
-	.globl __do_instruction_access_exception_tl1
-__do_instruction_access_exception_tl1:
+	.globl	__spitfire_insn_access_exception
+	.globl	__spitfire_insn_access_exception_tl1
+__spitfire_insn_access_exception_tl1:
 	rdpr	%pstate, %g4
 	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
 	mov	TLB_SFSR, %g3
-	mov	DMMU_SFAR, %g5
-	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
-	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
+	ldxa	[%g3] ASI_IMMU, %g4	! Get SFSR
+	rdpr	%tpc, %g5		! IMMU has no SFAR, use TPC
 	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
 	membar	#Sync
 	sethi	%hi(109f), %g7
@@ -743,18 +904,17 @@ __do_instruction_access_exception_tl1:
 109:	 or	%g7, %lo(109b), %g7
 	mov	%l4, %o1
 	mov	%l5, %o2
-	call	instruction_access_exception_tl1
+	call	spitfire_insn_access_exception_tl1
 	 add	%sp, PTREGS_OFF, %o0
 	ba,pt	%xcc, rtrap
 	 clr	%l6
 
-__do_instruction_access_exception:
+__spitfire_insn_access_exception:
 	rdpr	%pstate, %g4
 	wrpr	%g4, PSTATE_MG|PSTATE_AG, %pstate
 	mov	TLB_SFSR, %g3
-	mov	DMMU_SFAR, %g5
-	ldxa	[%g3] ASI_DMMU, %g4	! Get SFSR
-	ldxa	[%g5] ASI_DMMU, %g5	! Get SFAR
+	ldxa	[%g3] ASI_IMMU, %g4	! Get SFSR
+	rdpr	%tpc, %g5		! IMMU has no SFAR, use TPC
 	stxa	%g0, [%g3] ASI_IMMU	! Clear FaultValid bit
 	membar	#Sync
 	sethi	%hi(109f), %g7
@@ -762,102 +922,11 @@ __do_instruction_access_exception:
 109:	 or	%g7, %lo(109b), %g7
 	mov	%l4, %o1
 	mov	%l5, %o2
-	call	instruction_access_exception
+	call	spitfire_insn_access_exception
 	 add	%sp, PTREGS_OFF, %o0
 	ba,pt	%xcc, rtrap
 	 clr	%l6
 
-	/* This is the trap handler entry point for ECC correctable
-	 * errors.  They are corrected, but we listen for the trap
-	 * so that the event can be logged.
-	 *
-	 * Disrupting errors are either:
-	 * 1) single-bit ECC errors during UDB reads to system
-	 *    memory
-	 * 2) data parity errors during write-back events
-	 *
-	 * As far as I can make out from the manual, the CEE trap
-	 * is only for correctable errors during memory read
-	 * accesses by the front-end of the processor.
-	 *
-	 * The code below is only for trap level 1 CEE events,
-	 * as it is the only situation where we can safely record
-	 * and log.  For trap level >1 we just clear the CE bit
-	 * in the AFSR and return.
-	 */
-
-	/* Our trap handling infrastructure allows us to preserve
-	 * two 64-bit values during etrap for arguments to
-	 * subsequent C code.  Therefore we encode the information
-	 * as follows:
-	 *
-	 * value 1) Full 64-bits of AFAR
-	 * value 2) Low 33-bits of AFSR, then bits 33-->42
-	 *          are UDBL error status and bits 43-->52
-	 *          are UDBH error status
-	 */
-	.align	64
-	.globl	cee_trap
-cee_trap:
-	ldxa	[%g0] ASI_AFSR, %g1	! Read AFSR
-	ldxa	[%g0] ASI_AFAR, %g2	! Read AFAR
-	sllx	%g1, 31, %g1		! Clear reserved bits
-	srlx	%g1, 31, %g1		! in AFSR
-
-	/* NOTE: UltraSparc-I/II have high and low UDB error
-	 *       registers, corresponding to the two UDB units
-	 *       present on those chips.  UltraSparc-IIi only
-	 *       has a single UDB, called "SDB" in the manual.
-	 *       For IIi the upper UDB register always reads
-	 *       as zero so for our purposes things will just
-	 *       work with the checks below.
-	 */
-	ldxa	[%g0] ASI_UDBL_ERROR_R, %g3	! Read UDB-Low error status
-	andcc	%g3, (1 << 8), %g4		! Check CE bit
-	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
-	srlx	%g3, (64 - 10), %g3		! in UDB-Low error status
-
-	sllx	%g3, (33 + 0), %g3		! Shift up to encoding area
-	or	%g1, %g3, %g1			! Or it in
-	be,pn	%xcc, 1f			! Branch if CE bit was clear
-	 nop
-	stxa	%g4, [%g0] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBL
-	membar	#Sync				! Synchronize ASI stores
-1:	mov	0x18, %g5			! Addr of UDB-High error status
-	ldxa	[%g5] ASI_UDBH_ERROR_R, %g3	! Read it
-
-	andcc	%g3, (1 << 8), %g4		! Check CE bit
-	sllx	%g3, (64 - 10), %g3		! Clear reserved bits
-	srlx	%g3, (64 - 10), %g3		! in UDB-High error status
-	sllx	%g3, (33 + 10), %g3		! Shift up to encoding area
-	or	%g1, %g3, %g1			! Or it in
-	be,pn	%xcc, 1f			! Branch if CE bit was clear
-	 nop
-	nop
-
-	stxa	%g4, [%g5] ASI_UDB_ERROR_W	! Clear CE sticky bit in UDBH
-	membar	#Sync				! Synchronize ASI stores
-1:	mov	1, %g5				! AFSR CE bit is
-	sllx	%g5, 20, %g5			! bit 20
-	stxa	%g5, [%g0] ASI_AFSR		! Clear CE sticky bit in AFSR
-	membar	#Sync				! Synchronize ASI stores
-	sllx	%g2, (64 - 41), %g2		! Clear reserved bits
-	srlx	%g2, (64 - 41), %g2		! in latched AFAR
-
-	andn	%g2, 0x0f, %g2			! Finish resv bit clearing
-	mov	%g1, %g4			! Move AFSR+UDB* into save reg
-	mov	%g2, %g5			! Move AFAR into save reg
-	rdpr	%pil, %g2
-	wrpr	%g0, 15, %pil
-	ba,pt	%xcc, etrap_irq
-	 rd	%pc, %g7
-	mov	%l4, %o0
-
-	mov	%l5, %o1
-	call	cee_log
-	 add	%sp, PTREGS_OFF, %o2
-	ba,a,pt	%xcc, rtrap_irq
-
 /* Capture I/D/E-cache state into per-cpu error scoreboard.
  *
  * %g1:		(TL>=0) ? 1 : 0
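
Note: the new handlers fold the AFSR, the trap type, a TL>1 flag, and both UDB
error-register values into the single "status_encoded" word handed to C code.
Below is a C rendering of what __spitfire_access_error accumulates in %g4
above -- a sketch only; the SFSTAT_* shift and mask values live in the new
asm-sparc64/sfafsr.h header, which this page does not display:

    /* Sketch of the status word built by __spitfire_access_error.
     * Field positions come from asm-sparc64/sfafsr.h (not shown here).
     */
    static unsigned long sfstat_encode(unsigned long afsr, unsigned long udbh,
                                       unsigned long udbl, unsigned long tt,
                                       int tl_gt_one)
    {
            unsigned long status = afsr;                  /* low bits: raw AFSR */

            status |= (tt & 0x1ff) << SFSTAT_TRAP_TYPE_SHIFT; /* %tt, masked   */
            if (tl_gt_one)
                    status |= 1UL << SFSTAT_TL_GT_ONE_SHIFT;  /* taken at TL>1 */
            status |= (udbh & 0x3ff) << SFSTAT_UDBH_SHIFT;    /* UDB-High bits */
            status |= (udbl & 0x3ff) << SFSTAT_UDBL_SHIFT;    /* UDB-Low bits  */
            return status;
    }

spitfire_access_error() in the traps.c hunks below performs the inverse
unpacking with the corresponding SFSTAT_*_MASK/SHIFT macros.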
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2803bc7c2c79..425c60cfea19 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -466,7 +466,7 @@ do_flush_sync:
 		if (!limit)
 			break;
 		udelay(1);
-		membar("#LoadLoad");
+		rmb();
 	}
 	if (!limit)
 		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 07424b075938..66255434128a 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -103,7 +103,7 @@ void cpu_idle(void)
 		 * other cpus see our increasing idleness for the buddy
 		 * redistribution algorithm.  -DaveM
 		 */
-		membar("#StoreStore | #StoreLoad");
+		membar_storeload_storestore();
 	}
 }
 
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 89f5e019f24c..e09ddf927655 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -147,7 +147,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
 		if (!limit)
 			break;
 		udelay(1);
-		membar("#LoadLoad");
+		rmb();
 	}
 	if (!limit)
 		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index b7e6a91952b2..fbdfed3798d8 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -33,7 +33,6 @@
 #include <linux/cpu.h>
 #include <linux/initrd.h>
 
-#include <asm/segment.h>
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/processor.h>
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index b27934671c35..60f5dfabb1e1 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -574,13 +574,12 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
 {
 	setup_rt_frame(ka, regs, signr, oldset,
 		       (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
-	if (!(ka->sa.sa_flags & SA_NOMASK)) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	spin_lock_irq(&current->sighand->siglock);
+	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NOMASK))
 		sigaddset(&current->blocked,signr);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
 }
 
 static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
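
Note: this hunk changes delivery-time signal blocking so that sa_mask is
always OR'd into the blocked set; SA_NOMASK (a.k.a. SA_NODEFER) now only
controls whether the delivered signal itself is additionally blocked, which
matches POSIX sigaction semantics. The resulting rule, as a stand-alone
sketch (the helper name is hypothetical; the sig* calls are the ones used in
the hunk above):

    /* Mask in effect while the handler runs:
     *   blocked' = blocked | sa_mask | (SA_NOMASK ? {} : {signr})
     * Before this fix, sa_mask was dropped entirely whenever SA_NOMASK
     * was set.
     */
    static void compute_handler_mask(sigset_t *blocked,
                                     const struct k_sigaction *ka, int signr)
    {
            sigorsets(blocked, blocked, &ka->sa.sa_mask);  /* always applied */
            if (!(ka->sa.sa_flags & SA_NOMASK))
                    sigaddset(blocked, signr);             /* defer signr too */
    }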
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index f28428f4170e..aecccd0df1d1 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -877,11 +877,12 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 			unsigned long page = (unsigned long)
 				page_address(pte_page(*ptep));
 
-			__asm__ __volatile__(
-			"	membar	#StoreStore\n"
-			"	flush	%0 + %1"
-			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
-			: "memory");
+			wmb();
+			__asm__ __volatile__("flush	%0 + %1"
+					     : /* no outputs */
+					     : "r" (page),
+					       "r" (address & (PAGE_SIZE - 1))
+					     : "memory");
 		}
 		pte_unmap(ptep);
 		preempt_enable();
@@ -1292,11 +1293,12 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
 			unsigned long page = (unsigned long)
 				page_address(pte_page(*ptep));
 
-			__asm__ __volatile__(
-			"	membar	#StoreStore\n"
-			"	flush	%0 + %1"
-			: : "r" (page), "r" (address & (PAGE_SIZE - 1))
-			: "memory");
+			wmb();
+			__asm__ __volatile__("flush	%0 + %1"
+					     : /* no outputs */
+					     : "r" (page),
+					       "r" (address & (PAGE_SIZE - 1))
+					     : "memory");
 		}
 		pte_unmap(ptep);
 		preempt_enable();
@@ -1325,13 +1327,12 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
 	else
 		setup_frame32(&ka->sa, regs, signr, oldset, info);
 	}
-	if (!(ka->sa.sa_flags & SA_NOMASK)) {
-		spin_lock_irq(&current->sighand->siglock);
-		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	spin_lock_irq(&current->sighand->siglock);
+	sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+	if (!(ka->sa.sa_flags & SA_NOMASK))
 		sigaddset(&current->blocked,signr);
-		recalc_sigpending();
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	recalc_sigpending();
+	spin_unlock_irq(&current->sighand->siglock);
 }
 
 static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
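
Note: both 32-bit frame-setup paths above write a signal trampoline into user
memory and must make those stores visible to instruction fetch before the
task executes them. The rewrite hoists the #StoreStore barrier out of the
inline asm as wmb(), then issues the flush. The pattern as a stand-alone
sketch (the helper name is hypothetical):

    /* Order the trampoline stores, then flush the I-cache line
     * that aliases the just-written user page.
     */
    static void flush_signal_insns_sketch(unsigned long page,
                                          unsigned long offset)
    {
            wmb();  /* trampoline stores complete before the flush */
            __asm__ __volatile__("flush %0 + %1"
                                 : /* no outputs */
                                 : "r" (page), "r" (offset)
                                 : "memory");
    }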
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b9b42491e118..b4fc6a5462b2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -144,7 +144,7 @@ void __init smp_callin(void)
 	current->active_mm = &init_mm;
 
 	while (!cpu_isset(cpuid, smp_commenced_mask))
-		membar("#LoadLoad");
+		rmb();
 
 	cpu_set(cpuid, cpu_online_map);
 }
@@ -184,11 +184,11 @@ static inline long get_delta (long *rt, long *master)
 	for (i = 0; i < NUM_ITERS; i++) {
 		t0 = tick_ops->get_tick();
 		go[MASTER] = 1;
-		membar("#StoreLoad");
+		membar_storeload();
 		while (!(tm = go[SLAVE]))
-			membar("#LoadLoad");
+			rmb();
 		go[SLAVE] = 0;
-		membar("#StoreStore");
+		wmb();
 		t1 = tick_ops->get_tick();
 
 		if (t1 - t0 < best_t1 - best_t0)
@@ -221,7 +221,7 @@ void smp_synchronize_tick_client(void)
 	go[MASTER] = 1;
 
 	while (go[MASTER])
-		membar("#LoadLoad");
+		rmb();
 
 	local_irq_save(flags);
 	{
@@ -273,21 +273,21 @@ static void smp_synchronize_one_tick(int cpu)
 
 	/* wait for client to be ready */
 	while (!go[MASTER])
-		membar("#LoadLoad");
+		rmb();
 
 	/* now let the client proceed into his loop */
 	go[MASTER] = 0;
-	membar("#StoreLoad");
+	membar_storeload();
 
 	spin_lock_irqsave(&itc_sync_lock, flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
 			while (!go[MASTER])
-				membar("#LoadLoad");
+				rmb();
 			go[MASTER] = 0;
-			membar("#StoreStore");
+			wmb();
 			go[SLAVE] = tick_ops->get_tick();
-			membar("#StoreLoad");
+			membar_storeload();
 		}
 	}
 	spin_unlock_irqrestore(&itc_sync_lock, flags);
@@ -927,11 +927,11 @@ void smp_capture(void)
 		       smp_processor_id());
 #endif
 		penguins_are_doing_time = 1;
-		membar("#StoreStore | #LoadStore");
+		membar_storestore_loadstore();
 		atomic_inc(&smp_capture_registry);
 		smp_cross_call(&xcall_capture, 0, 0, 0);
 		while (atomic_read(&smp_capture_registry) != ncpus)
-			membar("#LoadLoad");
+			rmb();
 #ifdef CAPTURE_DEBUG
 		printk("done\n");
 #endif
@@ -947,7 +947,7 @@ void smp_release(void)
 		       smp_processor_id());
 #endif
 		penguins_are_doing_time = 0;
-		membar("#StoreStore | #StoreLoad");
+		membar_storeload_storestore();
 		atomic_dec(&smp_capture_registry);
 	}
 }
@@ -970,9 +970,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	save_alternate_globals(global_save);
 	prom_world(1);
 	atomic_inc(&smp_capture_registry);
-	membar("#StoreLoad | #StoreStore");
+	membar_storeload_storestore();
 	while (penguins_are_doing_time)
-		membar("#LoadLoad");
+		rmb();
 	restore_alternate_globals(global_save);
 	atomic_dec(&smp_capture_registry);
 	prom_world(0);
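
Note: the tick-synchronization handshake is a flag-passing protocol over the
shared go[] array, and the conversion makes each barrier's intent explicit:
rmb() orders the loads in the spin loops, wmb() orders a flag store before a
following store, and membar_storeload() forces a store to be globally visible
before the next load -- the one ordering SPARC's relaxed memory model will not
give for free. One master-side round from the code above, with the barriers
annotated:

    while (!go[MASTER])              /* spin until the slave's store arrives */
            rmb();                   /* order this load before later loads   */
    go[MASTER] = 0;                  /* hand the token back                  */
    wmb();                           /* token store before the tick store    */
    go[SLAVE] = tick_ops->get_tick();
    membar_storeload();              /* tick store visible before re-reading go[] */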
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9202d925a9ce..a3ea697f1adb 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -99,17 +99,6 @@ extern int __ashrdi3(int, int);
 extern void dump_thread(struct pt_regs *, struct user *);
 extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
 
-#if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK)
-extern void _do_spin_lock (spinlock_t *lock, char *str);
-extern void _do_spin_unlock (spinlock_t *lock);
-extern int _spin_trylock (spinlock_t *lock);
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-extern int _do_write_trylock(rwlock_t *rw, char *str);
-#endif
-
 extern unsigned long phys_base;
 extern unsigned long pfn_base;
 
@@ -152,18 +141,6 @@ EXPORT_SYMBOL(_mcount);
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
 
-/* Spinlock debugging library, optional. */
-#ifdef CONFIG_DEBUG_SPINLOCK
-EXPORT_SYMBOL(_do_spin_lock);
-EXPORT_SYMBOL(_do_spin_unlock);
-EXPORT_SYMBOL(_spin_trylock);
-EXPORT_SYMBOL(_do_read_lock);
-EXPORT_SYMBOL(_do_read_unlock);
-EXPORT_SYMBOL(_do_write_lock);
-EXPORT_SYMBOL(_do_write_unlock);
-EXPORT_SYMBOL(_do_write_trylock);
-#endif
-
 EXPORT_SYMBOL(smp_call_function);
 #endif /* CONFIG_SMP */
 
@@ -429,3 +406,12 @@ EXPORT_SYMBOL(xor_vis_4);
 EXPORT_SYMBOL(xor_vis_5);
 
 EXPORT_SYMBOL(prom_palette);
+
+/* memory barriers */
+EXPORT_SYMBOL(mb);
+EXPORT_SYMBOL(rmb);
+EXPORT_SYMBOL(wmb);
+EXPORT_SYMBOL(membar_storeload);
+EXPORT_SYMBOL(membar_storeload_storestore);
+EXPORT_SYMBOL(membar_storeload_loadload);
+EXPORT_SYMBOL(membar_storestore_loadstore);
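
Note: these exports are the new out-of-line barrier routines from
arch/sparc64/lib/mb.S (added per the diffstat, but not shown on this page).
Their bodies are not visible here; judging purely from the call-site
conversions elsewhere in this patch, each plausibly reduces to a single
membar instruction. A hedged C sketch, not the real mb.S (all names below
are placeholders):

    /* Mapping read off the conversions in this patch:
     *   membar("#LoadLoad")                -> rmb()
     *   membar("#StoreStore")              -> wmb()
     *   membar("#StoreLoad")               -> membar_storeload()
     *   membar("#StoreLoad | #StoreStore") -> membar_storeload_storestore()
     *   membar("#StoreStore | #LoadStore") -> membar_storestore_loadstore()
     */
    #define sparc_membar(type) \
            __asm__ __volatile__("membar " type : : : "memory")

    static inline void rmb_sketch(void)  { sparc_membar("#LoadLoad"); }
    static inline void wmb_sketch(void)  { sparc_membar("#StoreStore"); }
    static inline void membar_storeload_sketch(void)
    {
            sparc_membar("#StoreLoad");
    }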
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 0c9e54b2f0c8..b280b2ef674f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
 #include <asm/dcu.h>
 #include <asm/estate.h>
 #include <asm/chafsr.h>
+#include <asm/sfafsr.h>
 #include <asm/psrcompat.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
 }
 #endif
 
-void instruction_access_exception(struct pt_regs *regs,
-				  unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
 	siginfo_t info;
 
@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
 		return;
 
 	if (regs->tstate & TSTATE_PRIV) {
-		printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
-		       sfsr, sfar);
+		printk("spitfire_insn_access_exception: SFSR[%016lx] "
+		       "SFAR[%016lx], going.\n", sfsr, sfar);
 		die_if_kernel("Iax", regs);
 	}
 	if (test_thread_flag(TIF_32BIT)) {
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
 	force_sig_info(SIGSEGV, &info, current);
 }
 
-void instruction_access_exception_tl1(struct pt_regs *regs,
-				      unsigned long sfsr, unsigned long sfar)
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
 	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
 		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
 		return;
 
 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
-	instruction_access_exception(regs, sfsr, sfar);
+	spitfire_insn_access_exception(regs, sfsr, sfar);
 }
 
-void data_access_exception(struct pt_regs *regs,
-			   unsigned long sfsr, unsigned long sfar)
+void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
 {
 	siginfo_t info;
 
@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
 		return;
 	}
 	/* Shit... */
-	printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n",
-	       sfsr, sfar);
+	printk("spitfire_data_access_exception: SFSR[%016lx] "
+	       "SFAR[%016lx], going.\n", sfsr, sfar);
 	die_if_kernel("Dax", regs);
 }
 
@@ -220,6 +218,16 @@ void data_access_exception(struct pt_regs *regs,
 	force_sig_info(SIGSEGV, &info, current);
 }
 
+void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
+{
+	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
+		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
+		return;
+
+	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+	spitfire_data_access_exception(regs, sfsr, sfar);
+}
+
 #ifdef CONFIG_PCI
 /* This is really pathetic... */
 extern volatile int pci_poke_in_progress;
@@ -253,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
 	: "memory");
 }
 
-void do_iae(struct pt_regs *regs)
+static void spitfire_enable_estate_errors(void)
 {
-	siginfo_t info;
-
-	spitfire_clean_and_reenable_l1_caches();
-
-	if (notify_die(DIE_TRAP, "instruction access exception", regs,
-		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
-		return;
-
-	info.si_signo = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_OBJERR;
-	info.si_addr = (void *)0;
-	info.si_trapno = 0;
-	force_sig_info(SIGBUS, &info, current);
-}
-
-void do_dae(struct pt_regs *regs)
-{
-	siginfo_t info;
-
-#ifdef CONFIG_PCI
-	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
-		spitfire_clean_and_reenable_l1_caches();
-
-		pci_poke_faulted = 1;
-
-		/* Why the fuck did they have to change this? */
-		if (tlb_type == cheetah || tlb_type == cheetah_plus)
-			regs->tpc += 4;
-
-		regs->tnpc = regs->tpc + 4;
-		return;
-	}
-#endif
-	spitfire_clean_and_reenable_l1_caches();
-
-	if (notify_die(DIE_TRAP, "data access exception", regs,
-		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
-		return;
-
-	info.si_signo = SIGBUS;
-	info.si_errno = 0;
-	info.si_code = BUS_OBJERR;
-	info.si_addr = (void *)0;
-	info.si_trapno = 0;
-	force_sig_info(SIGBUS, &info, current);
+	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
+			     "membar	#Sync"
+			     : /* no outputs */
+			     : "r" (ESTATE_ERR_ALL),
+			       "i" (ASI_ESTATE_ERROR_EN));
 }
 
 static char ecc_syndrome_table[] = {
@@ -338,65 +305,15 @@ static char ecc_syndrome_table[] = {
 	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
 };
 
-/* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status
- * in the following format.  The AFAR is left as is, with
- * reserved bits cleared, and is a raw 40-bit physical
- * address.
- */
-#define CE_STATUS_UDBH_UE		(1UL << (43 + 9))
-#define CE_STATUS_UDBH_CE		(1UL << (43 + 8))
-#define CE_STATUS_UDBH_ESYNDR		(0xffUL << 43)
-#define CE_STATUS_UDBH_SHIFT		43
-#define CE_STATUS_UDBL_UE		(1UL << (33 + 9))
-#define CE_STATUS_UDBL_CE		(1UL << (33 + 8))
-#define CE_STATUS_UDBL_ESYNDR		(0xffUL << 33)
-#define CE_STATUS_UDBL_SHIFT		33
-#define CE_STATUS_AFSR_MASK		(0x1ffffffffUL)
-#define CE_STATUS_AFSR_ME		(1UL << 32)
-#define CE_STATUS_AFSR_PRIV		(1UL << 31)
-#define CE_STATUS_AFSR_ISAP		(1UL << 30)
-#define CE_STATUS_AFSR_ETP		(1UL << 29)
-#define CE_STATUS_AFSR_IVUE		(1UL << 28)
-#define CE_STATUS_AFSR_TO		(1UL << 27)
-#define CE_STATUS_AFSR_BERR		(1UL << 26)
-#define CE_STATUS_AFSR_LDP		(1UL << 25)
-#define CE_STATUS_AFSR_CP		(1UL << 24)
-#define CE_STATUS_AFSR_WP		(1UL << 23)
-#define CE_STATUS_AFSR_EDP		(1UL << 22)
-#define CE_STATUS_AFSR_UE		(1UL << 21)
-#define CE_STATUS_AFSR_CE		(1UL << 20)
-#define CE_STATUS_AFSR_ETS		(0xfUL << 16)
-#define CE_STATUS_AFSR_ETS_SHIFT	16
-#define CE_STATUS_AFSR_PSYND		(0xffffUL << 0)
-#define CE_STATUS_AFSR_PSYND_SHIFT	0
-
-/* Layout of Ecache TAG Parity Syndrome of AFSR */
-#define AFSR_ETSYNDROME_7_0	0x1UL	/* E$-tag bus bits <7:0> */
-#define AFSR_ETSYNDROME_15_8	0x2UL	/* E$-tag bus bits <15:8> */
-#define AFSR_ETSYNDROME_21_16	0x4UL	/* E$-tag bus bits <21:16> */
-#define AFSR_ETSYNDROME_24_22	0x8UL	/* E$-tag bus bits <24:22> */
-
 static char *syndrome_unknown = "<Unknown>";
 
-asmlinkage void cee_log(unsigned long ce_status,
-			unsigned long afar,
-			struct pt_regs *regs)
+static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
 {
-	char memmod_str[64];
-	char *p;
-	unsigned short scode, udb_reg;
+	unsigned short scode;
+	char memmod_str[64], *p;
 
-	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
-	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n",
-	       smp_processor_id(),
-	       (ce_status & CE_STATUS_AFSR_MASK),
-	       afar,
-	       ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL),
-	       ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL));
-
-	udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL);
-	if (udb_reg & (1 << 8)) {
-		scode = ecc_syndrome_table[udb_reg & 0xff];
+	if (udbl & bit) {
+		scode = ecc_syndrome_table[udbl & 0xff];
 		if (prom_getunumber(scode, afar,
 				    memmod_str, sizeof(memmod_str)) == -1)
 			p = syndrome_unknown;
@@ -407,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
 		       smp_processor_id(), scode, p);
 	}
 
-	udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL);
-	if (udb_reg & (1 << 8)) {
-		scode = ecc_syndrome_table[udb_reg & 0xff];
+	if (udbh & bit) {
+		scode = ecc_syndrome_table[udbh & 0xff];
 		if (prom_getunumber(scode, afar,
 				    memmod_str, sizeof(memmod_str)) == -1)
 			p = syndrome_unknown;
@@ -419,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
 		       "Memory Module \"%s\"\n",
 		       smp_processor_id(), scode, p);
 	}
+
+}
+
+static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
+{
+
+	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
+	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
+	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);
+
+	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);
+
+	/* We always log it, even if someone is listening for this
+	 * trap.
+	 */
+	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
+		   0, TRAP_TYPE_CEE, SIGTRAP);
+
+	/* The Correctable ECC Error trap does not disable I/D caches.  So
+	 * we only have to restore the ESTATE Error Enable register.
+	 */
+	spitfire_enable_estate_errors();
+}
+
+static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
+	       "AFAR[%lx] UDBL[%lx] UDBH[%lx] TT[%lx] TL>1[%d]\n",
+	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
+
+	/* XXX add more human friendly logging of the error status
+	 * XXX as is implemented for cheetah
+	 */
+
+	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
+
+	/* We always log it, even if someone is listening for this
+	 * trap.
+	 */
+	notify_die(DIE_TRAP, "Uncorrectable Error", regs,
+		   0, tt, SIGTRAP);
+
+	if (regs->tstate & TSTATE_PRIV) {
+		if (tl1)
+			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
+		die_if_kernel("UE", regs);
+	}
+
+	/* XXX need more intelligent processing here, such as is implemented
+	 * XXX for cheetah errors, in fact if the E-cache still holds the
+	 * XXX line with bad parity this will loop
+	 */
+
+	spitfire_clean_and_reenable_l1_caches();
+	spitfire_enable_estate_errors();
+
+	if (test_thread_flag(TIF_32BIT)) {
+		regs->tpc &= 0xffffffff;
+		regs->tnpc &= 0xffffffff;
+	}
+	info.si_signo = SIGBUS;
+	info.si_errno = 0;
+	info.si_code = BUS_OBJERR;
+	info.si_addr = (void *)0;
+	info.si_trapno = 0;
+	force_sig_info(SIGBUS, &info, current);
+}
+
+void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
+{
+	unsigned long afsr, tt, udbh, udbl;
+	int tl1;
+
+	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
+	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
+	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
+	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
+	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;
+
+#ifdef CONFIG_PCI
+	if (tt == TRAP_TYPE_DAE &&
+	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
+		spitfire_clean_and_reenable_l1_caches();
+		spitfire_enable_estate_errors();
+
+		pci_poke_faulted = 1;
+		regs->tnpc = regs->tpc + 4;
+		return;
+	}
+#endif
+
+	if (afsr & SFAFSR_UE)
+		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);
+
+	if (tt == TRAP_TYPE_CEE) {
+		/* Handle the case where we took a CEE trap, but ACK'd
+		 * only the UE state in the UDB error registers.
+		 */
+		if (afsr & SFAFSR_UE) {
+			if (udbh & UDBE_CE) {
+				__asm__ __volatile__(
+					"stxa	%0, [%1] %2\n\t"
+					"membar	#Sync"
+					: /* no outputs */
+					: "r" (udbh & UDBE_CE),
+					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
+			}
+			if (udbl & UDBE_CE) {
+				__asm__ __volatile__(
+					"stxa	%0, [%1] %2\n\t"
+					"membar	#Sync"
+					: /* no outputs */
+					: "r" (udbl & UDBE_CE),
+					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
+			}
+		}
+
+		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
+	}
 }
 
 int cheetah_pcache_forced_on;
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 491bb3681f9d..8365bc1f81f3 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
 tl0_resv000:	BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3)
 tl0_resv004:	BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7)
 tl0_iax:	membar #Sync
-		TRAP_NOSAVE_7INSNS(__do_instruction_access_exception)
+		TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception)
 tl0_resv009:	BTRAP(0x9)
-tl0_iae:	TRAP(do_iae)
+tl0_iae:	membar #Sync
+		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl0_resv00b:	BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf)
 tl0_ill:	membar #Sync
 		TRAP_7INSNS(do_illegal_instruction)
@@ -36,9 +37,10 @@ tl0_cwin:	CLEAN_WINDOW
 tl0_div0:	TRAP(do_div0)
 tl0_resv029:	BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e)
 tl0_resv02f:	BTRAP(0x2f)
-tl0_dax:	TRAP_NOSAVE(__do_data_access_exception)
+tl0_dax:	TRAP_NOSAVE(__spitfire_data_access_exception)
 tl0_resv031:	BTRAP(0x31)
-tl0_dae:	TRAP(do_dae)
+tl0_dae:	membar #Sync
+		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl0_resv033:	BTRAP(0x33)
 tl0_mna:	TRAP_NOSAVE(do_mna)
 tl0_lddfmna:	TRAP_NOSAVE(do_lddfmna)
@@ -73,7 +75,8 @@ tl0_resv05c:	BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
 tl0_ivec:	TRAP_IVEC
 tl0_paw:	TRAP(do_paw)
 tl0_vaw:	TRAP(do_vaw)
-tl0_cee:	TRAP_NOSAVE(cee_trap)
+tl0_cee:	membar #Sync
+		TRAP_NOSAVE_7INSNS(__spitfire_cee_trap)
 tl0_iamiss:
 #include "itlb_base.S"
 tl0_damiss:
@@ -175,9 +178,10 @@ tl0_resv1f0:	BTRAPS(0x1f0) BTRAPS(0x1f8)
 sparc64_ttable_tl1:
 tl1_resv000:	BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3)
 tl1_resv004:	BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7)
-tl1_iax:	TRAP_NOSAVE(__do_instruction_access_exception_tl1)
+tl1_iax:	TRAP_NOSAVE(__spitfire_insn_access_exception_tl1)
 tl1_resv009:	BTRAPTL1(0x9)
-tl1_iae:	TRAPTL1(do_iae_tl1)
+tl1_iae:	membar #Sync
+		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl1_resv00b:	BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf)
 tl1_ill:	TRAPTL1(do_ill_tl1)
 tl1_privop:	BTRAPTL1(0x11)
@@ -193,9 +197,10 @@ tl1_cwin:	CLEAN_WINDOW
 tl1_div0:	TRAPTL1(do_div0_tl1)
 tl1_resv029:	BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c)
 tl1_resv02d:	BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f)
-tl1_dax:	TRAP_NOSAVE(__do_data_access_exception_tl1)
+tl1_dax:	TRAP_NOSAVE(__spitfire_data_access_exception_tl1)
 tl1_resv031:	BTRAPTL1(0x31)
-tl1_dae:	TRAPTL1(do_dae_tl1)
+tl1_dae:	membar #Sync
+		TRAP_NOSAVE_7INSNS(__spitfire_access_error)
 tl1_resv033:	BTRAPTL1(0x33)
 tl1_mna:	TRAP_NOSAVE(do_mna)
 tl1_lddfmna:	TRAPTL1(do_lddfmna_tl1)
@@ -219,8 +224,8 @@ tl1_paw:	TRAPTL1(do_paw_tl1)
 tl1_vaw:	TRAPTL1(do_vaw_tl1)
 
 /* The grotty trick to save %g1 into current->thread.cee_stuff
- * is because when we take this trap we could be interrupting trap
- * code already using the trap alternate global registers.
+ * is because when we take this trap we could be interrupting
+ * trap code already using the trap alternate global registers.
  *
  * We cross our fingers and pray that this store/load does
  * not cause yet another CEE trap.
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index 11c3e88732e4..da9739f0d437 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -349,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs)
 
 extern void do_fpother(struct pt_regs *regs);
 extern void do_privact(struct pt_regs *regs);
-extern void data_access_exception(struct pt_regs *regs,
-				  unsigned long sfsr,
-				  unsigned long sfar);
+extern void spitfire_data_access_exception(struct pt_regs *regs,
+					   unsigned long sfsr,
+					   unsigned long sfar);
 
 int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 {
@@ -394,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 			break;
 		}
 	default:
-		data_access_exception(regs, 0, addr);
+		spitfire_data_access_exception(regs, 0, addr);
 		return 1;
 	}
 	if (put_user (first >> 32, (u32 __user *)addr) ||
 	    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
 	    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
 	    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
-		data_access_exception(regs, 0, addr);
+		spitfire_data_access_exception(regs, 0, addr);
 		return 1;
 	}
 } else {
@@ -414,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 		do_privact(regs);
 		return 1;
 	} else if (asi > ASI_SNFL) {
-		data_access_exception(regs, 0, addr);
+		spitfire_data_access_exception(regs, 0, addr);
 		return 1;
 	}
 	switch (insn & 0x180000) {
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
 			err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
 	}
 	if (err && !(asi & 0x2 /* NF */)) {
-		data_access_exception(regs, 0, addr);
+		spitfire_data_access_exception(regs, 0, addr);
 		return 1;
 	}
 	if (asi & 0x8) /* Little */ {
@@ -534,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 		*(u64 *)(f->regs + freg) = value;
 		current_thread_info()->fpsaved[0] |= flag;
 	} else {
-daex:		data_access_exception(regs, sfsr, sfar);
+daex:		spitfire_data_access_exception(regs, sfsr, sfar);
 		return;
 	}
 	advance(regs);
@@ -578,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
 			goto daex;
 	} else {
-daex:		data_access_exception(regs, sfsr, sfar);
+daex:		spitfire_data_access_exception(regs, sfsr, sfar);
 		return;
 	}
 	advance(regs);
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index dfbc7e0dcf70..99c809a1e5ac 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -318,7 +318,7 @@ fill_fixup_dax:
 	 nop
 	rdpr	%pstate, %l1		! Prepare to change globals.
 	mov	%g4, %o1		! Setup args for
-	mov	%g5, %o2		! final call to data_access_exception.
+	mov	%g5, %o2		! final call to spitfire_data_access_exception.
 	andn	%l1, PSTATE_MM, %l1	! We want to be in RMO
 
 	mov	%g6, %o7		! Stash away current.
@@ -330,7 +330,7 @@ fill_fixup_dax:
 	mov	TSB_REG, %g1
 	ldxa	[%g1] ASI_IMMU, %g5
 #endif
-	call	data_access_exception
+	call	spitfire_data_access_exception
 	 add	%sp, PTREGS_OFF, %o0
 
 	b,pt	%xcc, rtrap
@@ -391,7 +391,7 @@ window_dax_from_user_common:
 109:	 or	%g7, %lo(109b), %g7
 	mov	%l4, %o1
 	mov	%l5, %o2
-	call	data_access_exception
+	call	spitfire_data_access_exception
 	 add	%sp, PTREGS_OFF, %o0
 	ba,pt	%xcc, rtrap
 	 clr	%l6
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index 40dbeec7e5d6..6201f1040982 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
	 U1memcpy.o U1copy_from_user.o U1copy_to_user.o \
	 U3memcpy.o U3copy_from_user.o U3copy_to_user.o U3patch.o \
	 copy_in_user.o user_fixup.o memmove.o \
-	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
+	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o mb.o
 
 lib-$(CONFIG_DEBUG_SPINLOCK) += debuglocks.o
 lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
diff --git a/arch/sparc64/lib/debuglocks.c b/arch/sparc64/lib/debuglocks.c index f03344cf784e..f5f0b5586f01 100644 --- a/arch/sparc64/lib/debuglocks.c +++ b/arch/sparc64/lib/debuglocks.c | |||
@@ -12,8 +12,6 @@ | |||
12 | 12 | ||
13 | #ifdef CONFIG_SMP | 13 | #ifdef CONFIG_SMP |
14 | 14 | ||
15 | #define GET_CALLER(PC) __asm__ __volatile__("mov %%i7, %0" : "=r" (PC)) | ||
16 | |||
17 | static inline void show (char *str, spinlock_t *lock, unsigned long caller) | 15 | static inline void show (char *str, spinlock_t *lock, unsigned long caller) |
18 | { | 16 | { |
19 | int cpu = smp_processor_id(); | 17 | int cpu = smp_processor_id(); |
@@ -51,20 +49,19 @@ static inline void show_write (char *str, rwlock_t *lock, unsigned long caller) | |||
51 | #undef INIT_STUCK | 49 | #undef INIT_STUCK |
52 | #define INIT_STUCK 100000000 | 50 | #define INIT_STUCK 100000000 |
53 | 51 | ||
54 | void _do_spin_lock(spinlock_t *lock, char *str) | 52 | void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller) |
55 | { | 53 | { |
56 | unsigned long caller, val; | 54 | unsigned long val; |
57 | int stuck = INIT_STUCK; | 55 | int stuck = INIT_STUCK; |
58 | int cpu = get_cpu(); | 56 | int cpu = get_cpu(); |
59 | int shown = 0; | 57 | int shown = 0; |
60 | 58 | ||
61 | GET_CALLER(caller); | ||
62 | again: | 59 | again: |
63 | __asm__ __volatile__("ldstub [%1], %0" | 60 | __asm__ __volatile__("ldstub [%1], %0" |
64 | : "=r" (val) | 61 | : "=r" (val) |
65 | : "r" (&(lock->lock)) | 62 | : "r" (&(lock->lock)) |
66 | : "memory"); | 63 | : "memory"); |
67 | membar("#StoreLoad | #StoreStore"); | 64 | membar_storeload_storestore(); |
68 | if (val) { | 65 | if (val) { |
69 | while (lock->lock) { | 66 | while (lock->lock) { |
70 | if (!--stuck) { | 67 | if (!--stuck) { |
@@ -72,7 +69,7 @@ again: | |||
72 | show(str, lock, caller); | 69 | show(str, lock, caller); |
73 | stuck = INIT_STUCK; | 70 | stuck = INIT_STUCK; |
74 | } | 71 | } |
75 | membar("#LoadLoad"); | 72 | rmb(); |
76 | } | 73 | } |
77 | goto again; | 74 | goto again; |
78 | } | 75 | } |
@@ -84,17 +81,16 @@ again: | |||
84 | put_cpu(); | 81 | put_cpu(); |
85 | } | 82 | } |
86 | 83 | ||
87 | int _do_spin_trylock(spinlock_t *lock) | 84 | int _do_spin_trylock(spinlock_t *lock, unsigned long caller) |
88 | { | 85 | { |
89 | unsigned long val, caller; | 86 | unsigned long val; |
90 | int cpu = get_cpu(); | 87 | int cpu = get_cpu(); |
91 | 88 | ||
92 | GET_CALLER(caller); | ||
93 | __asm__ __volatile__("ldstub [%1], %0" | 89 | __asm__ __volatile__("ldstub [%1], %0" |
94 | : "=r" (val) | 90 | : "=r" (val) |
95 | : "r" (&(lock->lock)) | 91 | : "r" (&(lock->lock)) |
96 | : "memory"); | 92 | : "memory"); |
97 | membar("#StoreLoad | #StoreStore"); | 93 | membar_storeload_storestore(); |
98 | if (!val) { | 94 | if (!val) { |
99 | lock->owner_pc = ((unsigned int)caller); | 95 | lock->owner_pc = ((unsigned int)caller); |
100 | lock->owner_cpu = cpu; | 96 | lock->owner_cpu = cpu; |
@@ -111,21 +107,20 @@ void _do_spin_unlock(spinlock_t *lock) | |||
111 | { | 107 | { |
112 | lock->owner_pc = 0; | 108 | lock->owner_pc = 0; |
113 | lock->owner_cpu = NO_PROC_ID; | 109 | lock->owner_cpu = NO_PROC_ID; |
114 | membar("#StoreStore | #LoadStore"); | 110 | membar_storestore_loadstore(); |
115 | lock->lock = 0; | 111 | lock->lock = 0; |
116 | current->thread.smp_lock_count--; | 112 | current->thread.smp_lock_count--; |
117 | } | 113 | } |
118 | 114 | ||
119 | /* Keep INIT_STUCK the same... */ | 115 | /* Keep INIT_STUCK the same... */ |
120 | 116 | ||
121 | void _do_read_lock (rwlock_t *rw, char *str) | 117 | void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller) |
122 | { | 118 | { |
123 | unsigned long caller, val; | 119 | unsigned long val; |
124 | int stuck = INIT_STUCK; | 120 | int stuck = INIT_STUCK; |
125 | int cpu = get_cpu(); | 121 | int cpu = get_cpu(); |
126 | int shown = 0; | 122 | int shown = 0; |
127 | 123 | ||
128 | GET_CALLER(caller); | ||
129 | wlock_again: | 124 | wlock_again: |
130 | /* Wait for any writer to go away. */ | 125 | /* Wait for any writer to go away. */ |
131 | while (((long)(rw->lock)) < 0) { | 126 | while (((long)(rw->lock)) < 0) { |
@@ -134,7 +129,7 @@ wlock_again: | |||
134 | show_read(str, rw, caller); | 129 | show_read(str, rw, caller); |
135 | stuck = INIT_STUCK; | 130 | stuck = INIT_STUCK; |
136 | } | 131 | } |
137 | membar("#LoadLoad"); | 132 | rmb(); |
138 | } | 133 | } |
139 | /* Try once to increment the counter. */ | 134 | /* Try once to increment the counter. */ |
140 | __asm__ __volatile__( | 135 | __asm__ __volatile__( |
@@ -147,7 +142,7 @@ wlock_again: | |||
147 | "2:" : "=r" (val) | 142 | "2:" : "=r" (val) |
148 | : "0" (&(rw->lock)) | 143 | : "0" (&(rw->lock)) |
149 | : "g1", "g7", "memory"); | 144 | : "g1", "g7", "memory"); |
150 | membar("#StoreLoad | #StoreStore"); | 145 | membar_storeload_storestore(); |
151 | if (val) | 146 | if (val) |
152 | goto wlock_again; | 147 | goto wlock_again; |
153 | rw->reader_pc[cpu] = ((unsigned int)caller); | 148 | rw->reader_pc[cpu] = ((unsigned int)caller); |
@@ -157,15 +152,13 @@ wlock_again: | |||
157 | put_cpu(); | 152 | put_cpu(); |
158 | } | 153 | } |
159 | 154 | ||
160 | void _do_read_unlock (rwlock_t *rw, char *str) | 155 | void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller) |
161 | { | 156 | { |
162 | unsigned long caller, val; | 157 | unsigned long val; |
163 | int stuck = INIT_STUCK; | 158 | int stuck = INIT_STUCK; |
164 | int cpu = get_cpu(); | 159 | int cpu = get_cpu(); |
165 | int shown = 0; | 160 | int shown = 0; |
166 | 161 | ||
167 | GET_CALLER(caller); | ||
168 | |||
169 | /* Drop our identity _first_. */ | 162 | /* Drop our identity _first_. */ |
170 | rw->reader_pc[cpu] = 0; | 163 | rw->reader_pc[cpu] = 0; |
171 | current->thread.smp_lock_count--; | 164 | current->thread.smp_lock_count--; |
@@ -193,14 +186,13 @@ runlock_again: | |||
193 | put_cpu(); | 186 | put_cpu(); |
194 | } | 187 | } |
195 | 188 | ||
196 | void _do_write_lock (rwlock_t *rw, char *str) | 189 | void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller) |
197 | { | 190 | { |
198 | unsigned long caller, val; | 191 | unsigned long val; |
199 | int stuck = INIT_STUCK; | 192 | int stuck = INIT_STUCK; |
200 | int cpu = get_cpu(); | 193 | int cpu = get_cpu(); |
201 | int shown = 0; | 194 | int shown = 0; |
202 | 195 | ||
203 | GET_CALLER(caller); | ||
204 | wlock_again: | 196 | wlock_again: |
205 | /* Spin while there is another writer. */ | 197 | /* Spin while there is another writer. */ |
206 | while (((long)rw->lock) < 0) { | 198 | while (((long)rw->lock) < 0) { |
@@ -209,7 +201,7 @@ wlock_again: | |||
209 | show_write(str, rw, caller); | 201 | show_write(str, rw, caller); |
210 | stuck = INIT_STUCK; | 202 | stuck = INIT_STUCK; |
211 | } | 203 | } |
212 | membar("#LoadLoad"); | 204 | rmb(); |
213 | } | 205 | } |
214 | 206 | ||
215 | /* Try to acquire the write bit. */ | 207 | /* Try to acquire the write bit. */ |
@@ -264,7 +256,7 @@ wlock_again: | |||
264 | show_write(str, rw, caller); | 256 | show_write(str, rw, caller); |
265 | stuck = INIT_STUCK; | 257 | stuck = INIT_STUCK; |
266 | } | 258 | } |
267 | membar("#LoadLoad"); | 259 | rmb(); |
268 | } | 260 | } |
269 | goto wlock_again; | 261 | goto wlock_again; |
270 | } | 262 | } |
@@ -278,14 +270,12 @@ wlock_again: | |||
278 | put_cpu(); | 270 | put_cpu(); |
279 | } | 271 | } |
280 | 272 | ||
281 | void _do_write_unlock(rwlock_t *rw) | 273 | void _do_write_unlock(rwlock_t *rw, unsigned long caller) |
282 | { | 274 | { |
283 | unsigned long caller, val; | 275 | unsigned long val; |
284 | int stuck = INIT_STUCK; | 276 | int stuck = INIT_STUCK; |
285 | int shown = 0; | 277 | int shown = 0; |
286 | 278 | ||
287 | GET_CALLER(caller); | ||
288 | |||
289 | /* Drop our identity _first_ */ | 279 | /* Drop our identity _first_ */ |
290 | rw->writer_pc = 0; | 280 | rw->writer_pc = 0; |
291 | rw->writer_cpu = NO_PROC_ID; | 281 | rw->writer_cpu = NO_PROC_ID; |
@@ -313,13 +303,11 @@ wlock_again: | |||
313 | } | 303 | } |
314 | } | 304 | } |
315 | 305 | ||
316 | int _do_write_trylock (rwlock_t *rw, char *str) | 306 | int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller) |
317 | { | 307 | { |
318 | unsigned long caller, val; | 308 | unsigned long val; |
319 | int cpu = get_cpu(); | 309 | int cpu = get_cpu(); |
320 | 310 | ||
321 | GET_CALLER(caller); | ||
322 | |||
323 | /* Try to acquire the write bit. */ | 311 | /* Try to acquire the write bit. */ |
324 | __asm__ __volatile__( | 312 | __asm__ __volatile__( |
325 | " mov 1, %%g3\n" | 313 | " mov 1, %%g3\n" |
diff --git a/arch/sparc64/lib/mb.S b/arch/sparc64/lib/mb.S new file mode 100644 index 000000000000..4004f748619f --- /dev/null +++ b/arch/sparc64/lib/mb.S | |||
@@ -0,0 +1,73 @@ | |||
1 | /* mb.S: Out of line memory barriers. | ||
2 | * | ||
3 | * Copyright (C) 2005 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | /* These are here in an effort to more fully work around | ||
7 | * Spitfire Errata #51. Essentially, if a memory barrier | ||
8 | * occurs soon after a mispredicted branch, the chip can stop | ||
9 | * executing instructions until a trap occurs. Therefore, if | ||
10 | * interrupts are disabled, the chip can hang forever. | ||
11 | * | ||
12 | * It used to be believed that the memory barrier had to be | ||
13 | * right in the delay slot, but a case has been traced | ||
14 | * recently wherein the memory barrier was one instruction | ||
15 | * after the branch delay slot and the chip still hung. The | ||
16 | * offending sequence was the following in sym_wakeup_done() | ||
17 | * of the sym53c8xx_2 driver: | ||
18 | * | ||
19 | * call sym_ccb_from_dsa, 0 | ||
20 | * movge %icc, 0, %l0 | ||
21 | * brz,pn %o0, .LL1303 | ||
22 | * mov %o0, %l2 | ||
23 | * membar #LoadLoad | ||
24 | * | ||
25 | * The branch has to be mispredicted for the bug to occur. | ||
26 | * Therefore, we put the memory barrier explicitly into a | ||
27 | * "branch always, predicted taken" delay slot to avoid the | ||
28 | * problem case. | ||
29 | */ | ||
30 | |||
31 | .text | ||
32 | |||
33 | 99: retl | ||
34 | nop | ||
35 | |||
36 | .globl mb | ||
37 | mb: ba,pt %xcc, 99b | ||
38 | membar #LoadLoad | #LoadStore | #StoreStore | #StoreLoad | ||
39 | .size mb, .-mb | ||
40 | |||
41 | .globl rmb | ||
42 | rmb: ba,pt %xcc, 99b | ||
43 | membar #LoadLoad | ||
44 | .size rmb, .-rmb | ||
45 | |||
46 | .globl wmb | ||
47 | wmb: ba,pt %xcc, 99b | ||
48 | membar #StoreStore | ||
49 | .size wmb, .-wmb | ||
50 | |||
51 | .globl membar_storeload | ||
52 | membar_storeload: | ||
53 | ba,pt %xcc, 99b | ||
54 | membar #StoreLoad | ||
55 | .size membar_storeload, .-membar_storeload | ||
56 | |||
57 | .globl membar_storeload_storestore | ||
58 | membar_storeload_storestore: | ||
59 | ba,pt %xcc, 99b | ||
60 | membar #StoreLoad | #StoreStore | ||
61 | .size membar_storeload_storestore, .-membar_storeload_storestore | ||
62 | |||
63 | .globl membar_storeload_loadload | ||
64 | membar_storeload_loadload: | ||
65 | ba,pt %xcc, 99b | ||
66 | membar #StoreLoad | #LoadLoad | ||
67 | .size membar_storeload_loadload, .-membar_storeload_loadload | ||
68 | |||
69 | .globl membar_storestore_loadstore | ||
70 | membar_storestore_loadstore: | ||
71 | ba,pt %xcc, 99b | ||
72 | membar #StoreStore | #LoadStore | ||
73 | .size membar_storestore_loadstore, .-membar_storestore_loadstore | ||
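The out-of-line barriers above keep each membar in a branch-always, predicted-taken delay slot, so the Errata #51 window never opens at a call site. C code presumably reaches them through plain function declarations matching the .globl symbols; a sketch of the header side, with the extern declarations assumed for illustration:

	/* Sketch: C-visible declarations for the out-of-line barriers
	 * in mb.S.  Each call executes ba,pt plus a membar in the
	 * delay slot, so the barrier itself is never adjacent to a
	 * mispredicted branch in the caller.
	 */
	extern void mb(void);
	extern void rmb(void);
	extern void wmb(void);
	extern void membar_storeload(void);
	extern void membar_storeload_storestore(void);
	extern void membar_storeload_loadload(void);
	extern void membar_storestore_loadstore(void);
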
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c index 15b4cfe07557..302efbcba70e 100644 --- a/arch/sparc64/solaris/misc.c +++ b/arch/sparc64/solaris/misc.c | |||
@@ -737,7 +737,8 @@ MODULE_LICENSE("GPL"); | |||
737 | extern u32 tl0_solaris[8]; | 737 | extern u32 tl0_solaris[8]; |
738 | #define update_ttable(x) \ | 738 | #define update_ttable(x) \ |
739 | tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \ | 739 | tl0_solaris[3] = (((long)(x) - (long)tl0_solaris - 3) >> 2) | 0x40000000; \ |
740 | __asm__ __volatile__ ("membar #StoreStore; flush %0" : : "r" (&tl0_solaris[3])) | 740 | wmb(); \ |
741 | __asm__ __volatile__ ("flush %0" : : "r" (&tl0_solaris[3])) | ||
741 | #else | 742 | #else |
742 | #endif | 743 | #endif |
743 | 744 | ||
@@ -761,7 +762,8 @@ int init_module(void) | |||
761 | entry64_personality_patch |= | 762 | entry64_personality_patch |= |
762 | (offsetof(struct task_struct, personality) + | 763 | (offsetof(struct task_struct, personality) + |
763 | (sizeof(unsigned long) - 1)); | 764 | (sizeof(unsigned long) - 1)); |
764 | __asm__ __volatile__("membar #StoreStore; flush %0" | 765 | wmb(); |
766 | __asm__ __volatile__("flush %0" | ||
765 | : : "r" (&entry64_personality_patch)); | 767 | : : "r" (&entry64_personality_patch)); |
766 | return 0; | 768 | return 0; |
767 | } | 769 | } |
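The solaris/misc.c hunks split the old "membar #StoreStore; flush" sequence so the barrier goes through the out-of-line wmb() from mb.S. A minimal sketch of the resulting patch-then-flush idiom, using a hypothetical helper name:

	/* Hypothetical sketch of the code-patching idiom above: store
	 * the new instruction word, order the store with the
	 * out-of-line wmb() (safe w.r.t. Errata #51), then flush the
	 * I-cache line so the CPU refetches the patched instruction.
	 */
	static void patch_one_insn(u32 *insn, u32 new_val)
	{
		*insn = new_val;	/* write the new opcode */
		wmb();			/* order the store before flush */
		__asm__ __volatile__("flush %0" : : "r" (insn));
	}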