Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r-- | arch/sparc64/kernel/Makefile        |   2
-rw-r--r-- | arch/sparc64/kernel/entry.S         | 293
-rw-r--r-- | arch/sparc64/kernel/pci.c           |   1
-rw-r--r-- | arch/sparc64/kernel/pci_iommu.c     |   2
-rw-r--r-- | arch/sparc64/kernel/process.c       |   2
-rw-r--r-- | arch/sparc64/kernel/sbus.c          |   2
-rw-r--r-- | arch/sparc64/kernel/setup.c         |   1
-rw-r--r-- | arch/sparc64/kernel/signal.c        |  11
-rw-r--r-- | arch/sparc64/kernel/signal32.c      |  33
-rw-r--r-- | arch/sparc64/kernel/smp.c           |  30
-rw-r--r-- | arch/sparc64/kernel/sparc64_ksyms.c |  32
-rw-r--r-- | arch/sparc64/kernel/traps.c         | 272
-rw-r--r-- | arch/sparc64/kernel/ttable.S        |  27
-rw-r--r-- | arch/sparc64/kernel/una_asm.S       | 153
-rw-r--r-- | arch/sparc64/kernel/unaligned.c     | 279
-rw-r--r-- | arch/sparc64/kernel/us2e_cpufreq.c  |  36
-rw-r--r-- | arch/sparc64/kernel/us3_cpufreq.c   |  29
-rw-r--r-- | arch/sparc64/kernel/winfixup.S      |   6
18 files changed, 680 insertions, 531 deletions
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 093281bdf85f..6f00ab8b9d23 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror
8 | extra-y := head.o init_task.o vmlinux.lds | 8 | extra-y := head.o init_task.o vmlinux.lds |
9 | 9 | ||
10 | obj-y := process.o setup.o cpu.o idprom.o \ | 10 | obj-y := process.o setup.o cpu.o idprom.o \ |
11 | traps.o devices.o auxio.o \ | 11 | traps.o devices.o auxio.o una_asm.o \ |
12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ | 12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ |
13 | unaligned.o central.o pci.o starfire.o semaphore.o \ | 13 | unaligned.o central.o pci.o starfire.o semaphore.o \ |
14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o | 14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 88332f00094a..cecdc0a7521f 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -21,6 +21,7 @@
21 | #include <asm/visasm.h> | 21 | #include <asm/visasm.h> |
22 | #include <asm/estate.h> | 22 | #include <asm/estate.h> |
23 | #include <asm/auxio.h> | 23 | #include <asm/auxio.h> |
24 | #include <asm/sfafsr.h> | ||
24 | 25 | ||
25 | #define curptr g6 | 26 | #define curptr g6 |
26 | 27 | ||
@@ -690,14 +691,159 @@ netbsd_syscall:
690 | retl | 691 | retl |
691 | nop | 692 | nop |
692 | 693 | ||
693 | /* These next few routines must be sure to clear the | 694 | /* We need to carefully read the error status, ACK |
694 | * SFSR FaultValid bit so that the fast tlb data protection | 695 | * the errors, prevent recursive traps, and pass the |
695 | * handler does not flush the wrong context and lock up the | 696 | * information on to C code for logging. |
696 | * box. | 697 | * |
698 | * We pass the AFAR in as-is, and we encode the status | ||
699 | * information as described in asm-sparc64/sfafsr.h | ||
700 | */ | ||
701 | .globl __spitfire_access_error | ||
702 | __spitfire_access_error: | ||
703 | /* Disable ESTATE error reporting so that we do not | ||
704 | * take recursive traps and RED state the processor. | ||
705 | */ | ||
706 | stxa %g0, [%g0] ASI_ESTATE_ERROR_EN | ||
707 | membar #Sync | ||
708 | |||
709 | mov UDBE_UE, %g1 | ||
710 | ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR | ||
711 | |||
712 | /* __spitfire_cee_trap branches here with AFSR in %g4 and | ||
713 | * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the | ||
714 | * ESTATE Error Enable register. | ||
715 | */ | ||
716 | __spitfire_cee_trap_continue: | ||
717 | ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR | ||
718 | |||
719 | rdpr %tt, %g3 | ||
720 | and %g3, 0x1ff, %g3 ! Paranoia | ||
721 | sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3 | ||
722 | or %g4, %g3, %g4 | ||
723 | rdpr %tl, %g3 | ||
724 | cmp %g3, 1 | ||
725 | mov 1, %g3 | ||
726 | bleu %xcc, 1f | ||
727 | sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3 | ||
728 | |||
729 | or %g4, %g3, %g4 | ||
730 | |||
731 | /* Read in the UDB error register state, clearing the | ||
732 | * sticky error bits as-needed. We only clear them if | ||
733 | * the UE bit is set. Likewise, __spitfire_cee_trap | ||
734 | * below will only do so if the CE bit is set. | ||
735 | * | ||
736 | * NOTE: UltraSparc-I/II have high and low UDB error | ||
737 | * registers, corresponding to the two UDB units | ||
738 | * present on those chips. UltraSparc-IIi only | ||
739 | * has a single UDB, called "SDB" in the manual. | ||
740 | * For IIi the upper UDB register always reads | ||
741 | * as zero so for our purposes things will just | ||
742 | * work with the checks below. | ||
697 | */ | 743 | */ |
698 | .globl __do_data_access_exception | 744 | 1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3 |
699 | .globl __do_data_access_exception_tl1 | 745 | and %g3, 0x3ff, %g7 ! Paranoia |
700 | __do_data_access_exception_tl1: | 746 | sllx %g7, SFSTAT_UDBH_SHIFT, %g7 |
747 | or %g4, %g7, %g4 | ||
748 | andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE | ||
749 | be,pn %xcc, 1f | ||
750 | nop | ||
751 | stxa %g3, [%g0] ASI_UDB_ERROR_W | ||
752 | membar #Sync | ||
753 | |||
754 | 1: mov 0x18, %g3 | ||
755 | ldxa [%g3] ASI_UDBL_ERROR_R, %g3 | ||
756 | and %g3, 0x3ff, %g7 ! Paranoia | ||
757 | sllx %g7, SFSTAT_UDBL_SHIFT, %g7 | ||
758 | or %g4, %g7, %g4 | ||
759 | andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE | ||
760 | be,pn %xcc, 1f | ||
761 | nop | ||
762 | mov 0x18, %g7 | ||
763 | stxa %g3, [%g7] ASI_UDB_ERROR_W | ||
764 | membar #Sync | ||
765 | |||
766 | 1: /* Ok, now that we've latched the error state, | ||
767 | * clear the sticky bits in the AFSR. | ||
768 | */ | ||
769 | stxa %g4, [%g0] ASI_AFSR | ||
770 | membar #Sync | ||
771 | |||
772 | rdpr %tl, %g2 | ||
773 | cmp %g2, 1 | ||
774 | rdpr %pil, %g2 | ||
775 | bleu,pt %xcc, 1f | ||
776 | wrpr %g0, 15, %pil | ||
777 | |||
778 | ba,pt %xcc, etraptl1 | ||
779 | rd %pc, %g7 | ||
780 | |||
781 | ba,pt %xcc, 2f | ||
782 | nop | ||
783 | |||
784 | 1: ba,pt %xcc, etrap_irq | ||
785 | rd %pc, %g7 | ||
786 | |||
787 | 2: mov %l4, %o1 | ||
788 | mov %l5, %o2 | ||
789 | call spitfire_access_error | ||
790 | add %sp, PTREGS_OFF, %o0 | ||
791 | ba,pt %xcc, rtrap | ||
792 | clr %l6 | ||
793 | |||
794 | /* This is the trap handler entry point for ECC correctable | ||
795 | * errors. They are corrected, but we listen for the trap | ||
796 | * so that the event can be logged. | ||
797 | * | ||
798 | * Disrupting errors are either: | ||
799 | * 1) single-bit ECC errors during UDB reads to system | ||
800 | * memory | ||
801 | * 2) data parity errors during write-back events | ||
802 | * | ||
803 | * As far as I can make out from the manual, the CEE trap | ||
804 | * is only for correctable errors during memory read | ||
805 | * accesses by the front-end of the processor. | ||
806 | * | ||
807 | * The code below is only for trap level 1 CEE events, | ||
808 | * as it is the only situation where we can safely record | ||
809 | * and log. For trap level >1 we just clear the CE bit | ||
810 | * in the AFSR and return. | ||
811 | * | ||
812 | * This is just like __spiftire_access_error above, but it | ||
813 | * specifically handles correctable errors. If an | ||
814 | * uncorrectable error is indicated in the AFSR we | ||
815 | * will branch directly above to __spitfire_access_error | ||
816 | * to handle it instead. Uncorrectable therefore takes | ||
817 | * priority over correctable, and the error logging | ||
818 | * C code will notice this case by inspecting the | ||
819 | * trap type. | ||
820 | */ | ||
821 | .globl __spitfire_cee_trap | ||
822 | __spitfire_cee_trap: | ||
823 | ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR | ||
824 | mov 1, %g3 | ||
825 | sllx %g3, SFAFSR_UE_SHIFT, %g3 | ||
826 | andcc %g4, %g3, %g0 ! Check for UE | ||
827 | bne,pn %xcc, __spitfire_access_error | ||
828 | nop | ||
829 | |||
830 | /* Ok, in this case we only have a correctable error. | ||
831 | * Indicate we only wish to capture that state in register | ||
832 | * %g1, and we only disable CE error reporting unlike UE | ||
833 | * handling which disables all errors. | ||
834 | */ | ||
835 | ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3 | ||
836 | andn %g3, ESTATE_ERR_CE, %g3 | ||
837 | stxa %g3, [%g0] ASI_ESTATE_ERROR_EN | ||
838 | membar #Sync | ||
839 | |||
840 | /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */ | ||
841 | ba,pt %xcc, __spitfire_cee_trap_continue | ||
842 | mov UDBE_CE, %g1 | ||
843 | |||
844 | .globl __spitfire_data_access_exception | ||
845 | .globl __spitfire_data_access_exception_tl1 | ||
846 | __spitfire_data_access_exception_tl1: | ||
701 | rdpr %pstate, %g4 | 847 | rdpr %pstate, %g4 |
702 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate | 848 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate |
703 | mov TLB_SFSR, %g3 | 849 | mov TLB_SFSR, %g3 |
@@ -706,9 +852,25 @@ __do_data_access_exception_tl1:
706 | ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR | 852 | ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR |
707 | stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit | 853 | stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit |
708 | membar #Sync | 854 | membar #Sync |
855 | rdpr %tt, %g3 | ||
856 | cmp %g3, 0x80 ! first win spill/fill trap | ||
857 | blu,pn %xcc, 1f | ||
858 | cmp %g3, 0xff ! last win spill/fill trap | ||
859 | bgu,pn %xcc, 1f | ||
860 | nop | ||
709 | ba,pt %xcc, winfix_dax | 861 | ba,pt %xcc, winfix_dax |
710 | rdpr %tpc, %g3 | 862 | rdpr %tpc, %g3 |
711 | __do_data_access_exception: | 863 | 1: sethi %hi(109f), %g7 |
864 | ba,pt %xcc, etraptl1 | ||
865 | 109: or %g7, %lo(109b), %g7 | ||
866 | mov %l4, %o1 | ||
867 | mov %l5, %o2 | ||
868 | call spitfire_data_access_exception_tl1 | ||
869 | add %sp, PTREGS_OFF, %o0 | ||
870 | ba,pt %xcc, rtrap | ||
871 | clr %l6 | ||
872 | |||
873 | __spitfire_data_access_exception: | ||
712 | rdpr %pstate, %g4 | 874 | rdpr %pstate, %g4 |
713 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate | 875 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate |
714 | mov TLB_SFSR, %g3 | 876 | mov TLB_SFSR, %g3 |
@@ -722,20 +884,19 @@ __do_data_access_exception:
722 | 109: or %g7, %lo(109b), %g7 | 884 | 109: or %g7, %lo(109b), %g7 |
723 | mov %l4, %o1 | 885 | mov %l4, %o1 |
724 | mov %l5, %o2 | 886 | mov %l5, %o2 |
725 | call data_access_exception | 887 | call spitfire_data_access_exception |
726 | add %sp, PTREGS_OFF, %o0 | 888 | add %sp, PTREGS_OFF, %o0 |
727 | ba,pt %xcc, rtrap | 889 | ba,pt %xcc, rtrap |
728 | clr %l6 | 890 | clr %l6 |
729 | 891 | ||
730 | .globl __do_instruction_access_exception | 892 | .globl __spitfire_insn_access_exception |
731 | .globl __do_instruction_access_exception_tl1 | 893 | .globl __spitfire_insn_access_exception_tl1 |
732 | __do_instruction_access_exception_tl1: | 894 | __spitfire_insn_access_exception_tl1: |
733 | rdpr %pstate, %g4 | 895 | rdpr %pstate, %g4 |
734 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate | 896 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate |
735 | mov TLB_SFSR, %g3 | 897 | mov TLB_SFSR, %g3 |
736 | mov DMMU_SFAR, %g5 | 898 | ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR |
737 | ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR | 899 | rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC |
738 | ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR | ||
739 | stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit | 900 | stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit |
740 | membar #Sync | 901 | membar #Sync |
741 | sethi %hi(109f), %g7 | 902 | sethi %hi(109f), %g7 |
@@ -743,18 +904,17 @@ __do_instruction_access_exception_tl1:
743 | 109: or %g7, %lo(109b), %g7 | 904 | 109: or %g7, %lo(109b), %g7 |
744 | mov %l4, %o1 | 905 | mov %l4, %o1 |
745 | mov %l5, %o2 | 906 | mov %l5, %o2 |
746 | call instruction_access_exception_tl1 | 907 | call spitfire_insn_access_exception_tl1 |
747 | add %sp, PTREGS_OFF, %o0 | 908 | add %sp, PTREGS_OFF, %o0 |
748 | ba,pt %xcc, rtrap | 909 | ba,pt %xcc, rtrap |
749 | clr %l6 | 910 | clr %l6 |
750 | 911 | ||
751 | __do_instruction_access_exception: | 912 | __spitfire_insn_access_exception: |
752 | rdpr %pstate, %g4 | 913 | rdpr %pstate, %g4 |
753 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate | 914 | wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate |
754 | mov TLB_SFSR, %g3 | 915 | mov TLB_SFSR, %g3 |
755 | mov DMMU_SFAR, %g5 | 916 | ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR |
756 | ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR | 917 | rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC |
757 | ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR | ||
758 | stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit | 918 | stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit |
759 | membar #Sync | 919 | membar #Sync |
760 | sethi %hi(109f), %g7 | 920 | sethi %hi(109f), %g7 |
@@ -762,102 +922,11 @@ __do_instruction_access_exception:
762 | 109: or %g7, %lo(109b), %g7 | 922 | 109: or %g7, %lo(109b), %g7 |
763 | mov %l4, %o1 | 923 | mov %l4, %o1 |
764 | mov %l5, %o2 | 924 | mov %l5, %o2 |
765 | call instruction_access_exception | 925 | call spitfire_insn_access_exception |
766 | add %sp, PTREGS_OFF, %o0 | 926 | add %sp, PTREGS_OFF, %o0 |
767 | ba,pt %xcc, rtrap | 927 | ba,pt %xcc, rtrap |
768 | clr %l6 | 928 | clr %l6 |
769 | 929 | ||
770 | /* This is the trap handler entry point for ECC correctable | ||
771 | * errors. They are corrected, but we listen for the trap | ||
772 | * so that the event can be logged. | ||
773 | * | ||
774 | * Disrupting errors are either: | ||
775 | * 1) single-bit ECC errors during UDB reads to system | ||
776 | * memory | ||
777 | * 2) data parity errors during write-back events | ||
778 | * | ||
779 | * As far as I can make out from the manual, the CEE trap | ||
780 | * is only for correctable errors during memory read | ||
781 | * accesses by the front-end of the processor. | ||
782 | * | ||
783 | * The code below is only for trap level 1 CEE events, | ||
784 | * as it is the only situation where we can safely record | ||
785 | * and log. For trap level >1 we just clear the CE bit | ||
786 | * in the AFSR and return. | ||
787 | */ | ||
788 | |||
789 | /* Our trap handling infrastructure allows us to preserve | ||
790 | * two 64-bit values during etrap for arguments to | ||
791 | * subsequent C code. Therefore we encode the information | ||
792 | * as follows: | ||
793 | * | ||
794 | * value 1) Full 64-bits of AFAR | ||
795 | * value 2) Low 33-bits of AFSR, then bits 33-->42 | ||
796 | * are UDBL error status and bits 43-->52 | ||
797 | * are UDBH error status | ||
798 | */ | ||
799 | .align 64 | ||
800 | .globl cee_trap | ||
801 | cee_trap: | ||
802 | ldxa [%g0] ASI_AFSR, %g1 ! Read AFSR | ||
803 | ldxa [%g0] ASI_AFAR, %g2 ! Read AFAR | ||
804 | sllx %g1, 31, %g1 ! Clear reserved bits | ||
805 | srlx %g1, 31, %g1 ! in AFSR | ||
806 | |||
807 | /* NOTE: UltraSparc-I/II have high and low UDB error | ||
808 | * registers, corresponding to the two UDB units | ||
809 | * present on those chips. UltraSparc-IIi only | ||
810 | * has a single UDB, called "SDB" in the manual. | ||
811 | * For IIi the upper UDB register always reads | ||
812 | * as zero so for our purposes things will just | ||
813 | * work with the checks below. | ||
814 | */ | ||
815 | ldxa [%g0] ASI_UDBL_ERROR_R, %g3 ! Read UDB-Low error status | ||
816 | andcc %g3, (1 << 8), %g4 ! Check CE bit | ||
817 | sllx %g3, (64 - 10), %g3 ! Clear reserved bits | ||
818 | srlx %g3, (64 - 10), %g3 ! in UDB-Low error status | ||
819 | |||
820 | sllx %g3, (33 + 0), %g3 ! Shift up to encoding area | ||
821 | or %g1, %g3, %g1 ! Or it in | ||
822 | be,pn %xcc, 1f ! Branch if CE bit was clear | ||
823 | nop | ||
824 | stxa %g4, [%g0] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBL | ||
825 | membar #Sync ! Synchronize ASI stores | ||
826 | 1: mov 0x18, %g5 ! Addr of UDB-High error status | ||
827 | ldxa [%g5] ASI_UDBH_ERROR_R, %g3 ! Read it | ||
828 | |||
829 | andcc %g3, (1 << 8), %g4 ! Check CE bit | ||
830 | sllx %g3, (64 - 10), %g3 ! Clear reserved bits | ||
831 | srlx %g3, (64 - 10), %g3 ! in UDB-High error status | ||
832 | sllx %g3, (33 + 10), %g3 ! Shift up to encoding area | ||
833 | or %g1, %g3, %g1 ! Or it in | ||
834 | be,pn %xcc, 1f ! Branch if CE bit was clear | ||
835 | nop | ||
836 | nop | ||
837 | |||
838 | stxa %g4, [%g5] ASI_UDB_ERROR_W ! Clear CE sticky bit in UDBH | ||
839 | membar #Sync ! Synchronize ASI stores | ||
840 | 1: mov 1, %g5 ! AFSR CE bit is | ||
841 | sllx %g5, 20, %g5 ! bit 20 | ||
842 | stxa %g5, [%g0] ASI_AFSR ! Clear CE sticky bit in AFSR | ||
843 | membar #Sync ! Synchronize ASI stores | ||
844 | sllx %g2, (64 - 41), %g2 ! Clear reserved bits | ||
845 | srlx %g2, (64 - 41), %g2 ! in latched AFAR | ||
846 | |||
847 | andn %g2, 0x0f, %g2 ! Finish resv bit clearing | ||
848 | mov %g1, %g4 ! Move AFSR+UDB* into save reg | ||
849 | mov %g2, %g5 ! Move AFAR into save reg | ||
850 | rdpr %pil, %g2 | ||
851 | wrpr %g0, 15, %pil | ||
852 | ba,pt %xcc, etrap_irq | ||
853 | rd %pc, %g7 | ||
854 | mov %l4, %o0 | ||
855 | |||
856 | mov %l5, %o1 | ||
857 | call cee_log | ||
858 | add %sp, PTREGS_OFF, %o2 | ||
859 | ba,a,pt %xcc, rtrap_irq | ||
860 | |||
861 | /* Capture I/D/E-cache state into per-cpu error scoreboard. | 930 | /* Capture I/D/E-cache state into per-cpu error scoreboard. |
862 | * | 931 | * |
863 | * %g1: (TL>=0) ? 1 : 0 | 932 | * %g1: (TL>=0) ? 1 : 0 |
diff --git a/arch/sparc64/kernel/pci.c b/arch/sparc64/kernel/pci.c
index bba140d98b1b..f21c993f8856 100644
--- a/arch/sparc64/kernel/pci.c
+++ b/arch/sparc64/kernel/pci.c
@@ -540,6 +540,7 @@ void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res,
540 | 540 | ||
541 | pbm->parent->resource_adjust(pdev, res, root); | 541 | pbm->parent->resource_adjust(pdev, res, root); |
542 | } | 542 | } |
543 | EXPORT_SYMBOL(pcibios_bus_to_resource); | ||
543 | 544 | ||
544 | char * __init pcibios_setup(char *str) | 545 | char * __init pcibios_setup(char *str) |
545 | { | 546 | { |
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 2803bc7c2c79..425c60cfea19 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -466,7 +466,7 @@ do_flush_sync:
466 | if (!limit) | 466 | if (!limit) |
467 | break; | 467 | break; |
468 | udelay(1); | 468 | udelay(1); |
469 | membar("#LoadLoad"); | 469 | rmb(); |
470 | } | 470 | } |
471 | if (!limit) | 471 | if (!limit) |
472 | printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout " | 472 | printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout " |
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 07424b075938..66255434128a 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -103,7 +103,7 @@ void cpu_idle(void)
103 | * other cpus see our increasing idleness for the buddy | 103 | * other cpus see our increasing idleness for the buddy |
104 | * redistribution algorithm. -DaveM | 104 | * redistribution algorithm. -DaveM |
105 | */ | 105 | */ |
106 | membar("#StoreStore | #StoreLoad"); | 106 | membar_storeload_storestore(); |
107 | } | 107 | } |
108 | } | 108 | } |
109 | 109 | ||
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 89f5e019f24c..e09ddf927655 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -147,7 +147,7 @@ static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long
147 | if (!limit) | 147 | if (!limit) |
148 | break; | 148 | break; |
149 | udelay(1); | 149 | udelay(1); |
150 | membar("#LoadLoad"); | 150 | rmb(); |
151 | } | 151 | } |
152 | if (!limit) | 152 | if (!limit) |
153 | printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout " | 153 | printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout " |
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index b7e6a91952b2..fbdfed3798d8 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -33,7 +33,6 @@
33 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | #include <linux/initrd.h> | 34 | #include <linux/initrd.h> |
35 | 35 | ||
36 | #include <asm/segment.h> | ||
37 | #include <asm/system.h> | 36 | #include <asm/system.h> |
38 | #include <asm/io.h> | 37 | #include <asm/io.h> |
39 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
diff --git a/arch/sparc64/kernel/signal.c b/arch/sparc64/kernel/signal.c
index b27934671c35..60f5dfabb1e1 100644
--- a/arch/sparc64/kernel/signal.c
+++ b/arch/sparc64/kernel/signal.c
@@ -574,13 +574,12 @@ static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
574 | { | 574 | { |
575 | setup_rt_frame(ka, regs, signr, oldset, | 575 | setup_rt_frame(ka, regs, signr, oldset, |
576 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); | 576 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); |
577 | if (!(ka->sa.sa_flags & SA_NOMASK)) { | 577 | spin_lock_irq(¤t->sighand->siglock); |
578 | spin_lock_irq(¤t->sighand->siglock); | 578 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
579 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 579 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
580 | sigaddset(¤t->blocked,signr); | 580 | sigaddset(¤t->blocked,signr); |
581 | recalc_sigpending(); | 581 | recalc_sigpending(); |
582 | spin_unlock_irq(¤t->sighand->siglock); | 582 | spin_unlock_irq(¤t->sighand->siglock); |
583 | } | ||
584 | } | 583 | } |
585 | 584 | ||
586 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, | 585 | static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, |
diff --git a/arch/sparc64/kernel/signal32.c b/arch/sparc64/kernel/signal32.c
index f28428f4170e..aecccd0df1d1 100644
--- a/arch/sparc64/kernel/signal32.c
+++ b/arch/sparc64/kernel/signal32.c
@@ -877,11 +877,12 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
877 | unsigned long page = (unsigned long) | 877 | unsigned long page = (unsigned long) |
878 | page_address(pte_page(*ptep)); | 878 | page_address(pte_page(*ptep)); |
879 | 879 | ||
880 | __asm__ __volatile__( | 880 | wmb(); |
881 | " membar #StoreStore\n" | 881 | __asm__ __volatile__("flush %0 + %1" |
882 | " flush %0 + %1" | 882 | : /* no outputs */ |
883 | : : "r" (page), "r" (address & (PAGE_SIZE - 1)) | 883 | : "r" (page), |
884 | : "memory"); | 884 | "r" (address & (PAGE_SIZE - 1)) |
885 | : "memory"); | ||
885 | } | 886 | } |
886 | pte_unmap(ptep); | 887 | pte_unmap(ptep); |
887 | preempt_enable(); | 888 | preempt_enable(); |
@@ -1292,11 +1293,12 @@ static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
1292 | unsigned long page = (unsigned long) | 1293 | unsigned long page = (unsigned long) |
1293 | page_address(pte_page(*ptep)); | 1294 | page_address(pte_page(*ptep)); |
1294 | 1295 | ||
1295 | __asm__ __volatile__( | 1296 | wmb(); |
1296 | " membar #StoreStore\n" | 1297 | __asm__ __volatile__("flush %0 + %1" |
1297 | " flush %0 + %1" | 1298 | : /* no outputs */ |
1298 | : : "r" (page), "r" (address & (PAGE_SIZE - 1)) | 1299 | : "r" (page), |
1299 | : "memory"); | 1300 | "r" (address & (PAGE_SIZE - 1)) |
1301 | : "memory"); | ||
1300 | } | 1302 | } |
1301 | pte_unmap(ptep); | 1303 | pte_unmap(ptep); |
1302 | preempt_enable(); | 1304 | preempt_enable(); |
@@ -1325,13 +1327,12 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
1325 | else | 1327 | else |
1326 | setup_frame32(&ka->sa, regs, signr, oldset, info); | 1328 | setup_frame32(&ka->sa, regs, signr, oldset, info); |
1327 | } | 1329 | } |
1328 | if (!(ka->sa.sa_flags & SA_NOMASK)) { | 1330 | spin_lock_irq(¤t->sighand->siglock); |
1329 | spin_lock_irq(¤t->sighand->siglock); | 1331 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); |
1330 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | 1332 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
1331 | sigaddset(¤t->blocked,signr); | 1333 | sigaddset(¤t->blocked,signr); |
1332 | recalc_sigpending(); | 1334 | recalc_sigpending(); |
1333 | spin_unlock_irq(¤t->sighand->siglock); | 1335 | spin_unlock_irq(¤t->sighand->siglock); |
1334 | } | ||
1335 | } | 1336 | } |
1336 | 1337 | ||
1337 | static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, | 1338 | static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b9b42491e118..b4fc6a5462b2 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -144,7 +144,7 @@ void __init smp_callin(void)
144 | current->active_mm = &init_mm; | 144 | current->active_mm = &init_mm; |
145 | 145 | ||
146 | while (!cpu_isset(cpuid, smp_commenced_mask)) | 146 | while (!cpu_isset(cpuid, smp_commenced_mask)) |
147 | membar("#LoadLoad"); | 147 | rmb(); |
148 | 148 | ||
149 | cpu_set(cpuid, cpu_online_map); | 149 | cpu_set(cpuid, cpu_online_map); |
150 | } | 150 | } |
@@ -184,11 +184,11 @@ static inline long get_delta (long *rt, long *master)
184 | for (i = 0; i < NUM_ITERS; i++) { | 184 | for (i = 0; i < NUM_ITERS; i++) { |
185 | t0 = tick_ops->get_tick(); | 185 | t0 = tick_ops->get_tick(); |
186 | go[MASTER] = 1; | 186 | go[MASTER] = 1; |
187 | membar("#StoreLoad"); | 187 | membar_storeload(); |
188 | while (!(tm = go[SLAVE])) | 188 | while (!(tm = go[SLAVE])) |
189 | membar("#LoadLoad"); | 189 | rmb(); |
190 | go[SLAVE] = 0; | 190 | go[SLAVE] = 0; |
191 | membar("#StoreStore"); | 191 | wmb(); |
192 | t1 = tick_ops->get_tick(); | 192 | t1 = tick_ops->get_tick(); |
193 | 193 | ||
194 | if (t1 - t0 < best_t1 - best_t0) | 194 | if (t1 - t0 < best_t1 - best_t0) |
@@ -221,7 +221,7 @@ void smp_synchronize_tick_client(void)
221 | go[MASTER] = 1; | 221 | go[MASTER] = 1; |
222 | 222 | ||
223 | while (go[MASTER]) | 223 | while (go[MASTER]) |
224 | membar("#LoadLoad"); | 224 | rmb(); |
225 | 225 | ||
226 | local_irq_save(flags); | 226 | local_irq_save(flags); |
227 | { | 227 | { |
@@ -273,21 +273,21 @@ static void smp_synchronize_one_tick(int cpu)
273 | 273 | ||
274 | /* wait for client to be ready */ | 274 | /* wait for client to be ready */ |
275 | while (!go[MASTER]) | 275 | while (!go[MASTER]) |
276 | membar("#LoadLoad"); | 276 | rmb(); |
277 | 277 | ||
278 | /* now let the client proceed into his loop */ | 278 | /* now let the client proceed into his loop */ |
279 | go[MASTER] = 0; | 279 | go[MASTER] = 0; |
280 | membar("#StoreLoad"); | 280 | membar_storeload(); |
281 | 281 | ||
282 | spin_lock_irqsave(&itc_sync_lock, flags); | 282 | spin_lock_irqsave(&itc_sync_lock, flags); |
283 | { | 283 | { |
284 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { | 284 | for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { |
285 | while (!go[MASTER]) | 285 | while (!go[MASTER]) |
286 | membar("#LoadLoad"); | 286 | rmb(); |
287 | go[MASTER] = 0; | 287 | go[MASTER] = 0; |
288 | membar("#StoreStore"); | 288 | wmb(); |
289 | go[SLAVE] = tick_ops->get_tick(); | 289 | go[SLAVE] = tick_ops->get_tick(); |
290 | membar("#StoreLoad"); | 290 | membar_storeload(); |
291 | } | 291 | } |
292 | } | 292 | } |
293 | spin_unlock_irqrestore(&itc_sync_lock, flags); | 293 | spin_unlock_irqrestore(&itc_sync_lock, flags); |
@@ -927,11 +927,11 @@ void smp_capture(void)
927 | smp_processor_id()); | 927 | smp_processor_id()); |
928 | #endif | 928 | #endif |
929 | penguins_are_doing_time = 1; | 929 | penguins_are_doing_time = 1; |
930 | membar("#StoreStore | #LoadStore"); | 930 | membar_storestore_loadstore(); |
931 | atomic_inc(&smp_capture_registry); | 931 | atomic_inc(&smp_capture_registry); |
932 | smp_cross_call(&xcall_capture, 0, 0, 0); | 932 | smp_cross_call(&xcall_capture, 0, 0, 0); |
933 | while (atomic_read(&smp_capture_registry) != ncpus) | 933 | while (atomic_read(&smp_capture_registry) != ncpus) |
934 | membar("#LoadLoad"); | 934 | rmb(); |
935 | #ifdef CAPTURE_DEBUG | 935 | #ifdef CAPTURE_DEBUG |
936 | printk("done\n"); | 936 | printk("done\n"); |
937 | #endif | 937 | #endif |
@@ -947,7 +947,7 @@ void smp_release(void)
947 | smp_processor_id()); | 947 | smp_processor_id()); |
948 | #endif | 948 | #endif |
949 | penguins_are_doing_time = 0; | 949 | penguins_are_doing_time = 0; |
950 | membar("#StoreStore | #StoreLoad"); | 950 | membar_storeload_storestore(); |
951 | atomic_dec(&smp_capture_registry); | 951 | atomic_dec(&smp_capture_registry); |
952 | } | 952 | } |
953 | } | 953 | } |
@@ -970,9 +970,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
970 | save_alternate_globals(global_save); | 970 | save_alternate_globals(global_save); |
971 | prom_world(1); | 971 | prom_world(1); |
972 | atomic_inc(&smp_capture_registry); | 972 | atomic_inc(&smp_capture_registry); |
973 | membar("#StoreLoad | #StoreStore"); | 973 | membar_storeload_storestore(); |
974 | while (penguins_are_doing_time) | 974 | while (penguins_are_doing_time) |
975 | membar("#LoadLoad"); | 975 | rmb(); |
976 | restore_alternate_globals(global_save); | 976 | restore_alternate_globals(global_save); |
977 | atomic_dec(&smp_capture_registry); | 977 | atomic_dec(&smp_capture_registry); |
978 | prom_world(0); | 978 | prom_world(0); |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 9202d925a9ce..a3ea697f1adb 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -99,17 +99,6 @@ extern int __ashrdi3(int, int);
99 | extern void dump_thread(struct pt_regs *, struct user *); | 99 | extern void dump_thread(struct pt_regs *, struct user *); |
100 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); | 100 | extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs); |
101 | 101 | ||
102 | #if defined(CONFIG_SMP) && defined(CONFIG_DEBUG_SPINLOCK) | ||
103 | extern void _do_spin_lock (spinlock_t *lock, char *str); | ||
104 | extern void _do_spin_unlock (spinlock_t *lock); | ||
105 | extern int _spin_trylock (spinlock_t *lock); | ||
106 | extern void _do_read_lock(rwlock_t *rw, char *str); | ||
107 | extern void _do_read_unlock(rwlock_t *rw, char *str); | ||
108 | extern void _do_write_lock(rwlock_t *rw, char *str); | ||
109 | extern void _do_write_unlock(rwlock_t *rw); | ||
110 | extern int _do_write_trylock(rwlock_t *rw, char *str); | ||
111 | #endif | ||
112 | |||
113 | extern unsigned long phys_base; | 102 | extern unsigned long phys_base; |
114 | extern unsigned long pfn_base; | 103 | extern unsigned long pfn_base; |
115 | 104 | ||
@@ -152,18 +141,6 @@ EXPORT_SYMBOL(_mcount);
152 | EXPORT_SYMBOL(cpu_online_map); | 141 | EXPORT_SYMBOL(cpu_online_map); |
153 | EXPORT_SYMBOL(phys_cpu_present_map); | 142 | EXPORT_SYMBOL(phys_cpu_present_map); |
154 | 143 | ||
155 | /* Spinlock debugging library, optional. */ | ||
156 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
157 | EXPORT_SYMBOL(_do_spin_lock); | ||
158 | EXPORT_SYMBOL(_do_spin_unlock); | ||
159 | EXPORT_SYMBOL(_spin_trylock); | ||
160 | EXPORT_SYMBOL(_do_read_lock); | ||
161 | EXPORT_SYMBOL(_do_read_unlock); | ||
162 | EXPORT_SYMBOL(_do_write_lock); | ||
163 | EXPORT_SYMBOL(_do_write_unlock); | ||
164 | EXPORT_SYMBOL(_do_write_trylock); | ||
165 | #endif | ||
166 | |||
167 | EXPORT_SYMBOL(smp_call_function); | 144 | EXPORT_SYMBOL(smp_call_function); |
168 | #endif /* CONFIG_SMP */ | 145 | #endif /* CONFIG_SMP */ |
169 | 146 | ||
@@ -429,3 +406,12 @@ EXPORT_SYMBOL(xor_vis_4);
429 | EXPORT_SYMBOL(xor_vis_5); | 406 | EXPORT_SYMBOL(xor_vis_5); |
430 | 407 | ||
431 | EXPORT_SYMBOL(prom_palette); | 408 | EXPORT_SYMBOL(prom_palette); |
409 | |||
410 | /* memory barriers */ | ||
411 | EXPORT_SYMBOL(mb); | ||
412 | EXPORT_SYMBOL(rmb); | ||
413 | EXPORT_SYMBOL(wmb); | ||
414 | EXPORT_SYMBOL(membar_storeload); | ||
415 | EXPORT_SYMBOL(membar_storeload_storestore); | ||
416 | EXPORT_SYMBOL(membar_storeload_loadload); | ||
417 | EXPORT_SYMBOL(membar_storestore_loadstore); | ||
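[Editor's note: the driver hunks above (pci_iommu.c, sbus.c, process.c, smp.c) replace open-coded membar("...") asm strings with the generic barrier helpers whose symbols this file now exports. A minimal sketch of the resulting polling idiom follows, assuming the usual sparc64 <asm/system.h> definitions of rmb()/wmb() in this era of the tree; wait_for_flag() is an illustrative name, not part of this patch.]

	#include <asm/system.h>	/* rmb(), wmb(), membar_storeload() on sparc64 */

	/* Spin until another CPU publishes *flag, then consume and clear it.
	 * This mirrors the tick-synchronization loops converted in the smp.c
	 * hunk above: rmb() stands in for membar("#LoadLoad") and wmb() for
	 * membar("#StoreStore").
	 */
	static void wait_for_flag(volatile unsigned long *flag)
	{
		while (!*flag)
			rmb();
		*flag = 0;
		wmb();
	}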
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 100b0107c4be..b280b2ef674f 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -33,6 +33,7 @@
33 | #include <asm/dcu.h> | 33 | #include <asm/dcu.h> |
34 | #include <asm/estate.h> | 34 | #include <asm/estate.h> |
35 | #include <asm/chafsr.h> | 35 | #include <asm/chafsr.h> |
36 | #include <asm/sfafsr.h> | ||
36 | #include <asm/psrcompat.h> | 37 | #include <asm/psrcompat.h> |
37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
38 | #include <asm/timer.h> | 39 | #include <asm/timer.h> |
@@ -143,8 +144,7 @@ void do_BUG(const char *file, int line)
143 | } | 144 | } |
144 | #endif | 145 | #endif |
145 | 146 | ||
146 | void instruction_access_exception(struct pt_regs *regs, | 147 | void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) |
147 | unsigned long sfsr, unsigned long sfar) | ||
148 | { | 148 | { |
149 | siginfo_t info; | 149 | siginfo_t info; |
150 | 150 | ||
@@ -153,8 +153,8 @@ void instruction_access_exception(struct pt_regs *regs,
153 | return; | 153 | return; |
154 | 154 | ||
155 | if (regs->tstate & TSTATE_PRIV) { | 155 | if (regs->tstate & TSTATE_PRIV) { |
156 | printk("instruction_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", | 156 | printk("spitfire_insn_access_exception: SFSR[%016lx] " |
157 | sfsr, sfar); | 157 | "SFAR[%016lx], going.\n", sfsr, sfar); |
158 | die_if_kernel("Iax", regs); | 158 | die_if_kernel("Iax", regs); |
159 | } | 159 | } |
160 | if (test_thread_flag(TIF_32BIT)) { | 160 | if (test_thread_flag(TIF_32BIT)) { |
@@ -169,19 +169,17 @@ void instruction_access_exception(struct pt_regs *regs,
169 | force_sig_info(SIGSEGV, &info, current); | 169 | force_sig_info(SIGSEGV, &info, current); |
170 | } | 170 | } |
171 | 171 | ||
172 | void instruction_access_exception_tl1(struct pt_regs *regs, | 172 | void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) |
173 | unsigned long sfsr, unsigned long sfar) | ||
174 | { | 173 | { |
175 | if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, | 174 | if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs, |
176 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | 175 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) |
177 | return; | 176 | return; |
178 | 177 | ||
179 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | 178 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); |
180 | instruction_access_exception(regs, sfsr, sfar); | 179 | spitfire_insn_access_exception(regs, sfsr, sfar); |
181 | } | 180 | } |
182 | 181 | ||
183 | void data_access_exception(struct pt_regs *regs, | 182 | void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) |
184 | unsigned long sfsr, unsigned long sfar) | ||
185 | { | 183 | { |
186 | siginfo_t info; | 184 | siginfo_t info; |
187 | 185 | ||
@@ -207,8 +205,8 @@ void data_access_exception(struct pt_regs *regs,
207 | return; | 205 | return; |
208 | } | 206 | } |
209 | /* Shit... */ | 207 | /* Shit... */ |
210 | printk("data_access_exception: SFSR[%016lx] SFAR[%016lx], going.\n", | 208 | printk("spitfire_data_access_exception: SFSR[%016lx] " |
211 | sfsr, sfar); | 209 | "SFAR[%016lx], going.\n", sfsr, sfar); |
212 | die_if_kernel("Dax", regs); | 210 | die_if_kernel("Dax", regs); |
213 | } | 211 | } |
214 | 212 | ||
@@ -220,6 +218,16 @@ void data_access_exception(struct pt_regs *regs,
220 | force_sig_info(SIGSEGV, &info, current); | 218 | force_sig_info(SIGSEGV, &info, current); |
221 | } | 219 | } |
222 | 220 | ||
221 | void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) | ||
222 | { | ||
223 | if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs, | ||
224 | 0, 0x30, SIGTRAP) == NOTIFY_STOP) | ||
225 | return; | ||
226 | |||
227 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
228 | spitfire_data_access_exception(regs, sfsr, sfar); | ||
229 | } | ||
230 | |||
223 | #ifdef CONFIG_PCI | 231 | #ifdef CONFIG_PCI |
224 | /* This is really pathetic... */ | 232 | /* This is really pathetic... */ |
225 | extern volatile int pci_poke_in_progress; | 233 | extern volatile int pci_poke_in_progress; |
@@ -253,54 +261,13 @@ static void spitfire_clean_and_reenable_l1_caches(void)
253 | : "memory"); | 261 | : "memory"); |
254 | } | 262 | } |
255 | 263 | ||
256 | void do_iae(struct pt_regs *regs) | 264 | static void spitfire_enable_estate_errors(void) |
257 | { | 265 | { |
258 | siginfo_t info; | 266 | __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" |
259 | 267 | "membar #Sync" | |
260 | spitfire_clean_and_reenable_l1_caches(); | 268 | : /* no outputs */ |
261 | 269 | : "r" (ESTATE_ERR_ALL), | |
262 | if (notify_die(DIE_TRAP, "instruction access exception", regs, | 270 | "i" (ASI_ESTATE_ERROR_EN)); |
263 | 0, 0x8, SIGTRAP) == NOTIFY_STOP) | ||
264 | return; | ||
265 | |||
266 | info.si_signo = SIGBUS; | ||
267 | info.si_errno = 0; | ||
268 | info.si_code = BUS_OBJERR; | ||
269 | info.si_addr = (void *)0; | ||
270 | info.si_trapno = 0; | ||
271 | force_sig_info(SIGBUS, &info, current); | ||
272 | } | ||
273 | |||
274 | void do_dae(struct pt_regs *regs) | ||
275 | { | ||
276 | siginfo_t info; | ||
277 | |||
278 | #ifdef CONFIG_PCI | ||
279 | if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { | ||
280 | spitfire_clean_and_reenable_l1_caches(); | ||
281 | |||
282 | pci_poke_faulted = 1; | ||
283 | |||
284 | /* Why the fuck did they have to change this? */ | ||
285 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | ||
286 | regs->tpc += 4; | ||
287 | |||
288 | regs->tnpc = regs->tpc + 4; | ||
289 | return; | ||
290 | } | ||
291 | #endif | ||
292 | spitfire_clean_and_reenable_l1_caches(); | ||
293 | |||
294 | if (notify_die(DIE_TRAP, "data access exception", regs, | ||
295 | 0, 0x30, SIGTRAP) == NOTIFY_STOP) | ||
296 | return; | ||
297 | |||
298 | info.si_signo = SIGBUS; | ||
299 | info.si_errno = 0; | ||
300 | info.si_code = BUS_OBJERR; | ||
301 | info.si_addr = (void *)0; | ||
302 | info.si_trapno = 0; | ||
303 | force_sig_info(SIGBUS, &info, current); | ||
304 | } | 271 | } |
305 | 272 | ||
306 | static char ecc_syndrome_table[] = { | 273 | static char ecc_syndrome_table[] = { |
@@ -338,65 +305,15 @@ static char ecc_syndrome_table[] = {
338 | 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a | 305 | 0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a |
339 | }; | 306 | }; |
340 | 307 | ||
341 | /* cee_trap in entry.S encodes AFSR/UDBH/UDBL error status | ||
342 | * in the following format. The AFAR is left as is, with | ||
343 | * reserved bits cleared, and is a raw 40-bit physical | ||
344 | * address. | ||
345 | */ | ||
346 | #define CE_STATUS_UDBH_UE (1UL << (43 + 9)) | ||
347 | #define CE_STATUS_UDBH_CE (1UL << (43 + 8)) | ||
348 | #define CE_STATUS_UDBH_ESYNDR (0xffUL << 43) | ||
349 | #define CE_STATUS_UDBH_SHIFT 43 | ||
350 | #define CE_STATUS_UDBL_UE (1UL << (33 + 9)) | ||
351 | #define CE_STATUS_UDBL_CE (1UL << (33 + 8)) | ||
352 | #define CE_STATUS_UDBL_ESYNDR (0xffUL << 33) | ||
353 | #define CE_STATUS_UDBL_SHIFT 33 | ||
354 | #define CE_STATUS_AFSR_MASK (0x1ffffffffUL) | ||
355 | #define CE_STATUS_AFSR_ME (1UL << 32) | ||
356 | #define CE_STATUS_AFSR_PRIV (1UL << 31) | ||
357 | #define CE_STATUS_AFSR_ISAP (1UL << 30) | ||
358 | #define CE_STATUS_AFSR_ETP (1UL << 29) | ||
359 | #define CE_STATUS_AFSR_IVUE (1UL << 28) | ||
360 | #define CE_STATUS_AFSR_TO (1UL << 27) | ||
361 | #define CE_STATUS_AFSR_BERR (1UL << 26) | ||
362 | #define CE_STATUS_AFSR_LDP (1UL << 25) | ||
363 | #define CE_STATUS_AFSR_CP (1UL << 24) | ||
364 | #define CE_STATUS_AFSR_WP (1UL << 23) | ||
365 | #define CE_STATUS_AFSR_EDP (1UL << 22) | ||
366 | #define CE_STATUS_AFSR_UE (1UL << 21) | ||
367 | #define CE_STATUS_AFSR_CE (1UL << 20) | ||
368 | #define CE_STATUS_AFSR_ETS (0xfUL << 16) | ||
369 | #define CE_STATUS_AFSR_ETS_SHIFT 16 | ||
370 | #define CE_STATUS_AFSR_PSYND (0xffffUL << 0) | ||
371 | #define CE_STATUS_AFSR_PSYND_SHIFT 0 | ||
372 | |||
373 | /* Layout of Ecache TAG Parity Syndrome of AFSR */ | ||
374 | #define AFSR_ETSYNDROME_7_0 0x1UL /* E$-tag bus bits <7:0> */ | ||
375 | #define AFSR_ETSYNDROME_15_8 0x2UL /* E$-tag bus bits <15:8> */ | ||
376 | #define AFSR_ETSYNDROME_21_16 0x4UL /* E$-tag bus bits <21:16> */ | ||
377 | #define AFSR_ETSYNDROME_24_22 0x8UL /* E$-tag bus bits <24:22> */ | ||
378 | |||
379 | static char *syndrome_unknown = "<Unknown>"; | 308 | static char *syndrome_unknown = "<Unknown>"; |
380 | 309 | ||
381 | asmlinkage void cee_log(unsigned long ce_status, | 310 | static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit) |
382 | unsigned long afar, | ||
383 | struct pt_regs *regs) | ||
384 | { | 311 | { |
385 | char memmod_str[64]; | 312 | unsigned short scode; |
386 | char *p; | 313 | char memmod_str[64], *p; |
387 | unsigned short scode, udb_reg; | ||
388 | 314 | ||
389 | printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " | 315 | if (udbl & bit) { |
390 | "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx]\n", | 316 | scode = ecc_syndrome_table[udbl & 0xff]; |
391 | smp_processor_id(), | ||
392 | (ce_status & CE_STATUS_AFSR_MASK), | ||
393 | afar, | ||
394 | ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL), | ||
395 | ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL)); | ||
396 | |||
397 | udb_reg = ((ce_status >> CE_STATUS_UDBL_SHIFT) & 0x3ffUL); | ||
398 | if (udb_reg & (1 << 8)) { | ||
399 | scode = ecc_syndrome_table[udb_reg & 0xff]; | ||
400 | if (prom_getunumber(scode, afar, | 317 | if (prom_getunumber(scode, afar, |
401 | memmod_str, sizeof(memmod_str)) == -1) | 318 | memmod_str, sizeof(memmod_str)) == -1) |
402 | p = syndrome_unknown; | 319 | p = syndrome_unknown; |
@@ -407,9 +324,8 @@ asmlinkage void cee_log(unsigned long ce_status,
407 | smp_processor_id(), scode, p); | 324 | smp_processor_id(), scode, p); |
408 | } | 325 | } |
409 | 326 | ||
410 | udb_reg = ((ce_status >> CE_STATUS_UDBH_SHIFT) & 0x3ffUL); | 327 | if (udbh & bit) { |
411 | if (udb_reg & (1 << 8)) { | 328 | scode = ecc_syndrome_table[udbh & 0xff]; |
412 | scode = ecc_syndrome_table[udb_reg & 0xff]; | ||
413 | if (prom_getunumber(scode, afar, | 329 | if (prom_getunumber(scode, afar, |
414 | memmod_str, sizeof(memmod_str)) == -1) | 330 | memmod_str, sizeof(memmod_str)) == -1) |
415 | p = syndrome_unknown; | 331 | p = syndrome_unknown; |
@@ -419,6 +335,127 @@ asmlinkage void cee_log(unsigned long ce_status,
419 | "Memory Module \"%s\"\n", | 335 | "Memory Module \"%s\"\n", |
420 | smp_processor_id(), scode, p); | 336 | smp_processor_id(), scode, p); |
421 | } | 337 | } |
338 | |||
339 | } | ||
340 | |||
341 | static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs) | ||
342 | { | ||
343 | |||
344 | printk(KERN_WARNING "CPU[%d]: Correctable ECC Error " | ||
345 | "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n", | ||
346 | smp_processor_id(), afsr, afar, udbl, udbh, tl1); | ||
347 | |||
348 | spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE); | ||
349 | |||
350 | /* We always log it, even if someone is listening for this | ||
351 | * trap. | ||
352 | */ | ||
353 | notify_die(DIE_TRAP, "Correctable ECC Error", regs, | ||
354 | 0, TRAP_TYPE_CEE, SIGTRAP); | ||
355 | |||
356 | /* The Correctable ECC Error trap does not disable I/D caches. So | ||
357 | * we only have to restore the ESTATE Error Enable register. | ||
358 | */ | ||
359 | spitfire_enable_estate_errors(); | ||
360 | } | ||
361 | |||
362 | static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs) | ||
363 | { | ||
364 | siginfo_t info; | ||
365 | |||
366 | printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] " | ||
367 | "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n", | ||
368 | smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1); | ||
369 | |||
370 | /* XXX add more human friendly logging of the error status | ||
371 | * XXX as is implemented for cheetah | ||
372 | */ | ||
373 | |||
374 | spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE); | ||
375 | |||
376 | /* We always log it, even if someone is listening for this | ||
377 | * trap. | ||
378 | */ | ||
379 | notify_die(DIE_TRAP, "Uncorrectable Error", regs, | ||
380 | 0, tt, SIGTRAP); | ||
381 | |||
382 | if (regs->tstate & TSTATE_PRIV) { | ||
383 | if (tl1) | ||
384 | dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); | ||
385 | die_if_kernel("UE", regs); | ||
386 | } | ||
387 | |||
388 | /* XXX need more intelligent processing here, such as is implemented | ||
389 | * XXX for cheetah errors, in fact if the E-cache still holds the | ||
390 | * XXX line with bad parity this will loop | ||
391 | */ | ||
392 | |||
393 | spitfire_clean_and_reenable_l1_caches(); | ||
394 | spitfire_enable_estate_errors(); | ||
395 | |||
396 | if (test_thread_flag(TIF_32BIT)) { | ||
397 | regs->tpc &= 0xffffffff; | ||
398 | regs->tnpc &= 0xffffffff; | ||
399 | } | ||
400 | info.si_signo = SIGBUS; | ||
401 | info.si_errno = 0; | ||
402 | info.si_code = BUS_OBJERR; | ||
403 | info.si_addr = (void *)0; | ||
404 | info.si_trapno = 0; | ||
405 | force_sig_info(SIGBUS, &info, current); | ||
406 | } | ||
407 | |||
408 | void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar) | ||
409 | { | ||
410 | unsigned long afsr, tt, udbh, udbl; | ||
411 | int tl1; | ||
412 | |||
413 | afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT; | ||
414 | tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT; | ||
415 | tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0; | ||
416 | udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT; | ||
417 | udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT; | ||
418 | |||
419 | #ifdef CONFIG_PCI | ||
420 | if (tt == TRAP_TYPE_DAE && | ||
421 | pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) { | ||
422 | spitfire_clean_and_reenable_l1_caches(); | ||
423 | spitfire_enable_estate_errors(); | ||
424 | |||
425 | pci_poke_faulted = 1; | ||
426 | regs->tnpc = regs->tpc + 4; | ||
427 | return; | ||
428 | } | ||
429 | #endif | ||
430 | |||
431 | if (afsr & SFAFSR_UE) | ||
432 | spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs); | ||
433 | |||
434 | if (tt == TRAP_TYPE_CEE) { | ||
435 | /* Handle the case where we took a CEE trap, but ACK'd | ||
436 | * only the UE state in the UDB error registers. | ||
437 | */ | ||
438 | if (afsr & SFAFSR_UE) { | ||
439 | if (udbh & UDBE_CE) { | ||
440 | __asm__ __volatile__( | ||
441 | "stxa %0, [%1] %2\n\t" | ||
442 | "membar #Sync" | ||
443 | : /* no outputs */ | ||
444 | : "r" (udbh & UDBE_CE), | ||
445 | "r" (0x0), "i" (ASI_UDB_ERROR_W)); | ||
446 | } | ||
447 | if (udbl & UDBE_CE) { | ||
448 | __asm__ __volatile__( | ||
449 | "stxa %0, [%1] %2\n\t" | ||
450 | "membar #Sync" | ||
451 | : /* no outputs */ | ||
452 | : "r" (udbl & UDBE_CE), | ||
453 | "r" (0x18), "i" (ASI_UDB_ERROR_W)); | ||
454 | } | ||
455 | } | ||
456 | |||
457 | spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs); | ||
458 | } | ||
422 | } | 459 | } |
423 | 460 | ||
424 | int cheetah_pcache_forced_on; | 461 | int cheetah_pcache_forced_on; |
@@ -2127,6 +2164,9 @@ void __init trap_init(void)
2127 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || | 2164 | TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || |
2128 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || | 2165 | TI_NEW_CHILD != offsetof(struct thread_info, new_child) || |
2129 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || | 2166 | TI_SYS_NOERROR != offsetof(struct thread_info, syscall_noerror) || |
2167 | TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || | ||
2168 | TI_KUNA_REGS != offsetof(struct thread_info, kern_una_regs) || | ||
2169 | TI_KUNA_INSN != offsetof(struct thread_info, kern_una_insn) || | ||
2130 | TI_FPREGS != offsetof(struct thread_info, fpregs) || | 2170 | TI_FPREGS != offsetof(struct thread_info, fpregs) || |
2131 | (TI_FPREGS & (64 - 1))) | 2171 | (TI_FPREGS & (64 - 1))) |
2132 | thread_info_offsets_are_bolixed_dave(); | 2172 | thread_info_offsets_are_bolixed_dave(); |
diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
index 491bb3681f9d..8365bc1f81f3 100644
--- a/arch/sparc64/kernel/ttable.S
+++ b/arch/sparc64/kernel/ttable.S
@@ -18,9 +18,10 @@ sparc64_ttable_tl0:
18 | tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) | 18 | tl0_resv000: BOOT_KERNEL BTRAP(0x1) BTRAP(0x2) BTRAP(0x3) |
19 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) | 19 | tl0_resv004: BTRAP(0x4) BTRAP(0x5) BTRAP(0x6) BTRAP(0x7) |
20 | tl0_iax: membar #Sync | 20 | tl0_iax: membar #Sync |
21 | TRAP_NOSAVE_7INSNS(__do_instruction_access_exception) | 21 | TRAP_NOSAVE_7INSNS(__spitfire_insn_access_exception) |
22 | tl0_resv009: BTRAP(0x9) | 22 | tl0_resv009: BTRAP(0x9) |
23 | tl0_iae: TRAP(do_iae) | 23 | tl0_iae: membar #Sync |
24 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
24 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) | 25 | tl0_resv00b: BTRAP(0xb) BTRAP(0xc) BTRAP(0xd) BTRAP(0xe) BTRAP(0xf) |
25 | tl0_ill: membar #Sync | 26 | tl0_ill: membar #Sync |
26 | TRAP_7INSNS(do_illegal_instruction) | 27 | TRAP_7INSNS(do_illegal_instruction) |
@@ -36,9 +37,10 @@ tl0_cwin: CLEAN_WINDOW
36 | tl0_div0: TRAP(do_div0) | 37 | tl0_div0: TRAP(do_div0) |
37 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) | 38 | tl0_resv029: BTRAP(0x29) BTRAP(0x2a) BTRAP(0x2b) BTRAP(0x2c) BTRAP(0x2d) BTRAP(0x2e) |
38 | tl0_resv02f: BTRAP(0x2f) | 39 | tl0_resv02f: BTRAP(0x2f) |
39 | tl0_dax: TRAP_NOSAVE(__do_data_access_exception) | 40 | tl0_dax: TRAP_NOSAVE(__spitfire_data_access_exception) |
40 | tl0_resv031: BTRAP(0x31) | 41 | tl0_resv031: BTRAP(0x31) |
41 | tl0_dae: TRAP(do_dae) | 42 | tl0_dae: membar #Sync |
43 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
42 | tl0_resv033: BTRAP(0x33) | 44 | tl0_resv033: BTRAP(0x33) |
43 | tl0_mna: TRAP_NOSAVE(do_mna) | 45 | tl0_mna: TRAP_NOSAVE(do_mna) |
44 | tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) | 46 | tl0_lddfmna: TRAP_NOSAVE(do_lddfmna) |
@@ -73,7 +75,8 @@ tl0_resv05c: BTRAP(0x5c) BTRAP(0x5d) BTRAP(0x5e) BTRAP(0x5f)
73 | tl0_ivec: TRAP_IVEC | 75 | tl0_ivec: TRAP_IVEC |
74 | tl0_paw: TRAP(do_paw) | 76 | tl0_paw: TRAP(do_paw) |
75 | tl0_vaw: TRAP(do_vaw) | 77 | tl0_vaw: TRAP(do_vaw) |
76 | tl0_cee: TRAP_NOSAVE(cee_trap) | 78 | tl0_cee: membar #Sync |
79 | TRAP_NOSAVE_7INSNS(__spitfire_cee_trap) | ||
77 | tl0_iamiss: | 80 | tl0_iamiss: |
78 | #include "itlb_base.S" | 81 | #include "itlb_base.S" |
79 | tl0_damiss: | 82 | tl0_damiss: |
@@ -175,9 +178,10 @@ tl0_resv1f0: BTRAPS(0x1f0) BTRAPS(0x1f8)
175 | sparc64_ttable_tl1: | 178 | sparc64_ttable_tl1: |
176 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) | 179 | tl1_resv000: BOOT_KERNEL BTRAPTL1(0x1) BTRAPTL1(0x2) BTRAPTL1(0x3) |
177 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) | 180 | tl1_resv004: BTRAPTL1(0x4) BTRAPTL1(0x5) BTRAPTL1(0x6) BTRAPTL1(0x7) |
178 | tl1_iax: TRAP_NOSAVE(__do_instruction_access_exception_tl1) | 181 | tl1_iax: TRAP_NOSAVE(__spitfire_insn_access_exception_tl1) |
179 | tl1_resv009: BTRAPTL1(0x9) | 182 | tl1_resv009: BTRAPTL1(0x9) |
180 | tl1_iae: TRAPTL1(do_iae_tl1) | 183 | tl1_iae: membar #Sync |
184 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
181 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) | 185 | tl1_resv00b: BTRAPTL1(0xb) BTRAPTL1(0xc) BTRAPTL1(0xd) BTRAPTL1(0xe) BTRAPTL1(0xf) |
182 | tl1_ill: TRAPTL1(do_ill_tl1) | 186 | tl1_ill: TRAPTL1(do_ill_tl1) |
183 | tl1_privop: BTRAPTL1(0x11) | 187 | tl1_privop: BTRAPTL1(0x11) |
@@ -193,9 +197,10 @@ tl1_cwin: CLEAN_WINDOW
193 | tl1_div0: TRAPTL1(do_div0_tl1) | 197 | tl1_div0: TRAPTL1(do_div0_tl1) |
194 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) | 198 | tl1_resv029: BTRAPTL1(0x29) BTRAPTL1(0x2a) BTRAPTL1(0x2b) BTRAPTL1(0x2c) |
195 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) | 199 | tl1_resv02d: BTRAPTL1(0x2d) BTRAPTL1(0x2e) BTRAPTL1(0x2f) |
196 | tl1_dax: TRAP_NOSAVE(__do_data_access_exception_tl1) | 200 | tl1_dax: TRAP_NOSAVE(__spitfire_data_access_exception_tl1) |
197 | tl1_resv031: BTRAPTL1(0x31) | 201 | tl1_resv031: BTRAPTL1(0x31) |
198 | tl1_dae: TRAPTL1(do_dae_tl1) | 202 | tl1_dae: membar #Sync |
203 | TRAP_NOSAVE_7INSNS(__spitfire_access_error) | ||
199 | tl1_resv033: BTRAPTL1(0x33) | 204 | tl1_resv033: BTRAPTL1(0x33) |
200 | tl1_mna: TRAP_NOSAVE(do_mna) | 205 | tl1_mna: TRAP_NOSAVE(do_mna) |
201 | tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) | 206 | tl1_lddfmna: TRAPTL1(do_lddfmna_tl1) |
@@ -219,8 +224,8 @@ tl1_paw: TRAPTL1(do_paw_tl1)
219 | tl1_vaw: TRAPTL1(do_vaw_tl1) | 224 | tl1_vaw: TRAPTL1(do_vaw_tl1) |
220 | 225 | ||
221 | /* The grotty trick to save %g1 into current->thread.cee_stuff | 226 | /* The grotty trick to save %g1 into current->thread.cee_stuff |
222 | * is because when we take this trap we could be interrupting trap | 227 | * is because when we take this trap we could be interrupting |
223 | * code already using the trap alternate global registers. | 228 | * trap code already using the trap alternate global registers. |
224 | * | 229 | * |
225 | * We cross our fingers and pray that this store/load does | 230 | * We cross our fingers and pray that this store/load does |
226 | * not cause yet another CEE trap. | 231 | * not cause yet another CEE trap. |
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
new file mode 100644
index 000000000000..cbb40585253c
--- /dev/null
+++ b/arch/sparc64/kernel/una_asm.S
@@ -0,0 +1,153 @@
1 | /* una_asm.S: Kernel unaligned trap assembler helpers. | ||
2 | * | ||
3 | * Copyright (C) 1996,2005 David S. Miller (davem@davemloft.net) | ||
4 | * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | ||
5 | */ | ||
6 | |||
7 | .text | ||
8 | |||
9 | kernel_unaligned_trap_fault: | ||
10 | call kernel_mna_trap_fault | ||
11 | nop | ||
12 | retl | ||
13 | nop | ||
14 | .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault | ||
15 | |||
16 | .globl __do_int_store | ||
17 | __do_int_store: | ||
18 | rd %asi, %o4 | ||
19 | wr %o3, 0, %asi | ||
20 | ldx [%o2], %g3 | ||
21 | cmp %o1, 2 | ||
22 | be,pn %icc, 2f | ||
23 | cmp %o1, 4 | ||
24 | be,pt %icc, 1f | ||
25 | srlx %g3, 24, %g2 | ||
26 | srlx %g3, 56, %g1 | ||
27 | srlx %g3, 48, %g7 | ||
28 | 4: stba %g1, [%o0] %asi | ||
29 | srlx %g3, 40, %g1 | ||
30 | 5: stba %g7, [%o0 + 1] %asi | ||
31 | srlx %g3, 32, %g7 | ||
32 | 6: stba %g1, [%o0 + 2] %asi | ||
33 | 7: stba %g7, [%o0 + 3] %asi | ||
34 | srlx %g3, 16, %g1 | ||
35 | 8: stba %g2, [%o0 + 4] %asi | ||
36 | srlx %g3, 8, %g7 | ||
37 | 9: stba %g1, [%o0 + 5] %asi | ||
38 | 10: stba %g7, [%o0 + 6] %asi | ||
39 | ba,pt %xcc, 0f | ||
40 | 11: stba %g3, [%o0 + 7] %asi | ||
41 | 1: srl %g3, 16, %g7 | ||
42 | 12: stba %g2, [%o0] %asi | ||
43 | srl %g3, 8, %g2 | ||
44 | 13: stba %g7, [%o0 + 1] %asi | ||
45 | 14: stba %g2, [%o0 + 2] %asi | ||
46 | ba,pt %xcc, 0f | ||
47 | 15: stba %g3, [%o0 + 3] %asi | ||
48 | 2: srl %g3, 8, %g2 | ||
49 | 16: stba %g2, [%o0] %asi | ||
50 | 17: stba %g3, [%o0 + 1] %asi | ||
51 | 0: | ||
52 | wr %o4, 0x0, %asi | ||
53 | retl | ||
54 | nop | ||
55 | .size __do_int_store, .-__do_int_store | ||
56 | |||
57 | .section __ex_table | ||
58 | .word 4b, kernel_unaligned_trap_fault | ||
59 | .word 5b, kernel_unaligned_trap_fault | ||
60 | .word 6b, kernel_unaligned_trap_fault | ||
61 | .word 7b, kernel_unaligned_trap_fault | ||
62 | .word 8b, kernel_unaligned_trap_fault | ||
63 | .word 9b, kernel_unaligned_trap_fault | ||
64 | .word 10b, kernel_unaligned_trap_fault | ||
65 | .word 11b, kernel_unaligned_trap_fault | ||
66 | .word 12b, kernel_unaligned_trap_fault | ||
67 | .word 13b, kernel_unaligned_trap_fault | ||
68 | .word 14b, kernel_unaligned_trap_fault | ||
69 | .word 15b, kernel_unaligned_trap_fault | ||
70 | .word 16b, kernel_unaligned_trap_fault | ||
71 | .word 17b, kernel_unaligned_trap_fault | ||
72 | .previous | ||
73 | |||
74 | .globl do_int_load | ||
75 | do_int_load: | ||
76 | rd %asi, %o5 | ||
77 | wr %o4, 0, %asi | ||
78 | cmp %o1, 8 | ||
79 | bge,pn %icc, 9f | ||
80 | cmp %o1, 4 | ||
81 | be,pt %icc, 6f | ||
82 | 4: lduba [%o2] %asi, %g2 | ||
83 | 5: lduba [%o2 + 1] %asi, %g3 | ||
84 | sll %g2, 8, %g2 | ||
85 | brz,pt %o3, 3f | ||
86 | add %g2, %g3, %g2 | ||
87 | sllx %g2, 48, %g2 | ||
88 | srax %g2, 48, %g2 | ||
89 | 3: ba,pt %xcc, 0f | ||
90 | stx %g2, [%o0] | ||
91 | 6: lduba [%o2 + 1] %asi, %g3 | ||
92 | sll %g2, 24, %g2 | ||
93 | 7: lduba [%o2 + 2] %asi, %g7 | ||
94 | sll %g3, 16, %g3 | ||
95 | 8: lduba [%o2 + 3] %asi, %g1 | ||
96 | sll %g7, 8, %g7 | ||
97 | or %g2, %g3, %g2 | ||
98 | or %g7, %g1, %g7 | ||
99 | or %g2, %g7, %g2 | ||
100 | brnz,a,pt %o3, 3f | ||
101 | sra %g2, 0, %g2 | ||
102 | 3: ba,pt %xcc, 0f | ||
103 | stx %g2, [%o0] | ||
104 | 9: lduba [%o2] %asi, %g2 | ||
105 | 10: lduba [%o2 + 1] %asi, %g3 | ||
106 | sllx %g2, 56, %g2 | ||
107 | 11: lduba [%o2 + 2] %asi, %g7 | ||
108 | sllx %g3, 48, %g3 | ||
109 | 12: lduba [%o2 + 3] %asi, %g1 | ||
110 | sllx %g7, 40, %g7 | ||
111 | sllx %g1, 32, %g1 | ||
112 | or %g2, %g3, %g2 | ||
113 | or %g7, %g1, %g7 | ||
114 | 13: lduba [%o2 + 4] %asi, %g3 | ||
115 | or %g2, %g7, %g7 | ||
116 | 14: lduba [%o2 + 5] %asi, %g1 | ||
117 | sllx %g3, 24, %g3 | ||
118 | 15: lduba [%o2 + 6] %asi, %g2 | ||
119 | sllx %g1, 16, %g1 | ||
120 | or %g7, %g3, %g7 | ||
121 | 16: lduba [%o2 + 7] %asi, %g3 | ||
122 | sllx %g2, 8, %g2 | ||
123 | or %g7, %g1, %g7 | ||
124 | or %g2, %g3, %g2 | ||
125 | or %g7, %g2, %g7 | ||
126 | cmp %o1, 8 | ||
127 | be,a,pt %icc, 0f | ||
128 | stx %g7, [%o0] | ||
129 | srlx %g7, 32, %g2 | ||
130 | sra %g7, 0, %g7 | ||
131 | stx %g2, [%o0] | ||
132 | stx %g7, [%o0 + 8] | ||
133 | 0: | ||
134 | wr %o5, 0x0, %asi | ||
135 | retl | ||
136 | nop | ||
137 | .size do_int_load, .-do_int_load | ||
138 | |||
139 | .section __ex_table | ||
140 | .word 4b, kernel_unaligned_trap_fault | ||
141 | .word 5b, kernel_unaligned_trap_fault | ||
142 | .word 6b, kernel_unaligned_trap_fault | ||
143 | .word 7b, kernel_unaligned_trap_fault | ||
144 | .word 8b, kernel_unaligned_trap_fault | ||
145 | .word 9b, kernel_unaligned_trap_fault | ||
146 | .word 10b, kernel_unaligned_trap_fault | ||
147 | .word 11b, kernel_unaligned_trap_fault | ||
148 | .word 12b, kernel_unaligned_trap_fault | ||
149 | .word 13b, kernel_unaligned_trap_fault | ||
150 | .word 14b, kernel_unaligned_trap_fault | ||
151 | .word 15b, kernel_unaligned_trap_fault | ||
152 | .word 16b, kernel_unaligned_trap_fault | ||
153 | .previous | ||
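Read together with the unaligned.c changes that follow, the calling convention for these helpers is simple: the C code decodes the trapping instruction and hands the register slot, access size, effective address, signedness and ASI to the assembler routine, which performs the byte-wise access under the __ex_table cover above. A minimal sketch (emulate_word_load() is a hypothetical wrapper used only for illustration; fetch_reg_addr() and decode_asi() are the existing helpers in unaligned.c):

	/* Sketch only: driving the new assembler helper from unaligned.c.
	 * emulate_word_load() is hypothetical; fetch_reg_addr() and
	 * decode_asi() already exist in unaligned.c.
	 */
	extern void do_int_load(unsigned long *dest_reg, int size,
				unsigned long *saddr, int is_signed, int asi);

	static void emulate_word_load(struct pt_regs *regs, unsigned int insn,
				      unsigned long addr)
	{
		/* rd field (bits 29:25) selects the destination register slot */
		unsigned long *rd = fetch_reg_addr((insn >> 25) & 0x1f, regs);

		/* 4-byte, zero-extended load through the ASI decoded from insn */
		do_int_load(rd, 4, (unsigned long *) addr, 0,
			    decode_asi(insn, regs));
	}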
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c index 4372bf32ecf6..da9739f0d437 100644 --- a/arch/sparc64/kernel/unaligned.c +++ b/arch/sparc64/kernel/unaligned.c | |||
@@ -180,169 +180,28 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs) | |||
180 | die_if_kernel(str, regs); | 180 | die_if_kernel(str, regs); |
181 | } | 181 | } |
182 | 182 | ||
183 | #define do_integer_load(dest_reg, size, saddr, is_signed, asi, errh) ({ \ | 183 | extern void do_int_load(unsigned long *dest_reg, int size, |
184 | __asm__ __volatile__ ( \ | 184 | unsigned long *saddr, int is_signed, int asi); |
185 | "wr %4, 0, %%asi\n\t" \ | ||
186 | "cmp %1, 8\n\t" \ | ||
187 | "bge,pn %%icc, 9f\n\t" \ | ||
188 | " cmp %1, 4\n\t" \ | ||
189 | "be,pt %%icc, 6f\n" \ | ||
190 | "4:\t" " lduba [%2] %%asi, %%l1\n" \ | ||
191 | "5:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ | ||
192 | "sll %%l1, 8, %%l1\n\t" \ | ||
193 | "brz,pt %3, 3f\n\t" \ | ||
194 | " add %%l1, %%l2, %%l1\n\t" \ | ||
195 | "sllx %%l1, 48, %%l1\n\t" \ | ||
196 | "srax %%l1, 48, %%l1\n" \ | ||
197 | "3:\t" "ba,pt %%xcc, 0f\n\t" \ | ||
198 | " stx %%l1, [%0]\n" \ | ||
199 | "6:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ | ||
200 | "sll %%l1, 24, %%l1\n" \ | ||
201 | "7:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \ | ||
202 | "sll %%l2, 16, %%l2\n" \ | ||
203 | "8:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \ | ||
204 | "sll %%g7, 8, %%g7\n\t" \ | ||
205 | "or %%l1, %%l2, %%l1\n\t" \ | ||
206 | "or %%g7, %%g1, %%g7\n\t" \ | ||
207 | "or %%l1, %%g7, %%l1\n\t" \ | ||
208 | "brnz,a,pt %3, 3f\n\t" \ | ||
209 | " sra %%l1, 0, %%l1\n" \ | ||
210 | "3:\t" "ba,pt %%xcc, 0f\n\t" \ | ||
211 | " stx %%l1, [%0]\n" \ | ||
212 | "9:\t" "lduba [%2] %%asi, %%l1\n" \ | ||
213 | "10:\t" "lduba [%2 + 1] %%asi, %%l2\n\t" \ | ||
214 | "sllx %%l1, 56, %%l1\n" \ | ||
215 | "11:\t" "lduba [%2 + 2] %%asi, %%g7\n\t" \ | ||
216 | "sllx %%l2, 48, %%l2\n" \ | ||
217 | "12:\t" "lduba [%2 + 3] %%asi, %%g1\n\t" \ | ||
218 | "sllx %%g7, 40, %%g7\n\t" \ | ||
219 | "sllx %%g1, 32, %%g1\n\t" \ | ||
220 | "or %%l1, %%l2, %%l1\n\t" \ | ||
221 | "or %%g7, %%g1, %%g7\n" \ | ||
222 | "13:\t" "lduba [%2 + 4] %%asi, %%l2\n\t" \ | ||
223 | "or %%l1, %%g7, %%g7\n" \ | ||
224 | "14:\t" "lduba [%2 + 5] %%asi, %%g1\n\t" \ | ||
225 | "sllx %%l2, 24, %%l2\n" \ | ||
226 | "15:\t" "lduba [%2 + 6] %%asi, %%l1\n\t" \ | ||
227 | "sllx %%g1, 16, %%g1\n\t" \ | ||
228 | "or %%g7, %%l2, %%g7\n" \ | ||
229 | "16:\t" "lduba [%2 + 7] %%asi, %%l2\n\t" \ | ||
230 | "sllx %%l1, 8, %%l1\n\t" \ | ||
231 | "or %%g7, %%g1, %%g7\n\t" \ | ||
232 | "or %%l1, %%l2, %%l1\n\t" \ | ||
233 | "or %%g7, %%l1, %%g7\n\t" \ | ||
234 | "cmp %1, 8\n\t" \ | ||
235 | "be,a,pt %%icc, 0f\n\t" \ | ||
236 | " stx %%g7, [%0]\n\t" \ | ||
237 | "srlx %%g7, 32, %%l1\n\t" \ | ||
238 | "sra %%g7, 0, %%g7\n\t" \ | ||
239 | "stx %%l1, [%0]\n\t" \ | ||
240 | "stx %%g7, [%0 + 8]\n" \ | ||
241 | "0:\n\t" \ | ||
242 | "wr %%g0, %5, %%asi\n\n\t" \ | ||
243 | ".section __ex_table\n\t" \ | ||
244 | ".word 4b, " #errh "\n\t" \ | ||
245 | ".word 5b, " #errh "\n\t" \ | ||
246 | ".word 6b, " #errh "\n\t" \ | ||
247 | ".word 7b, " #errh "\n\t" \ | ||
248 | ".word 8b, " #errh "\n\t" \ | ||
249 | ".word 9b, " #errh "\n\t" \ | ||
250 | ".word 10b, " #errh "\n\t" \ | ||
251 | ".word 11b, " #errh "\n\t" \ | ||
252 | ".word 12b, " #errh "\n\t" \ | ||
253 | ".word 13b, " #errh "\n\t" \ | ||
254 | ".word 14b, " #errh "\n\t" \ | ||
255 | ".word 15b, " #errh "\n\t" \ | ||
256 | ".word 16b, " #errh "\n\n\t" \ | ||
257 | ".previous\n\t" \ | ||
258 | : : "r" (dest_reg), "r" (size), "r" (saddr), "r" (is_signed), \ | ||
259 | "r" (asi), "i" (ASI_AIUS) \ | ||
260 | : "l1", "l2", "g7", "g1", "cc"); \ | ||
261 | }) | ||
262 | 185 | ||
263 | #define store_common(dst_addr, size, src_val, asi, errh) ({ \ | 186 | extern void __do_int_store(unsigned long *dst_addr, int size, |
264 | __asm__ __volatile__ ( \ | 187 | unsigned long *src_val, int asi); |
265 | "wr %3, 0, %%asi\n\t" \ | 188 | |
266 | "ldx [%2], %%l1\n" \ | 189 | static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr, |
267 | "cmp %1, 2\n\t" \ | 190 | struct pt_regs *regs, int asi) |
268 | "be,pn %%icc, 2f\n\t" \ | 191 | { |
269 | " cmp %1, 4\n\t" \ | 192 | unsigned long zero = 0; |
270 | "be,pt %%icc, 1f\n\t" \ | 193 | unsigned long *src_val = &zero; |
271 | " srlx %%l1, 24, %%l2\n\t" \ | 194 | |
272 | "srlx %%l1, 56, %%g1\n\t" \ | 195 | if (size == 16) { |
273 | "srlx %%l1, 48, %%g7\n" \ | 196 | size = 8; |
274 | "4:\t" "stba %%g1, [%0] %%asi\n\t" \ | 197 | zero = (((long)(reg_num ? |
275 | "srlx %%l1, 40, %%g1\n" \ | 198 | (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | |
276 | "5:\t" "stba %%g7, [%0 + 1] %%asi\n\t" \ | 199 | (unsigned)fetch_reg(reg_num + 1, regs); |
277 | "srlx %%l1, 32, %%g7\n" \ | 200 | } else if (reg_num) { |
278 | "6:\t" "stba %%g1, [%0 + 2] %%asi\n" \ | 201 | src_val = fetch_reg_addr(reg_num, regs); |
279 | "7:\t" "stba %%g7, [%0 + 3] %%asi\n\t" \ | 202 | } |
280 | "srlx %%l1, 16, %%g1\n" \ | 203 | __do_int_store(dst_addr, size, src_val, asi); |
281 | "8:\t" "stba %%l2, [%0 + 4] %%asi\n\t" \ | 204 | } |
282 | "srlx %%l1, 8, %%g7\n" \ | ||
283 | "9:\t" "stba %%g1, [%0 + 5] %%asi\n" \ | ||
284 | "10:\t" "stba %%g7, [%0 + 6] %%asi\n\t" \ | ||
285 | "ba,pt %%xcc, 0f\n" \ | ||
286 | "11:\t" " stba %%l1, [%0 + 7] %%asi\n" \ | ||
287 | "1:\t" "srl %%l1, 16, %%g7\n" \ | ||
288 | "12:\t" "stba %%l2, [%0] %%asi\n\t" \ | ||
289 | "srl %%l1, 8, %%l2\n" \ | ||
290 | "13:\t" "stba %%g7, [%0 + 1] %%asi\n" \ | ||
291 | "14:\t" "stba %%l2, [%0 + 2] %%asi\n\t" \ | ||
292 | "ba,pt %%xcc, 0f\n" \ | ||
293 | "15:\t" " stba %%l1, [%0 + 3] %%asi\n" \ | ||
294 | "2:\t" "srl %%l1, 8, %%l2\n" \ | ||
295 | "16:\t" "stba %%l2, [%0] %%asi\n" \ | ||
296 | "17:\t" "stba %%l1, [%0 + 1] %%asi\n" \ | ||
297 | "0:\n\t" \ | ||
298 | "wr %%g0, %4, %%asi\n\n\t" \ | ||
299 | ".section __ex_table\n\t" \ | ||
300 | ".word 4b, " #errh "\n\t" \ | ||
301 | ".word 5b, " #errh "\n\t" \ | ||
302 | ".word 6b, " #errh "\n\t" \ | ||
303 | ".word 7b, " #errh "\n\t" \ | ||
304 | ".word 8b, " #errh "\n\t" \ | ||
305 | ".word 9b, " #errh "\n\t" \ | ||
306 | ".word 10b, " #errh "\n\t" \ | ||
307 | ".word 11b, " #errh "\n\t" \ | ||
308 | ".word 12b, " #errh "\n\t" \ | ||
309 | ".word 13b, " #errh "\n\t" \ | ||
310 | ".word 14b, " #errh "\n\t" \ | ||
311 | ".word 15b, " #errh "\n\t" \ | ||
312 | ".word 16b, " #errh "\n\t" \ | ||
313 | ".word 17b, " #errh "\n\n\t" \ | ||
314 | ".previous\n\t" \ | ||
315 | : : "r" (dst_addr), "r" (size), "r" (src_val), "r" (asi), "i" (ASI_AIUS)\ | ||
316 | : "l1", "l2", "g7", "g1", "cc"); \ | ||
317 | }) | ||
318 | |||
319 | #define do_integer_store(reg_num, size, dst_addr, regs, asi, errh) ({ \ | ||
320 | unsigned long zero = 0; \ | ||
321 | unsigned long *src_val = &zero; \ | ||
322 | \ | ||
323 | if (size == 16) { \ | ||
324 | size = 8; \ | ||
325 | zero = (((long)(reg_num ? \ | ||
326 | (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) | \ | ||
327 | (unsigned)fetch_reg(reg_num + 1, regs); \ | ||
328 | } else if (reg_num) src_val = fetch_reg_addr(reg_num, regs); \ | ||
329 | store_common(dst_addr, size, src_val, asi, errh); \ | ||
330 | }) | ||
331 | |||
332 | extern void smp_capture(void); | ||
333 | extern void smp_release(void); | ||
334 | |||
335 | #define do_atomic(srcdest_reg, mem, errh) ({ \ | ||
336 | unsigned long flags, tmp; \ | ||
337 | \ | ||
338 | smp_capture(); \ | ||
339 | local_irq_save(flags); \ | ||
340 | tmp = *srcdest_reg; \ | ||
341 | do_integer_load(srcdest_reg, 4, mem, 0, errh); \ | ||
342 | store_common(mem, 4, &tmp, errh); \ | ||
343 | local_irq_restore(flags); \ | ||
344 | smp_release(); \ | ||
345 | }) | ||
346 | 205 | ||
347 | static inline void advance(struct pt_regs *regs) | 206 | static inline void advance(struct pt_regs *regs) |
348 | { | 207 | { |
@@ -364,24 +223,29 @@ static inline int ok_for_kernel(unsigned int insn) | |||
364 | return !floating_point_load_or_store_p(insn); | 223 | return !floating_point_load_or_store_p(insn); |
365 | } | 224 | } |
366 | 225 | ||
367 | void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) __asm__ ("kernel_mna_trap_fault"); | 226 | void kernel_mna_trap_fault(void) |
368 | |||
369 | void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn) | ||
370 | { | 227 | { |
371 | unsigned long g2 = regs->u_regs [UREG_G2]; | 228 | struct pt_regs *regs = current_thread_info()->kern_una_regs; |
229 | unsigned int insn = current_thread_info()->kern_una_insn; | ||
230 | unsigned long g2 = regs->u_regs[UREG_G2]; | ||
372 | unsigned long fixup = search_extables_range(regs->tpc, &g2); | 231 | unsigned long fixup = search_extables_range(regs->tpc, &g2); |
373 | 232 | ||
374 | if (!fixup) { | 233 | if (!fixup) { |
375 | unsigned long address = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); | 234 | unsigned long address; |
235 | |||
236 | address = compute_effective_address(regs, insn, | ||
237 | ((insn >> 25) & 0x1f)); | ||
376 | if (address < PAGE_SIZE) { | 238 | if (address < PAGE_SIZE) { |
377 | printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler"); | 239 | printk(KERN_ALERT "Unable to handle kernel NULL " |
240 | "pointer dereference in mna handler"); | ||
378 | } else | 241 | } else |
379 | printk(KERN_ALERT "Unable to handle kernel paging request in mna handler"); | 242 | printk(KERN_ALERT "Unable to handle kernel paging " |
243 | "request in mna handler"); | ||
380 | printk(KERN_ALERT " at virtual address %016lx\n",address); | 244 | printk(KERN_ALERT " at virtual address %016lx\n",address); |
381 | printk(KERN_ALERT "current->{mm,active_mm}->context = %016lx\n", | 245 | printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n", |
382 | (current->mm ? CTX_HWBITS(current->mm->context) : | 246 | (current->mm ? CTX_HWBITS(current->mm->context) : |
383 | CTX_HWBITS(current->active_mm->context))); | 247 | CTX_HWBITS(current->active_mm->context))); |
384 | printk(KERN_ALERT "current->{mm,active_mm}->pgd = %016lx\n", | 248 | printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n", |
385 | (current->mm ? (unsigned long) current->mm->pgd : | 249 | (current->mm ? (unsigned long) current->mm->pgd : |
386 | (unsigned long) current->active_mm->pgd)); | 250 | (unsigned long) current->active_mm->pgd)); |
387 | die_if_kernel("Oops", regs); | 251 | die_if_kernel("Oops", regs); |
@@ -400,48 +264,41 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u | |||
400 | enum direction dir = decode_direction(insn); | 264 | enum direction dir = decode_direction(insn); |
401 | int size = decode_access_size(insn); | 265 | int size = decode_access_size(insn); |
402 | 266 | ||
267 | current_thread_info()->kern_una_regs = regs; | ||
268 | current_thread_info()->kern_una_insn = insn; | ||
269 | |||
403 | if (!ok_for_kernel(insn) || dir == both) { | 270 | if (!ok_for_kernel(insn) || dir == both) { |
404 | printk("Unsupported unaligned load/store trap for kernel at <%016lx>.\n", | 271 | printk("Unsupported unaligned load/store trap for kernel " |
405 | regs->tpc); | 272 | "at <%016lx>.\n", regs->tpc); |
406 | unaligned_panic("Kernel does fpu/atomic unaligned load/store.", regs); | 273 | unaligned_panic("Kernel does fpu/atomic " |
407 | 274 | "unaligned load/store.", regs); | |
408 | __asm__ __volatile__ ("\n" | 275 | |
409 | "kernel_unaligned_trap_fault:\n\t" | 276 | kernel_mna_trap_fault(); |
410 | "mov %0, %%o0\n\t" | ||
411 | "call kernel_mna_trap_fault\n\t" | ||
412 | " mov %1, %%o1\n\t" | ||
413 | : | ||
414 | : "r" (regs), "r" (insn) | ||
415 | : "o0", "o1", "o2", "o3", "o4", "o5", "o7", | ||
416 | "g1", "g2", "g3", "g4", "g7", "cc"); | ||
417 | } else { | 277 | } else { |
418 | unsigned long addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); | 278 | unsigned long addr; |
419 | 279 | ||
280 | addr = compute_effective_address(regs, insn, | ||
281 | ((insn >> 25) & 0x1f)); | ||
420 | #ifdef DEBUG_MNA | 282 | #ifdef DEBUG_MNA |
421 | printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] retpc[%016lx]\n", | 283 | printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] " |
422 | regs->tpc, dirstrings[dir], addr, size, regs->u_regs[UREG_RETPC]); | 284 | "retpc[%016lx]\n", |
285 | regs->tpc, dirstrings[dir], addr, size, | ||
286 | regs->u_regs[UREG_RETPC]); | ||
423 | #endif | 287 | #endif |
424 | switch (dir) { | 288 | switch (dir) { |
425 | case load: | 289 | case load: |
426 | do_integer_load(fetch_reg_addr(((insn>>25)&0x1f), regs), | 290 | do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs), |
427 | size, (unsigned long *) addr, | 291 | size, (unsigned long *) addr, |
428 | decode_signedness(insn), decode_asi(insn, regs), | 292 | decode_signedness(insn), |
429 | kernel_unaligned_trap_fault); | 293 | decode_asi(insn, regs)); |
430 | break; | 294 | break; |
431 | 295 | ||
432 | case store: | 296 | case store: |
433 | do_integer_store(((insn>>25)&0x1f), size, | 297 | do_int_store(((insn>>25)&0x1f), size, |
434 | (unsigned long *) addr, regs, | 298 | (unsigned long *) addr, regs, |
435 | decode_asi(insn, regs), | 299 | decode_asi(insn, regs)); |
436 | kernel_unaligned_trap_fault); | ||
437 | break; | ||
438 | #if 0 /* unsupported */ | ||
439 | case both: | ||
440 | do_atomic(fetch_reg_addr(((insn>>25)&0x1f), regs), | ||
441 | (unsigned long *) addr, | ||
442 | kernel_unaligned_trap_fault); | ||
443 | break; | 300 | break; |
444 | #endif | 301 | |
445 | default: | 302 | default: |
446 | panic("Impossible kernel unaligned trap."); | 303 | panic("Impossible kernel unaligned trap."); |
447 | /* Not reached... */ | 304 | /* Not reached... */ |
@@ -492,9 +349,9 @@ int handle_popc(u32 insn, struct pt_regs *regs) | |||
492 | 349 | ||
493 | extern void do_fpother(struct pt_regs *regs); | 350 | extern void do_fpother(struct pt_regs *regs); |
494 | extern void do_privact(struct pt_regs *regs); | 351 | extern void do_privact(struct pt_regs *regs); |
495 | extern void data_access_exception(struct pt_regs *regs, | 352 | extern void spitfire_data_access_exception(struct pt_regs *regs, |
496 | unsigned long sfsr, | 353 | unsigned long sfsr, |
497 | unsigned long sfar); | 354 | unsigned long sfar); |
498 | 355 | ||
499 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) | 356 | int handle_ldf_stq(u32 insn, struct pt_regs *regs) |
500 | { | 357 | { |
@@ -537,14 +394,14 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
537 | break; | 394 | break; |
538 | } | 395 | } |
539 | default: | 396 | default: |
540 | data_access_exception(regs, 0, addr); | 397 | spitfire_data_access_exception(regs, 0, addr); |
541 | return 1; | 398 | return 1; |
542 | } | 399 | } |
543 | if (put_user (first >> 32, (u32 __user *)addr) || | 400 | if (put_user (first >> 32, (u32 __user *)addr) || |
544 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || | 401 | __put_user ((u32)first, (u32 __user *)(addr + 4)) || |
545 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || | 402 | __put_user (second >> 32, (u32 __user *)(addr + 8)) || |
546 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { | 403 | __put_user ((u32)second, (u32 __user *)(addr + 12))) { |
547 | data_access_exception(regs, 0, addr); | 404 | spitfire_data_access_exception(regs, 0, addr); |
548 | return 1; | 405 | return 1; |
549 | } | 406 | } |
550 | } else { | 407 | } else { |
@@ -557,7 +414,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
557 | do_privact(regs); | 414 | do_privact(regs); |
558 | return 1; | 415 | return 1; |
559 | } else if (asi > ASI_SNFL) { | 416 | } else if (asi > ASI_SNFL) { |
560 | data_access_exception(regs, 0, addr); | 417 | spitfire_data_access_exception(regs, 0, addr); |
561 | return 1; | 418 | return 1; |
562 | } | 419 | } |
563 | switch (insn & 0x180000) { | 420 | switch (insn & 0x180000) { |
@@ -574,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) | |||
574 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); | 431 | err |= __get_user (data[i], (u32 __user *)(addr + 4*i)); |
575 | } | 432 | } |
576 | if (err && !(asi & 0x2 /* NF */)) { | 433 | if (err && !(asi & 0x2 /* NF */)) { |
577 | data_access_exception(regs, 0, addr); | 434 | spitfire_data_access_exception(regs, 0, addr); |
578 | return 1; | 435 | return 1; |
579 | } | 436 | } |
580 | if (asi & 0x8) /* Little */ { | 437 | if (asi & 0x8) /* Little */ { |
@@ -677,7 +534,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
677 | *(u64 *)(f->regs + freg) = value; | 534 | *(u64 *)(f->regs + freg) = value; |
678 | current_thread_info()->fpsaved[0] |= flag; | 535 | current_thread_info()->fpsaved[0] |= flag; |
679 | } else { | 536 | } else { |
680 | daex: data_access_exception(regs, sfsr, sfar); | 537 | daex: spitfire_data_access_exception(regs, sfsr, sfar); |
681 | return; | 538 | return; |
682 | } | 539 | } |
683 | advance(regs); | 540 | advance(regs); |
@@ -721,7 +578,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr | |||
721 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) | 578 | __put_user ((u32)value, (u32 __user *)(sfar + 4))) |
722 | goto daex; | 579 | goto daex; |
723 | } else { | 580 | } else { |
724 | daex: data_access_exception(regs, sfsr, sfar); | 581 | daex: spitfire_data_access_exception(regs, sfsr, sfar); |
725 | return; | 582 | return; |
726 | } | 583 | } |
727 | advance(regs); | 584 | advance(regs); |
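One detail of the rewritten do_int_store() worth spelling out: in the size==16 case (apparently the register-pair ldd/std forms; decode_access_size() is not shown in this hunk), the two 32-bit halves are fetched from the even/odd register pair and packed into a single 64-bit value, and one 8-byte __do_int_store() is issued. A sketch with made-up values:

	/* Illustration only (values invented): how do_int_store() packs a
	 * 32-bit register pair into the single 8-byte value it hands to
	 * __do_int_store() for the size==16 case.
	 */
	static unsigned long pack_reg_pair(unsigned int even_val, unsigned int odd_val)
	{
		/* even register supplies the high word, odd register the low word */
		return ((unsigned long) even_val << 32) | odd_val;
	}

	/* pack_reg_pair(0x11223344, 0x55667788) == 0x1122334455667788 */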
diff --git a/arch/sparc64/kernel/us2e_cpufreq.c b/arch/sparc64/kernel/us2e_cpufreq.c index 7aae0a18aabe..686e526bec04 100644 --- a/arch/sparc64/kernel/us2e_cpufreq.c +++ b/arch/sparc64/kernel/us2e_cpufreq.c | |||
@@ -88,7 +88,6 @@ static void frob_mem_refresh(int cpu_slowing_down, | |||
88 | { | 88 | { |
89 | unsigned long old_refr_count, refr_count, mctrl; | 89 | unsigned long old_refr_count, refr_count, mctrl; |
90 | 90 | ||
91 | |||
92 | refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); | 91 | refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); |
93 | refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); | 92 | refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); |
94 | 93 | ||
@@ -230,6 +229,25 @@ static unsigned long estar_to_divisor(unsigned long estar) | |||
230 | return ret; | 229 | return ret; |
231 | } | 230 | } |
232 | 231 | ||
232 | static unsigned int us2e_freq_get(unsigned int cpu) | ||
233 | { | ||
234 | cpumask_t cpus_allowed; | ||
235 | unsigned long clock_tick, estar; | ||
236 | |||
237 | if (!cpu_online(cpu)) | ||
238 | return 0; | ||
239 | |||
240 | cpus_allowed = current->cpus_allowed; | ||
241 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
242 | |||
243 | clock_tick = sparc64_get_clock_tick(cpu) / 1000; | ||
244 | estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); | ||
245 | |||
246 | set_cpus_allowed(current, cpus_allowed); | ||
247 | |||
248 | return clock_tick / estar_to_divisor(estar); | ||
249 | } | ||
250 | |||
233 | static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) | 251 | static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) |
234 | { | 252 | { |
235 | unsigned long new_bits, new_freq; | 253 | unsigned long new_bits, new_freq; |
@@ -243,7 +261,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
243 | cpus_allowed = current->cpus_allowed; | 261 | cpus_allowed = current->cpus_allowed; |
244 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 262 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); |
245 | 263 | ||
246 | new_freq = clock_tick = sparc64_get_clock_tick(cpu); | 264 | new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
247 | new_bits = index_to_estar_mode(index); | 265 | new_bits = index_to_estar_mode(index); |
248 | divisor = index_to_divisor(index); | 266 | divisor = index_to_divisor(index); |
249 | new_freq /= divisor; | 267 | new_freq /= divisor; |
@@ -258,7 +276,8 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
258 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 276 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); |
259 | 277 | ||
260 | if (old_divisor != divisor) | 278 | if (old_divisor != divisor) |
261 | us2e_transition(estar, new_bits, clock_tick, old_divisor, divisor); | 279 | us2e_transition(estar, new_bits, clock_tick * 1000, |
280 | old_divisor, divisor); | ||
262 | 281 | ||
263 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 282 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
264 | 283 | ||
@@ -272,10 +291,8 @@ static int us2e_freq_target(struct cpufreq_policy *policy, | |||
272 | unsigned int new_index = 0; | 291 | unsigned int new_index = 0; |
273 | 292 | ||
274 | if (cpufreq_frequency_table_target(policy, | 293 | if (cpufreq_frequency_table_target(policy, |
275 | &us2e_freq_table[policy->cpu].table[0], | 294 | &us2e_freq_table[policy->cpu].table[0], |
276 | target_freq, | 295 | target_freq, relation, &new_index)) |
277 | relation, | ||
278 | &new_index)) | ||
279 | return -EINVAL; | 296 | return -EINVAL; |
280 | 297 | ||
281 | us2e_set_cpu_divider_index(policy->cpu, new_index); | 298 | us2e_set_cpu_divider_index(policy->cpu, new_index); |
@@ -292,7 +309,7 @@ static int us2e_freq_verify(struct cpufreq_policy *policy) | |||
292 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) | 309 | static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) |
293 | { | 310 | { |
294 | unsigned int cpu = policy->cpu; | 311 | unsigned int cpu = policy->cpu; |
295 | unsigned long clock_tick = sparc64_get_clock_tick(cpu); | 312 | unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
296 | struct cpufreq_frequency_table *table = | 313 | struct cpufreq_frequency_table *table = |
297 | &us2e_freq_table[cpu].table[0]; | 314 | &us2e_freq_table[cpu].table[0]; |
298 | 315 | ||
@@ -351,9 +368,10 @@ static int __init us2e_freq_init(void) | |||
351 | memset(us2e_freq_table, 0, | 368 | memset(us2e_freq_table, 0, |
352 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); | 369 | (NR_CPUS * sizeof(struct us2e_freq_percpu_info))); |
353 | 370 | ||
371 | driver->init = us2e_freq_cpu_init; | ||
354 | driver->verify = us2e_freq_verify; | 372 | driver->verify = us2e_freq_verify; |
355 | driver->target = us2e_freq_target; | 373 | driver->target = us2e_freq_target; |
356 | driver->init = us2e_freq_cpu_init; | 374 | driver->get = us2e_freq_get; |
357 | driver->exit = us2e_freq_cpu_exit; | 375 | driver->exit = us2e_freq_cpu_exit; |
358 | driver->owner = THIS_MODULE, | 376 | driver->owner = THIS_MODULE, |
359 | strcpy(driver->name, "UltraSPARC-IIe"); | 377 | strcpy(driver->name, "UltraSPARC-IIe"); |
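The recurring `/ 1000` in this driver (and in us3_cpufreq.c below) is a unit conversion: the cpufreq core and its frequency tables work in kHz, while sparc64_get_clock_tick() reports the clock in Hz, which is also why the value is multiplied back by 1000 before being passed to us2e_transition(). A small sketch of the convention (assumes the sparc64_get_clock_tick() prototype as used by these drivers):

	/* Units sketch (illustrative, not part of the patch). */
	extern unsigned long sparc64_get_clock_tick(unsigned int cpu);

	static unsigned long clock_tick_khz(unsigned int cpu)
	{
		/* cpufreq tables and notifications are expressed in kHz */
		return sparc64_get_clock_tick(cpu) / 1000;
	}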
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c index 18fe54b8aa55..9080e7cd4bb0 100644 --- a/arch/sparc64/kernel/us3_cpufreq.c +++ b/arch/sparc64/kernel/us3_cpufreq.c | |||
@@ -56,7 +56,7 @@ static void write_safari_cfg(unsigned long val) | |||
56 | 56 | ||
57 | static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) | 57 | static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) |
58 | { | 58 | { |
59 | unsigned long clock_tick = sparc64_get_clock_tick(cpu); | 59 | unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
60 | unsigned long ret; | 60 | unsigned long ret; |
61 | 61 | ||
62 | switch (safari_cfg & SAFARI_CFG_DIV_MASK) { | 62 | switch (safari_cfg & SAFARI_CFG_DIV_MASK) { |
@@ -76,6 +76,26 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg | |||
76 | return ret; | 76 | return ret; |
77 | } | 77 | } |
78 | 78 | ||
79 | static unsigned int us3_freq_get(unsigned int cpu) | ||
80 | { | ||
81 | cpumask_t cpus_allowed; | ||
82 | unsigned long reg; | ||
83 | unsigned int ret; | ||
84 | |||
85 | if (!cpu_online(cpu)) | ||
86 | return 0; | ||
87 | |||
88 | cpus_allowed = current->cpus_allowed; | ||
89 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | ||
90 | |||
91 | reg = read_safari_cfg(); | ||
92 | ret = get_current_freq(cpu, reg); | ||
93 | |||
94 | set_cpus_allowed(current, cpus_allowed); | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | |||
79 | static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) | 99 | static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) |
80 | { | 100 | { |
81 | unsigned long new_bits, new_freq, reg; | 101 | unsigned long new_bits, new_freq, reg; |
@@ -88,7 +108,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) | |||
88 | cpus_allowed = current->cpus_allowed; | 108 | cpus_allowed = current->cpus_allowed; |
89 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); | 109 | set_cpus_allowed(current, cpumask_of_cpu(cpu)); |
90 | 110 | ||
91 | new_freq = sparc64_get_clock_tick(cpu); | 111 | new_freq = sparc64_get_clock_tick(cpu) / 1000; |
92 | switch (index) { | 112 | switch (index) { |
93 | case 0: | 113 | case 0: |
94 | new_bits = SAFARI_CFG_DIV_1; | 114 | new_bits = SAFARI_CFG_DIV_1; |
@@ -150,7 +170,7 @@ static int us3_freq_verify(struct cpufreq_policy *policy) | |||
150 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) | 170 | static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) |
151 | { | 171 | { |
152 | unsigned int cpu = policy->cpu; | 172 | unsigned int cpu = policy->cpu; |
153 | unsigned long clock_tick = sparc64_get_clock_tick(cpu); | 173 | unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; |
154 | struct cpufreq_frequency_table *table = | 174 | struct cpufreq_frequency_table *table = |
155 | &us3_freq_table[cpu].table[0]; | 175 | &us3_freq_table[cpu].table[0]; |
156 | 176 | ||
@@ -206,9 +226,10 @@ static int __init us3_freq_init(void) | |||
206 | memset(us3_freq_table, 0, | 226 | memset(us3_freq_table, 0, |
207 | (NR_CPUS * sizeof(struct us3_freq_percpu_info))); | 227 | (NR_CPUS * sizeof(struct us3_freq_percpu_info))); |
208 | 228 | ||
229 | driver->init = us3_freq_cpu_init; | ||
209 | driver->verify = us3_freq_verify; | 230 | driver->verify = us3_freq_verify; |
210 | driver->target = us3_freq_target; | 231 | driver->target = us3_freq_target; |
211 | driver->init = us3_freq_cpu_init; | 232 | driver->get = us3_freq_get; |
212 | driver->exit = us3_freq_cpu_exit; | 233 | driver->exit = us3_freq_cpu_exit; |
213 | driver->owner = THIS_MODULE, | 234 | driver->owner = THIS_MODULE, |
214 | strcpy(driver->name, "UltraSPARC-III"); | 235 | strcpy(driver->name, "UltraSPARC-III"); |
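Both new ->get() callbacks follow the same pattern: temporarily pin the calling task to the target CPU with set_cpus_allowed(), read that CPU's clock-control register, then restore the original affinity mask. A generic sketch of the pattern (read_hw_freq() is a hypothetical stand-in for read_hbreg()/read_safari_cfg()):

	/* Sketch of the affinity-pinning pattern shared by us2e_freq_get()
	 * and us3_freq_get().  read_hw_freq() is hypothetical; the real
	 * drivers read the E-Star mode or Safari config register instead.
	 */
	static unsigned int read_hw_freq(unsigned int cpu);	/* hypothetical */

	static unsigned int sample_cpu_freq(unsigned int cpu)
	{
		cpumask_t saved = current->cpus_allowed;
		unsigned int khz;

		if (!cpu_online(cpu))
			return 0;

		set_cpus_allowed(current, cpumask_of_cpu(cpu));
		khz = read_hw_freq(cpu);
		set_cpus_allowed(current, saved);

		return khz;
	}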
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S index dfbc7e0dcf70..99c809a1e5ac 100644 --- a/arch/sparc64/kernel/winfixup.S +++ b/arch/sparc64/kernel/winfixup.S | |||
@@ -318,7 +318,7 @@ fill_fixup_dax: | |||
318 | nop | 318 | nop |
319 | rdpr %pstate, %l1 ! Prepare to change globals. | 319 | rdpr %pstate, %l1 ! Prepare to change globals. |
320 | mov %g4, %o1 ! Setup args for | 320 | mov %g4, %o1 ! Setup args for |
321 | mov %g5, %o2 ! final call to data_access_exception. | 321 | mov %g5, %o2 ! final call to spitfire_data_access_exception. |
322 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO | 322 | andn %l1, PSTATE_MM, %l1 ! We want to be in RMO |
323 | 323 | ||
324 | mov %g6, %o7 ! Stash away current. | 324 | mov %g6, %o7 ! Stash away current. |
@@ -330,7 +330,7 @@ fill_fixup_dax: | |||
330 | mov TSB_REG, %g1 | 330 | mov TSB_REG, %g1 |
331 | ldxa [%g1] ASI_IMMU, %g5 | 331 | ldxa [%g1] ASI_IMMU, %g5 |
332 | #endif | 332 | #endif |
333 | call data_access_exception | 333 | call spitfire_data_access_exception |
334 | add %sp, PTREGS_OFF, %o0 | 334 | add %sp, PTREGS_OFF, %o0 |
335 | 335 | ||
336 | b,pt %xcc, rtrap | 336 | b,pt %xcc, rtrap |
@@ -391,7 +391,7 @@ window_dax_from_user_common: | |||
391 | 109: or %g7, %lo(109b), %g7 | 391 | 109: or %g7, %lo(109b), %g7 |
392 | mov %l4, %o1 | 392 | mov %l4, %o1 |
393 | mov %l5, %o2 | 393 | mov %l5, %o2 |
394 | call data_access_exception | 394 | call spitfire_data_access_exception |
395 | add %sp, PTREGS_OFF, %o0 | 395 | add %sp, PTREGS_OFF, %o0 |
396 | ba,pt %xcc, rtrap | 396 | ba,pt %xcc, rtrap |
397 | clr %l6 | 397 | clr %l6 |