Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--	arch/mips/kernel/traps.c	276
1 files changed, 160 insertions, 116 deletions
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 5b4d711f878d..e334c641a81b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -12,6 +12,7 @@
  * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
  * Copyright (C) 2014, Imagination Technologies Ltd.
  */
+#include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
@@ -699,36 +700,60 @@ asmlinkage void do_ov(struct pt_regs *regs)
 	exception_exit(prev_state);
 }
 
-int process_fpemu_return(int sig, void __user *fault_addr)
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 {
-	/*
-	 * We can't allow the emulated instruction to leave any of the cause
-	 * bits set in FCSR. If they were then the kernel would take an FP
-	 * exception when restoring FP context.
-	 */
-	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+	struct siginfo si = { 0 };
+
+	switch (sig) {
+	case 0:
+		return 0;
 
-	if (sig == SIGSEGV || sig == SIGBUS) {
-		struct siginfo si = {0};
+	case SIGFPE:
 		si.si_addr = fault_addr;
 		si.si_signo = sig;
-		if (sig == SIGSEGV) {
-			down_read(&current->mm->mmap_sem);
-			if (find_vma(current->mm, (unsigned long)fault_addr))
-				si.si_code = SEGV_ACCERR;
-			else
-				si.si_code = SEGV_MAPERR;
-			up_read(&current->mm->mmap_sem);
-		} else {
-			si.si_code = BUS_ADRERR;
-		}
+		/*
+		 * Inexact can happen together with Overflow or Underflow.
+		 * Respect the mask to deliver the correct exception.
+		 */
+		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
+			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
+		if (fcr31 & FPU_CSR_INV_X)
+			si.si_code = FPE_FLTINV;
+		else if (fcr31 & FPU_CSR_DIV_X)
+			si.si_code = FPE_FLTDIV;
+		else if (fcr31 & FPU_CSR_OVF_X)
+			si.si_code = FPE_FLTOVF;
+		else if (fcr31 & FPU_CSR_UDF_X)
+			si.si_code = FPE_FLTUND;
+		else if (fcr31 & FPU_CSR_INE_X)
+			si.si_code = FPE_FLTRES;
+		else
+			si.si_code = __SI_FAULT;
+		force_sig_info(sig, &si, current);
+		return 1;
+
+	case SIGBUS:
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		si.si_code = BUS_ADRERR;
+		force_sig_info(sig, &si, current);
+		return 1;
+
+	case SIGSEGV:
+		si.si_addr = fault_addr;
+		si.si_signo = sig;
+		down_read(&current->mm->mmap_sem);
+		if (find_vma(current->mm, (unsigned long)fault_addr))
+			si.si_code = SEGV_ACCERR;
+		else
+			si.si_code = SEGV_MAPERR;
+		up_read(&current->mm->mmap_sem);
 		force_sig_info(sig, &si, current);
 		return 1;
-	} else if (sig) {
+
+	default:
 		force_sig(sig, current);
 		return 1;
-	} else {
-		return 0;
 	}
 }
 
@@ -736,7 +761,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 		       unsigned long old_epc, unsigned long old_ra)
 {
 	union mips_instruction inst = { .word = opcode };
-	void __user *fault_addr = NULL;
+	void __user *fault_addr;
+	unsigned long fcr31;
 	int sig;
 
 	/* If it's obviously not an FP instruction, skip it */
@@ -766,13 +792,20 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 				       &fault_addr);
+	fcr31 = current->thread.fpu.fcr31;
 
-	/* If something went wrong, signal */
-	process_fpemu_return(sig, fault_addr);
+	/*
+	 * We can't allow the emulated instruction to leave any of
+	 * the cause bits set in $fcr31.
+	 */
+	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
 	/* Restore the hardware register state */
 	own_fpu(1);
 
+	/* Send a signal if required. */
+	process_fpemu_return(sig, fault_addr, fcr31);
+
 	return 0;
 }
 
@@ -782,7 +815,8 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 {
 	enum ctx_state prev_state;
-	siginfo_t info = {0};
+	void __user *fault_addr;
+	int sig;
 
 	prev_state = exception_enter();
 	if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
@@ -796,9 +830,6 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 	die_if_kernel("FP exception in kernel code", regs);
 
 	if (fcr31 & FPU_CSR_UNI_X) {
-		int sig;
-		void __user *fault_addr = NULL;
-
 		/*
 		 * Unimplemented operation exception. If we've got the full
 		 * software emulator on-board, let's use it...
@@ -815,30 +846,23 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 		/* Run the emulator */
 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 					       &fault_addr);
+		fcr31 = current->thread.fpu.fcr31;
 
-		/* If something went wrong, signal */
-		process_fpemu_return(sig, fault_addr);
+		/*
+		 * We can't allow the emulated instruction to leave any of
+		 * the cause bits set in $fcr31.
+		 */
+		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 
 		/* Restore the hardware register state */
 		own_fpu(1);	/* Using the FPU again. */
+	} else {
+		sig = SIGFPE;
+		fault_addr = (void __user *) regs->cp0_epc;
+	}
 
-		goto out;
-	} else if (fcr31 & FPU_CSR_INV_X)
-		info.si_code = FPE_FLTINV;
-	else if (fcr31 & FPU_CSR_DIV_X)
-		info.si_code = FPE_FLTDIV;
-	else if (fcr31 & FPU_CSR_OVF_X)
-		info.si_code = FPE_FLTOVF;
-	else if (fcr31 & FPU_CSR_UDF_X)
-		info.si_code = FPE_FLTUND;
-	else if (fcr31 & FPU_CSR_INE_X)
-		info.si_code = FPE_FLTRES;
-	else
-		info.si_code = __SI_FAULT;
-	info.si_signo = SIGFPE;
-	info.si_errno = 0;
-	info.si_addr = (void __user *) regs->cp0_epc;
-	force_sig_info(SIGFPE, &info, current);
+	/* Send a signal if required. */
+	process_fpemu_return(sig, fault_addr, fcr31);
 
 out:
 	exception_exit(prev_state);
@@ -885,9 +909,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 		break;
 	case BRK_MEMU:
 		/*
-		 * Address errors may be deliberately induced by the FPU
-		 * emulator to retake control of the CPU after executing the
-		 * instruction in the delay slot of an emulated branch.
+		 * This breakpoint code is used by the FPU emulator to retake
+		 * control of the CPU after executing the instruction from the
+		 * delay slot of an emulated branch.
 		 *
 		 * Terminate if exception was recognized as a delay slot return
 		 * otherwise handle as normal.
@@ -907,10 +931,9 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 
 asmlinkage void do_bp(struct pt_regs *regs)
 {
+	unsigned long epc = msk_isa16_mode(exception_epc(regs));
 	unsigned int opcode, bcode;
 	enum ctx_state prev_state;
-	unsigned long epc;
-	u16 instr[2];
 	mm_segment_t seg;
 
 	seg = get_fs();
@@ -919,26 +942,28 @@ asmlinkage void do_bp(struct pt_regs *regs)
 
 	prev_state = exception_enter();
 	if (get_isa16_mode(regs->cp0_epc)) {
-		/* Calculate EPC. */
-		epc = exception_epc(regs);
-		if (cpu_has_mmips) {
-			if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
-			    (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
-				goto out_sigsegv;
-			opcode = (instr[0] << 16) | instr[1];
-		} else {
+		u16 instr[2];
+
+		if (__get_user(instr[0], (u16 __user *)epc))
+			goto out_sigsegv;
+
+		if (!cpu_has_mmips) {
 			/* MIPS16e mode */
-			if (__get_user(instr[0],
-				       (u16 __user *)msk_isa16_mode(epc)))
+			bcode = (instr[0] >> 5) & 0x3f;
+		} else if (mm_insn_16bit(instr[0])) {
+			/* 16-bit microMIPS BREAK */
+			bcode = instr[0] & 0xf;
+		} else {
+			/* 32-bit microMIPS BREAK */
+			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
 				goto out_sigsegv;
-			bcode = (instr[0] >> 6) & 0x3f;
-			do_trap_or_bp(regs, bcode, "Break");
-			goto out;
+			opcode = (instr[0] << 16) | instr[1];
+			bcode = (opcode >> 6) & ((1 << 20) - 1);
 		}
 	} else {
-		if (__get_user(opcode,
-			       (unsigned int __user *) exception_epc(regs)))
+		if (__get_user(opcode, (unsigned int __user *)epc))
 			goto out_sigsegv;
+		bcode = (opcode >> 6) & ((1 << 20) - 1);
 	}
 
 	/*
@@ -947,9 +972,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
 	 * Gas is bug-compatible, but not always, grrr...
 	 * We handle both cases with a simple heuristics. --macro
 	 */
-	bcode = ((opcode >> 6) & ((1 << 20) - 1));
 	if (bcode >= (1 << 10))
-		bcode >>= 10;
+		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
 
 	/*
 	 * notify the kprobe handlers, if instruction is likely to
@@ -1039,22 +1063,24 @@ asmlinkage void do_ri(struct pt_regs *regs)
 	 * as quickly as possible.
 	 */
 	if (mipsr2_emulation && cpu_has_mips_r6 &&
-	    likely(user_mode(regs))) {
-		if (likely(get_user(opcode, epc) >= 0)) {
-			status = mipsr2_decoder(regs, opcode);
-			switch (status) {
-			case 0:
-			case SIGEMT:
-				task_thread_info(current)->r2_emul_return = 1;
-				return;
-			case SIGILL:
-				goto no_r2_instr;
-			default:
-				process_fpemu_return(status,
-						     &current->thread.cp0_baduaddr);
-				task_thread_info(current)->r2_emul_return = 1;
-				return;
-			}
+	    likely(user_mode(regs)) &&
+	    likely(get_user(opcode, epc) >= 0)) {
+		unsigned long fcr31 = 0;
+
+		status = mipsr2_decoder(regs, opcode, &fcr31);
+		switch (status) {
+		case 0:
+		case SIGEMT:
+			task_thread_info(current)->r2_emul_return = 1;
+			return;
+		case SIGILL:
+			goto no_r2_instr;
+		default:
+			process_fpemu_return(status,
+					     &current->thread.cp0_baduaddr,
+					     fcr31);
+			task_thread_info(current)->r2_emul_return = 1;
+			return;
 		}
 	}
 
@@ -1299,10 +1325,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 	enum ctx_state prev_state;
 	unsigned int __user *epc;
 	unsigned long old_epc, old31;
+	void __user *fault_addr;
 	unsigned int opcode;
+	unsigned long fcr31;
 	unsigned int cpid;
 	int status, err;
 	unsigned long __maybe_unused flags;
+	int sig;
 
 	prev_state = exception_enter();
 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -1319,7 +1348,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 		status = -1;
 
 		if (unlikely(compute_return_epc(regs) < 0))
-			goto out;
+			break;
 
 		if (get_isa16_mode(regs->cp0_epc)) {
 			unsigned short mmop[2] = { 0 };
@@ -1352,49 +1381,54 @@ asmlinkage void do_cpu(struct pt_regs *regs)
 			force_sig(status, current);
 		}
 
-		goto out;
+		break;
 
 	case 3:
 		/*
-		 * Old (MIPS I and MIPS II) processors will set this code
-		 * for COP1X opcode instructions that replaced the original
-		 * COP3 space. We don't limit COP1 space instructions in
-		 * the emulator according to the CPU ISA, so we want to
-		 * treat COP1X instructions consistently regardless of which
-		 * code the CPU chose. Therefore we redirect this trap to
-		 * the FP emulator too.
-		 *
-		 * Then some newer FPU-less processors use this code
-		 * erroneously too, so they are covered by this choice
-		 * as well.
+		 * The COP3 opcode space and consequently the CP0.Status.CU3
+		 * bit and the CP0.Cause.CE=3 encoding have been removed as
+		 * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
+		 * up the space has been reused for COP1X instructions, that
+		 * are enabled by the CP0.Status.CU1 bit and consequently
+		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
+		 * exceptions. Some FPU-less processors that implement one
+		 * of these ISAs however use this code erroneously for COP1X
+		 * instructions. Therefore we redirect this trap to the FP
+		 * emulator too.
 		 */
-		if (raw_cpu_has_fpu)
+		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
+			force_sig(SIGILL, current);
 			break;
+		}
 		/* Fall through. */
 
 	case 1:
 		err = enable_restore_fp_context(0);
 
-		if (!raw_cpu_has_fpu || err) {
-			int sig;
-			void __user *fault_addr = NULL;
-			sig = fpu_emulator_cop1Handler(regs,
-						       &current->thread.fpu,
-						       0, &fault_addr);
-			if (!process_fpemu_return(sig, fault_addr) && !err)
-				mt_ase_fp_affinity();
-		}
+		if (raw_cpu_has_fpu && !err)
+			break;
 
-		goto out;
+		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+					       &fault_addr);
+		fcr31 = current->thread.fpu.fcr31;
+
+		/*
+		 * We can't allow the emulated instruction to leave
+		 * any of the cause bits set in $fcr31.
+		 */
+		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
+		/* Send a signal if required. */
+		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
+			mt_ase_fp_affinity();
+
+		break;
 
 	case 2:
 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
-		goto out;
+		break;
 	}
 
-	force_sig(SIGILL, current);
-
-out:
 	exception_exit(prev_state);
 }
 
@@ -1984,6 +2018,12 @@ int cp0_compare_irq_shift;
 int cp0_perfcount_irq;
 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
 
+/*
+ * Fast debug channel IRQ or -1 if not present
+ */
+int cp0_fdc_irq;
+EXPORT_SYMBOL_GPL(cp0_fdc_irq);
+
 static int noulri;
 
 static int __init ulri_disable(char *s)
@@ -2065,17 +2105,21 @@ void per_cpu_trap_init(bool is_boot_cpu)
 	 *
 	 *  o read IntCtl.IPTI to determine the timer interrupt
 	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
+	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
 	 */
 	if (cpu_has_mips_r2_r6) {
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
-		if (cp0_perfcount_irq == cp0_compare_irq)
-			cp0_perfcount_irq = -1;
+		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
+		if (!cp0_fdc_irq)
+			cp0_fdc_irq = -1;
+
 	} else {
 		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
 		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
 		cp0_perfcount_irq = -1;
+		cp0_fdc_irq = -1;
 	}
 
 	if (!cpu_data[cpu].asid_cache)