Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |    5
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |   82
-rw-r--r--  arch/mips/kernel/bmips_vec.S          |    2
-rw-r--r--  arch/mips/kernel/cps-vec.S            |  191
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |   74
-rw-r--r--  arch/mips/kernel/ftrace.c             |    4
-rw-r--r--  arch/mips/kernel/genex.S              |    8
-rw-r--r--  arch/mips/kernel/head.S               |    2
-rw-r--r--  arch/mips/kernel/idle.c               |    9
-rw-r--r--  arch/mips/kernel/irq-gic.c            |    1
-rw-r--r--  arch/mips/kernel/kgdb.c               |   18
-rw-r--r--  arch/mips/kernel/mips-cm.c            |  121
-rw-r--r--  arch/mips/kernel/mips-cpc.c           |   52
-rw-r--r--  arch/mips/kernel/mips_ksyms.c         |   24
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |   80
-rw-r--r--  arch/mips/kernel/proc.c               |   25
-rw-r--r--  arch/mips/kernel/process.c            |   23
-rw-r--r--  arch/mips/kernel/ptrace.c             |  161
-rw-r--r--  arch/mips/kernel/ptrace32.c           |   67
-rw-r--r--  arch/mips/kernel/r4k_fpu.S            |  215
-rw-r--r--  arch/mips/kernel/r4k_switch.S         |   60
-rw-r--r--  arch/mips/kernel/scall32-o32.S        |   24
-rw-r--r--  arch/mips/kernel/scall64-64.S         |    5
-rw-r--r--  arch/mips/kernel/scall64-n32.S        |    5
-rw-r--r--  arch/mips/kernel/scall64-o32.S        |   17
-rw-r--r--  arch/mips/kernel/signal.c             |  170
-rw-r--r--  arch/mips/kernel/signal32.c           |  137
-rw-r--r--  arch/mips/kernel/smp-cmp.c            |   55
-rw-r--r--  arch/mips/kernel/smp-cps.c            |  335
-rw-r--r--  arch/mips/kernel/smp-gic.c            |   53
-rw-r--r--  arch/mips/kernel/smp-mt.c             |   45
-rw-r--r--  arch/mips/kernel/smtc-proc.c          |   23
-rw-r--r--  arch/mips/kernel/spram.c              |    5
-rw-r--r--  arch/mips/kernel/syscall.c            |    4
-rw-r--r--  arch/mips/kernel/traps.c              |  144
-rw-r--r--  arch/mips/kernel/unaligned.c          |  135
36 files changed, 2010 insertions(+), 371 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 26c6175e1379..277dab301cea 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -53,6 +53,8 @@ obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
+obj-$(CONFIG_MIPS_CPS)		+= smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_GIC_IPI)	+= smp-gic.o
 obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
@@ -102,6 +104,9 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
 
 obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 
+obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
+obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
+
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
 # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c2e853c3db4..0ea75c244b48 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
+#include <asm/smp-cps.h>
 
 #include <linux/kvm_host.h>
 
@@ -168,6 +169,72 @@ void output_thread_fpu_defines(void)
 	OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
 	OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
 
+	/* the least significant 64 bits of each FP register */
+	OFFSET(THREAD_FPR0_LS64, task_struct,
+	       thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR1_LS64, task_struct,
+	       thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR2_LS64, task_struct,
+	       thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR3_LS64, task_struct,
+	       thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR4_LS64, task_struct,
+	       thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR5_LS64, task_struct,
+	       thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR6_LS64, task_struct,
+	       thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR7_LS64, task_struct,
+	       thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR8_LS64, task_struct,
+	       thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR9_LS64, task_struct,
+	       thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR10_LS64, task_struct,
+	       thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR11_LS64, task_struct,
+	       thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR12_LS64, task_struct,
+	       thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR13_LS64, task_struct,
+	       thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR14_LS64, task_struct,
+	       thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR15_LS64, task_struct,
+	       thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR16_LS64, task_struct,
+	       thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR17_LS64, task_struct,
+	       thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR18_LS64, task_struct,
+	       thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR19_LS64, task_struct,
+	       thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR20_LS64, task_struct,
+	       thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR21_LS64, task_struct,
+	       thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR22_LS64, task_struct,
+	       thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR23_LS64, task_struct,
+	       thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR24_LS64, task_struct,
+	       thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR25_LS64, task_struct,
+	       thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR26_LS64, task_struct,
+	       thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR27_LS64, task_struct,
+	       thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR28_LS64, task_struct,
+	       thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR29_LS64, task_struct,
+	       thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR30_LS64, task_struct,
+	       thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+	OFFSET(THREAD_FPR31_LS64, task_struct,
+	       thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
+
 	OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
 	BLANK();
 }
@@ -228,6 +295,7 @@ void output_sc_defines(void)
 	OFFSET(SC_LO2, sigcontext, sc_lo2);
 	OFFSET(SC_HI3, sigcontext, sc_hi3);
 	OFFSET(SC_LO3, sigcontext, sc_lo3);
+	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -242,6 +310,7 @@ void output_sc_defines(void)
 	OFFSET(SC_MDLO, sigcontext, sc_mdlo);
 	OFFSET(SC_PC, sigcontext, sc_pc);
 	OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+	OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -253,6 +322,7 @@ void output_sc32_defines(void)
 	OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
 	OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
 	OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+	OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
 	BLANK();
 }
 #endif
@@ -397,3 +467,15 @@ void output_kvm_defines(void)
 	OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
 	BLANK();
 }
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+	COMMENT(" MIPS CPS offsets. ");
+	OFFSET(BOOTCFG_CORE, boot_config, core);
+	OFFSET(BOOTCFG_VPE, boot_config, vpe);
+	OFFSET(BOOTCFG_PC, boot_config, pc);
+	OFFSET(BOOTCFG_SP, boot_config, sp);
+	OFFSET(BOOTCFG_GP, boot_config, gp);
+}
+#endif
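
For context when reading the OFFSET()/COMMENT()/BLANK() calls above: asm-offsets.c is compiled but never linked; Kbuild scrapes the generated assembly and turns each marker into a constant in the generated asm-offsets.h, which is what lets assembly such as cps-vec.S below use names like BOOTCFG_PC. A simplified sketch of the mechanism from include/linux/kbuild.h (illustrative only, not part of this patch):

/* Each DEFINE() emits a "->SYM value" marker into the compiler's
 * assembly output; a Kbuild sed script rewrites those markers into
 * "#define SYM value" lines in asm-offsets.h. */
#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() \
	asm volatile("\n->" : : )
#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
#define COMMENT(x) \
	asm volatile("\n->#" x)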
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index a5bf73d22fcc..290c23b51678 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
 	jr	k0
 
 	RESTORE_ALL
-	.set	mips3
+	.set	arch=r4000
 	eret
 
 /***********************************************************************
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 000000000000..f7a46db4b161
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/mipsregs.h>
+
+#define GCR_CL_COHERENCE_OFS 0x2008
+
+.section .text.cps-vec
+.balign 0x1000
+.set noreorder
+
+LEAF(mips_cps_core_entry)
+	/*
+	 * These first 8 bytes will be patched by cps_smp_setup to load the
+	 * base address of the CM GCRs into register v1.
+	 */
+	.quad	0
+
+	/* Check whether we're here due to an NMI */
+	mfc0	k0, CP0_STATUS
+	and	k0, k0, ST0_NMI
+	beqz	k0, not_nmi
+	 nop
+
+	/* This is an NMI */
+	la	k0, nmi_handler
+	jr	k0
+	 nop
+
+not_nmi:
+	/* Setup Cause */
+	li	t0, CAUSEF_IV
+	mtc0	t0, CP0_CAUSE
+
+	/* Setup Status */
+	li	t0, ST0_CU1 | ST0_CU0
+	mtc0	t0, CP0_STATUS
+
+	/*
+	 * Clear the bits used to index the caches. Note that the architecture
+	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+	 * be valid for all MIPS32 CPUs, even those for which said writes are
+	 * unnecessary.
+	 */
+	mtc0	zero, CP0_TAGLO, 0
+	mtc0	zero, CP0_TAGHI, 0
+	mtc0	zero, CP0_TAGLO, 2
+	mtc0	zero, CP0_TAGHI, 2
+	ehb
+
+	/* Primary cache configuration is indicated by Config1 */
+	mfc0	v0, CP0_CONFIG, 1
+
+	/* Detect I-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+	beqz	t0, icache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect I-cache size */
+	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addi	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == I-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+	addi	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, KSEG0
+	add	a1, a0, t1
+1:	cache	Index_Store_Tag_I, 0(a0)
+	add	a0, a0, t0
+	bne	a0, a1, 1b
+	 nop
+icache_done:
+
+	/* Detect D-cache line size */
+	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+	beqz	t0, dcache_done
+	 li	t1, 2
+	sllv	t0, t1, t0
+
+	/* Detect D-cache size */
+	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+	xori	t2, t1, 0x7
+	beqz	t2, 1f
+	 li	t3, 32
+	addi	t1, t1, 1
+	sllv	t1, t3, t1
+1:	/* At this point t1 == D-cache sets per way */
+	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+	addi	t2, t2, 1
+	mul	t1, t1, t0
+	mul	t1, t1, t2
+
+	li	a0, KSEG0
+	addu	a1, a0, t1
+	subu	a1, a1, t0
+1:	cache	Index_Store_Tag_D, 0(a0)
+	bne	a0, a1, 1b
+	 add	a0, a0, t0
+dcache_done:
+
+	/* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+	mfc0	t0, CP0_CONFIG
+	ori	t0, 0x7
+	xori	t0, 0x2
+	mtc0	t0, CP0_CONFIG
+	ehb
+
+	/* Enter the coherent domain */
+	li	t0, 0xff
+	sw	t0, GCR_CL_COHERENCE_OFS(v1)
+	ehb
+
+	/* Jump to kseg0 */
+	la	t0, 1f
+	jr	t0
+	 nop
+
+1:	/* We're up, cached & coherent */
+
+	/*
+	 * TODO: We should check the VPE number we intended to boot here, and
+	 *       if non-zero we should start that VPE and stop this one. For
+	 *       the moment this doesn't matter since CPUs are brought up
+	 *       sequentially and in order, but once hotplug is implemented
+	 *       this will need revisiting.
+	 */
+
+	/* Off we go! */
+	la	t0, mips_cps_bootcfg
+	lw	t1, BOOTCFG_PC(t0)
+	lw	gp, BOOTCFG_GP(t0)
+	lw	sp, BOOTCFG_SP(t0)
+	jr	t1
+	 nop
+	END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+	b	.
+	 nop
+	END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+	b	.
+	 nop
+	END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+	b	.
+	 nop
+	END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+	b	.
+	 nop
+	END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+	b	.
+	 nop
+	END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+	la	k0, ejtag_debug_handler
+	jr	k0
+	 nop
+	END(excep_ejtag)
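
As a reading aid for the cache-initialisation loops above, here is a rough C model of the Config1 decode they perform. It is an illustrative sketch only (field positions per the MIPS32 Config1 layout, with the IS=7 encoding meaning 32 sets per way), not code from this patch; the D-cache path is identical using the DL/DS/DA fields.

#include <stdio.h>

/* Illustrative model of the I-cache sizing in mips_cps_core_entry:
 * line = 2 << IL, sets per way = 64 << IS (IS == 7 encodes 32 sets),
 * ways = IA + 1; the init loop then index-stores tags over
 * line * sets * ways bytes starting at KSEG0. */
static unsigned icache_bytes(unsigned config1)
{
	unsigned il = (config1 >> 19) & 0x7;	/* cf. MIPS_CONF1_IL */
	unsigned is = (config1 >> 22) & 0x7;	/* cf. MIPS_CONF1_IS */
	unsigned ia = (config1 >> 16) & 0x7;	/* cf. MIPS_CONF1_IA */

	if (!il)
		return 0;			/* no I-cache present */
	return (2u << il) * ((is == 7) ? 32 : 64u << is) * (ia + 1);
}

int main(void)
{
	/* IL=4 (32-byte lines), IS=2 (256 sets), IA=3 (4-way) => 32 KiB */
	printf("%u bytes\n", icache_bytes((4u << 19) | (2u << 22) | (3u << 16)));
	return 0;
}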
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 530f832de02c..6e8fb85ce7c3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -23,6 +23,8 @@
 #include <asm/cpu-type.h>
 #include <asm/fpu.h>
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
 #include <asm/watch.h>
 #include <asm/elf.h>
 #include <asm/spram.h>
@@ -126,6 +128,20 @@ static inline int __cpu_has_fpu(void)
 	return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
 }
 
+static inline unsigned long cpu_get_msa_id(void)
+{
+	unsigned long status, conf5, msa_id;
+
+	status = read_c0_status();
+	__enable_fpu(FPU_64BIT);
+	conf5 = read_c0_config5();
+	enable_msa();
+	msa_id = read_msa_ir();
+	write_c0_config5(conf5);
+	write_c0_status(status);
+	return msa_id;
+}
+
 static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
 {
 #ifdef __NEED_VMBITS_PROBE
@@ -166,11 +182,12 @@ static char unknown_isa[] = KERN_ERR \
 static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 {
 	unsigned int config6;
-	/*
-	 * Config6 is implementation dependent and it's currently only
-	 * used by proAptiv
-	 */
-	if (c->cputype == CPU_PROAPTIV) {
+
+	/* It's implementation dependent how the FTLB can be enabled */
+	switch (c->cputype) {
+	case CPU_PROAPTIV:
+	case CPU_P5600:
+		/* proAptiv & related cores use Config6 to enable the FTLB */
 		config6 = read_c0_config6();
 		if (enable)
 			/* Enable FTLB */
@@ -179,6 +196,7 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
 			/* Disable FTLB */
 			write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
 		back_to_back_c0_hazard();
+		break;
 	}
 }
 
@@ -301,6 +319,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->ases |= MIPS_ASE_VZ;
 	if (config3 & MIPS_CONF3_SC)
 		c->options |= MIPS_CPU_SEGMENTS;
+	if (config3 & MIPS_CONF3_MSA)
+		c->ases |= MIPS_ASE_MSA;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -367,6 +387,9 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 		config5 &= ~MIPS_CONF5_UFR;
 		write_c0_config5(config5);
 
+	if (config5 & MIPS_CONF5_EVA)
+		c->options |= MIPS_CPU_EVA;
+
 	return config5 & MIPS_CONF_M;
 }
 
@@ -398,8 +421,13 @@ static void decode_configs(struct cpuinfo_mips *c)
 
 	mips_probe_watch_registers(c);
 
-	if (cpu_has_mips_r2)
+#ifndef CONFIG_MIPS_CPS
+	if (cpu_has_mips_r2) {
 		c->core = read_c0_ebase() & 0x3ff;
+		if (cpu_has_mipsmt)
+			c->core >>= fls(core_nvpes()) - 1;
+	}
+#endif
 }
 
 #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -710,17 +738,23 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			     MIPS_CPU_LLSC;
 		c->tlbsize = 64;
 		break;
-	case PRID_IMP_LOONGSON2:
-		c->cputype = CPU_LOONGSON2;
-		__cpu_name[cpu] = "ICT Loongson-2";
-
+	case PRID_IMP_LOONGSON_64:  /* Loongson-2/3 */
 		switch (c->processor_id & PRID_REV_MASK) {
 		case PRID_REV_LOONGSON2E:
+			c->cputype = CPU_LOONGSON2;
+			__cpu_name[cpu] = "ICT Loongson-2";
 			set_elf_platform(cpu, "loongson2e");
 			break;
 		case PRID_REV_LOONGSON2F:
+			c->cputype = CPU_LOONGSON2;
+			__cpu_name[cpu] = "ICT Loongson-2";
 			set_elf_platform(cpu, "loongson2f");
 			break;
+		case PRID_REV_LOONGSON3A:
+			c->cputype = CPU_LOONGSON3;
+			__cpu_name[cpu] = "ICT Loongson-3";
+			set_elf_platform(cpu, "loongson3a");
+			break;
 		}
 
 		set_isa(c, MIPS_CPU_ISA_III);
@@ -729,7 +763,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 			     MIPS_CPU_32FPR;
 		c->tlbsize = 64;
 		break;
-	case PRID_IMP_LOONGSON1:
+	case PRID_IMP_LOONGSON_32:  /* Loongson-1 */
 		decode_configs(c);
 
 		c->cputype = CPU_LOONGSON1;
@@ -806,7 +840,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 		__cpu_name[cpu] = "MIPS 1004Kc";
 		break;
 	case PRID_IMP_1074K:
-		c->cputype = CPU_74K;
+		c->cputype = CPU_1074K;
 		__cpu_name[cpu] = "MIPS 1074Kc";
 		break;
 	case PRID_IMP_INTERAPTIV_UP:
@@ -825,6 +859,14 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 		c->cputype = CPU_PROAPTIV;
 		__cpu_name[cpu] = "MIPS proAptiv (multi)";
 		break;
+	case PRID_IMP_P5600:
+		c->cputype = CPU_P5600;
+		__cpu_name[cpu] = "MIPS P5600";
+		break;
+	case PRID_IMP_M5150:
+		c->cputype = CPU_M5150;
+		__cpu_name[cpu] = "MIPS M5150";
+		break;
 	}
 
 	decode_configs(c);
@@ -1176,6 +1218,12 @@ void cpu_probe(void)
 	else
 		c->srsets = 1;
 
+	if (cpu_has_msa) {
+		c->msa_id = cpu_get_msa_id();
+		WARN(c->msa_id & MSA_IR_WRPF,
+		     "Vector register partitioning unimplemented!");
+	}
+
 	cpu_probe_vmbits(c);
 
 #ifdef CONFIG_64BIT
@@ -1192,4 +1240,6 @@ void cpu_report(void)
 	       smp_processor_id(), c->processor_id, cpu_name_string());
 	if (c->options & MIPS_CPU_FPU)
 		printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+	if (cpu_has_msa)
+		pr_info("MSA revision is: %08x\n", c->msa_id);
 }
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 374ed74cd516..74fe73506d8f 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -90,6 +90,7 @@ static inline void ftrace_dyn_arch_init_insns(void)
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
+	mm_segment_t old_fs;
 
 	/* *(unsigned int *)ip = new_code; */
 	safe_store_code(new_code, ip, faulted);
@@ -97,7 +98,10 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 	if (unlikely(faulted))
 		return -EFAULT;
 
+	old_fs = get_fs();
+	set_fs(get_ds());
 	flush_icache_range(ip, ip + 8);
+	set_fs(old_fs);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index d84f6a509502..a9ce3408be25 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
  */
 NESTED(except_vec3_r4000, 0, sp)
 	.set	push
-	.set	mips3
+	.set	arch=r4000
 	.set	noat
 	mfc0	k1, CP0_CAUSE
 	li	k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
 	nop
 	nop
 #endif
-	.set	mips3
+	.set	arch=r4000
 	wait
 	/* end of rollback region (the region size must be power of two) */
 1:
@@ -475,8 +475,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER cpu cpu sti silent		/* #11 */
 	BUILD_HANDLER ov ov sti silent			/* #12 */
 	BUILD_HANDLER tr tr sti silent			/* #13 */
+	BUILD_HANDLER msa_fpe msa_fpe sti silent	/* #14 */
 	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
 	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
+	BUILD_HANDLER msa msa sti silent		/* #21 */
 	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
 #ifdef CONFIG_HARDWARE_WATCHPOINTS
 	/*
@@ -575,7 +577,7 @@ isrdhwr:
 	ori	k1, _THREAD_MASK
 	xori	k1, _THREAD_MASK
 	LONG_L	v1, TI_TP_VALUE(k1)
-	.set	mips3
+	.set	arch=r4000
 	eret
 	.set	mips0
 #endif
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 7b6a5b3e3acf..e712dcf18b2d 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -175,8 +175,8 @@ NESTED(smp_bootstrap, 16, sp)
 	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
 	jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
-	setup_c0_status_sec
 	smp_slave_setup
+	setup_c0_status_sec
 #ifdef CONFIG_MIPS_MT_SMTC
 	andi	t2, t2, VPECONTROL_TE
 	beqz	t2, 2f
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3553243bf9d6..837ff27950bc 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
 	if (!need_resched())
 		__asm__(
 		"	.set	push		\n"
-		"	.set	mips3		\n"
+		"	.set	arch=r4000	\n"
 		"	wait			\n"
 		"	.set	pop		\n");
 	local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
 	if (!need_resched())
 		__asm__(
 		"	.set	push		\n"
-		"	.set	mips3		\n"
+		"	.set	arch=r4000	\n"
 		"	.set	noat		\n"
 		"	mfc0	$1, $12		\n"
 		"	sync			\n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
 	unsigned long c0status = read_c0_status() | 1;	/* irqs on */
 
 	__asm__(
-	"	.set	mips3			\n"
+	"	.set	arch=r4000		\n"
 	"	cache	0x14, 0(%0)		\n"
 	"	cache	0x14, 32(%0)		\n"
 	"	sync				\n"
@@ -184,8 +184,11 @@ void __init check_wait(void)
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_1004K:
+	case CPU_1074K:
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
+	case CPU_P5600:
+	case CPU_M5150:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 5b5ddb231f26..8520dad6d4e3 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -16,7 +16,6 @@
 #include <asm/gic.h>
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/gcmpregs.h>
 #include <linux/hardirq.h>
 #include <asm-generic/bitops/find.h>
 
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index fcaac2f132f0..7afcc2f22c0d 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -32,6 +32,7 @@
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
+#include <asm/uaccess.h>
 
 static struct hard_trap_info {
 	unsigned char tt;	/* Trap type code for MIPS R3xxx and R4xxx */
@@ -208,7 +209,14 @@ void arch_kgdb_breakpoint(void)
 
 static void kgdb_call_nmi_hook(void *ignored)
 {
+	mm_segment_t old_fs;
+
+	old_fs = get_fs();
+	set_fs(get_ds());
+
 	kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+	set_fs(old_fs);
 }
 
 void kgdb_roundup_cpus(unsigned long flags)
@@ -282,6 +290,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
 	struct die_args *args = (struct die_args *)ptr;
 	struct pt_regs *regs = args->regs;
 	int trap = (regs->cp0_cause & 0x7c) >> 2;
+	mm_segment_t old_fs;
 
 #ifdef CONFIG_KPROBES
 	/*
@@ -296,11 +305,17 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
 	if (user_mode(regs))
 		return NOTIFY_DONE;
 
+	/* Kernel mode. Set correct address limit */
+	old_fs = get_fs();
+	set_fs(get_ds());
+
 	if (atomic_read(&kgdb_active) != -1)
 		kgdb_nmicallback(smp_processor_id(), regs);
 
-	if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
+	if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+		set_fs(old_fs);
 		return NOTIFY_DONE;
+	}
 
 	if (atomic_read(&kgdb_setting_breakpoint))
 		if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -310,6 +325,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
 	local_irq_enable();
 	__flush_cache_all();
 
+	set_fs(old_fs);
 	return NOTIFY_STOP;
 }
 
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 000000000000..f76f7a08412d
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_cm_base;
+void __iomem *mips_cm_l2sync_base;
+
+phys_t __mips_cm_phys_base(void)
+{
+	u32 config3 = read_c0_config3();
+	u32 cmgcr;
+
+	/* Check the CMGCRBase register is implemented */
+	if (!(config3 & MIPS_CONF3_CMGCR))
+		return 0;
+
+	/* Read the address from CMGCRBase */
+	cmgcr = read_c0_cmgcrbase();
+	return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_t mips_cm_phys_base(void)
+	__attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_t __mips_cm_l2sync_phys_base(void)
+{
+	u32 base_reg;
+
+	/*
+	 * If the L2-only sync region is already enabled then leave it at it's
+	 * current location.
+	 */
+	base_reg = read_gcr_l2_only_sync_base();
+	if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
+		return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
+
+	/* Default to following the CM */
+	return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_t mips_cm_l2sync_phys_base(void)
+	__attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+	unsigned major_rev;
+	phys_t addr;
+
+	/* L2-only sync was introduced with CM major revision 6 */
+	major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
+		CM_GCR_REV_MAJOR_SHF;
+	if (major_rev < 6)
+		return;
+
+	/* Find a location for the L2 sync region */
+	addr = mips_cm_l2sync_phys_base();
+	BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
+	if (!addr)
+		return;
+
+	/* Set the region base address & enable it */
+	write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
+
+	/* Map the region */
+	mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+	phys_t addr;
+	u32 base_reg;
+
+	addr = mips_cm_phys_base();
+	BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
+	if (!addr)
+		return -ENODEV;
+
+	mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
+	if (!mips_cm_base)
+		return -ENXIO;
+
+	/* sanity check that we're looking at a CM */
+	base_reg = read_gcr_base();
+	if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
+		pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+		       (unsigned long)addr);
+		mips_cm_base = NULL;
+		return -ENODEV;
+	}
+
+	/* set default target to memory */
+	base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
+	base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
+	write_gcr_base(base_reg);
+
+	/* disable CM regions */
+	write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+	write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+	write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+	write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+	write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+	write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+	write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
+	write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
+
+	/* probe for an L2-only sync region */
+	mips_cm_probe_l2sync();
+
+	return 0;
+}
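
A quick aid for __mips_cm_phys_base() above: the CP0 CMGCRBase register packs bits [35:15] of the 36-bit GCR physical base into its bits [31:11], which is why the masked 32-bit value is shifted left by 36 - 32 = 4. A standalone sketch, with the mask value assumed to mirror MIPS_CMGCRF_BASE:

#include <stdint.h>
#include <stdio.h>

/* Toy model: recover the 36-bit GCR physical base from CMGCRBase. */
static uint64_t cm_phys_base(uint32_t cmgcrbase)
{
	const uint32_t base_mask = ~((1u << 11) - 1);	/* bits 31:11 */

	return (uint64_t)(cmgcrbase & base_mask) << (36 - 32);
}

int main(void)
{
	/* e.g. a bootloader that programmed CMGCRBase with 0x1fbf8000 */
	printf("0x%09llx\n", (unsigned long long)cm_phys_base(0x1fbf8000));
	return 0;
}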
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 000000000000..c9dc67402969
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/errno.h>
+
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+
+void __iomem *mips_cpc_base;
+
+phys_t __weak mips_cpc_phys_base(void)
+{
+	u32 cpc_base;
+
+	if (!mips_cm_present())
+		return 0;
+
+	if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
+		return 0;
+
+	/* If the CPC is already enabled, leave it so */
+	cpc_base = read_gcr_cpc_base();
+	if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
+		return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
+
+	/* Otherwise, give it the default address & enable it */
+	cpc_base = mips_cpc_default_phys_base();
+	write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
+	return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+	phys_t addr;
+
+	addr = mips_cpc_phys_base();
+	if (!addr)
+		return -ENODEV;
+
+	mips_cpc_base = ioremap_nocache(addr, 0x8000);
+	if (!mips_cpc_base)
+		return -ENXIO;
+
+	return 0;
+}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 6e58e97fcd39..2607c3a4ff7e 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -16,12 +16,20 @@
 #include <asm/ftrace.h>
 
 extern void *__bzero(void *__s, size_t __count);
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
+					      const char *__from, long __len);
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
+				      long __len);
 extern long __strncpy_from_user_nocheck_asm(char *__to,
 					    const char *__from, long __len);
 extern long __strncpy_from_user_asm(char *__to, const char *__from,
 				    long __len);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
 extern long __strlen_user_nocheck_asm(const char *s);
 extern long __strlen_user_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
@@ -43,17 +51,31 @@ EXPORT_SYMBOL(copy_page);
  */
 EXPORT_SYMBOL(__copy_user);
 EXPORT_SYMBOL(__copy_user_inatomic);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__copy_from_user_eva);
+EXPORT_SYMBOL(__copy_in_user_eva);
+EXPORT_SYMBOL(__copy_to_user_eva);
+EXPORT_SYMBOL(__copy_user_inatomic_eva);
+#endif
 EXPORT_SYMBOL(__bzero);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
 EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
 EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
 EXPORT_SYMBOL(__strlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strlen_user_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_partial_copy_user);
+EXPORT_SYMBOL(__csum_partial_copy_kernel);
+EXPORT_SYMBOL(__csum_partial_copy_to_user);
+EXPORT_SYMBOL(__csum_partial_copy_from_user);
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 24cdf64789c3..4f2d9dece7ab 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -805,7 +805,7 @@ static void reset_counters(void *arg)
 	}
 }
 
-/* 24K/34K/1004K cores can share the same event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
 static const struct mips_perf_event mipsxxcore_event_map
 				[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
@@ -814,8 +814,8 @@ static const struct mips_perf_event mipsxxcore_event_map
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
 };
 
-/* 74K core has different branch event code. */
-static const struct mips_perf_event mipsxx74Kcore_event_map
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
 				[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
@@ -849,7 +849,7 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
 };
 
-/* 24K/34K/1004K cores can share the same cache event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
 static const struct mips_perf_event mipsxxcore_cache_map
 					[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -930,8 +930,8 @@ static const struct mips_perf_event mipsxxcore_cache_map
 },
 };
 
-/* 74K core has completely different cache event map. */
-static const struct mips_perf_event mipsxx74Kcore_cache_map
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
 					[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -978,6 +978,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
 		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
 	},
 },
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
 [C(ITLB)] = {
 	[C(OP_READ)] = {
 		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
@@ -1378,6 +1383,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 #define IS_BOTH_COUNTERS_74K_EVENT(b)					\
 	((b) == 0 || (b) == 1)
 
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
+	((b) == 0 || (b) == 1)
+
 /* 1004K */
 #define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
 	((b) == 0 || (b) == 1 || (b) == 11)
@@ -1391,6 +1400,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
 #define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
 #endif
 
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
+	((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
+	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
+	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
+	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
+	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
+	 ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
+#endif
+
 /* BMIPS5000 */
 #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
 	((b) == 0 || (b) == 1)
@@ -1442,6 +1465,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 #endif
 		break;
 	case CPU_74K:
+	case CPU_1074K:
 		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
 		else
@@ -1451,6 +1475,16 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		raw_event.range = P;
 #endif
 		break;
+	case CPU_PROAPTIV:
+		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		else
+			raw_event.cntr_mask =
+				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+		raw_event.range = P;
+#endif
+		break;
 	case CPU_1004K:
 		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1466,6 +1500,21 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 			raw_event.range = T;
 #endif
 		break;
+	case CPU_INTERAPTIV:
+		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+		else
+			raw_event.cntr_mask =
+				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+			raw_event.range = P;
+		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+			raw_event.range = V;
+		else
+			raw_event.range = T;
+#endif
+		break;
 	case CPU_BMIPS5000:
 		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
 			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1576,14 +1625,29 @@ init_hw_perf_events(void)
 		break;
 	case CPU_74K:
 		mipspmu.name = "mips/74K";
-		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
-		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
+		mipspmu.general_event_map = &mipsxxcore_event_map2;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+		break;
+	case CPU_PROAPTIV:
+		mipspmu.name = "mips/proAptiv";
+		mipspmu.general_event_map = &mipsxxcore_event_map2;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
 		break;
 	case CPU_1004K:
 		mipspmu.name = "mips/1004K";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
 		break;
+	case CPU_1074K:
+		mipspmu.name = "mips/1074K";
+		mipspmu.general_event_map = &mipsxxcore_event_map;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map;
+		break;
+	case CPU_INTERAPTIV:
+		mipspmu.name = "mips/interAptiv";
+		mipspmu.general_event_map = &mipsxxcore_event_map;
+		mipspmu.cache_event_map = &mipsxxcore_cache_map;
+		break;
 	case CPU_LOONGSON1:
 		mipspmu.name = "mips/loongson1";
 		mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 00d20974b3e7..e40971b51d2f 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,8 +17,24 @@
 
 unsigned int vced_count, vcei_count;
 
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+	return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+	return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
+	struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
 	unsigned long n = (unsigned long) v - 1;
 	unsigned int version = cpu_data[n].processor_id;
 	unsigned int fp_vers = cpu_data[n].fpu_id;
@@ -95,6 +111,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_has_mipsmt)	seq_printf(m, "%s", " mt");
 	if (cpu_has_mmips)	seq_printf(m, "%s", " micromips");
 	if (cpu_has_vz)		seq_printf(m, "%s", " vz");
+	if (cpu_has_msa)	seq_printf(m, "%s", " msa");
+	if (cpu_has_eva)	seq_printf(m, "%s", " eva");
 	seq_printf(m, "\n");
 
 	if (cpu_has_mmips) {
@@ -118,6 +136,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 			      cpu_has_vce ? "%u" : "not available");
 	seq_printf(m, fmt, 'D', vced_count);
 	seq_printf(m, fmt, 'I', vcei_count);
+
+	proc_cpuinfo_notifier_args.m = m;
+	proc_cpuinfo_notifier_args.n = n;
+
+	raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+				&proc_cpuinfo_notifier_args);
+
 	seq_printf(m, "\n");
 
 	return 0;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6ae540e133b2..60e39dc7f1eb 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
 #include <asm/cpu.h>
 #include <asm/dsp.h>
 #include <asm/fpu.h>
+#include <asm/msa.h>
 #include <asm/pgtable.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
@@ -65,6 +66,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 	clear_used_math();
 	clear_fpu_owner();
 	init_dsp();
+	clear_thread_flag(TIF_MSA_CTX_LIVE);
+	disable_msa();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
@@ -89,7 +92,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	preempt_disable();
 
-	if (is_fpu_owner())
+	if (is_msa_enabled())
+		save_msa(p);
+	else if (is_fpu_owner())
 		save_fp(p);
 
 	if (cpu_has_dsp)
@@ -157,7 +162,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 /* Fill in the fpu structure for a core dump.. */
 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
 {
-	memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+	int i;
+
+	for (i = 0; i < NUM_FPU_REGS; i++)
+		memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
+
+	memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
+	       sizeof(current->thread.fpu.fcr31));
 
 	return 1;
 }
@@ -192,7 +203,13 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 
 int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
 {
-	memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
+	int i;
+
+	for (i = 0; i < NUM_FPU_REGS; i++)
+		memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
+
+	memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
+	       sizeof(t->thread.fpu.fcr31));
 
 	return 1;
 }
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7da9b76db4d9..7271e5a83081 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -114,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
 int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 {
 	int i;
-	unsigned int tmp;
 
 	if (!access_ok(VERIFY_WRITE, data, 33 * 8))
 		return -EIO;
 
 	if (tsk_used_math(child)) {
-		fpureg_t *fregs = get_fpu_regs(child);
+		union fpureg *fregs = get_fpu_regs(child);
 		for (i = 0; i < 32; i++)
-			__put_user(fregs[i], i + (__u64 __user *) data);
+			__put_user(get_fpr64(&fregs[i], 0),
+				   i + (__u64 __user *)data);
 	} else {
 		for (i = 0; i < 32; i++)
 			__put_user((__u64) -1, i + (__u64 __user *) data);
 	}
 
 	__put_user(child->thread.fpu.fcr31, data + 64);
-
-	preempt_disable();
-	if (cpu_has_fpu) {
-		unsigned int flags;
-
-		if (cpu_has_mipsmt) {
-			unsigned int vpflags = dvpe();
-			flags = read_c0_status();
-			__enable_fpu(FPU_AS_IS);
-			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
-			write_c0_status(flags);
-			evpe(vpflags);
-		} else {
-			flags = read_c0_status();
-			__enable_fpu(FPU_AS_IS);
-			__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
-			write_c0_status(flags);
-		}
-	} else {
-		tmp = 0;
-	}
-	preempt_enable();
-	__put_user(tmp, data + 65);
+	__put_user(current_cpu_data.fpu_id, data + 65);
 
 	return 0;
 }
 
 int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 {
-	fpureg_t *fregs;
+	union fpureg *fregs;
+	u64 fpr_val;
 	int i;
 
 	if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -166,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 
 	fregs = get_fpu_regs(child);
 
-	for (i = 0; i < 32; i++)
-		__get_user(fregs[i], i + (__u64 __user *) data);
+	for (i = 0; i < 32; i++) {
+		__get_user(fpr_val, i + (__u64 __user *)data);
+		set_fpr64(&fregs[i], 0, fpr_val);
+	}
 
 	__get_user(child->thread.fpu.fcr31, data + 64);
 
@@ -300,10 +281,27 @@ static int fpr_get(struct task_struct *target,
 		   unsigned int pos, unsigned int count,
 		   void *kbuf, void __user *ubuf)
 {
-	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-				   &target->thread.fpu,
-				   0, sizeof(elf_fpregset_t));
+	unsigned i;
+	int err;
+	u64 fpr_val;
+
 	/* XXX fcr31 */
+
+	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					   &target->thread.fpu,
+					   0, sizeof(elf_fpregset_t));
+
+	for (i = 0; i < NUM_FPU_REGS; i++) {
+		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+					  &fpr_val, i * sizeof(elf_fpreg_t),
+					  (i + 1) * sizeof(elf_fpreg_t));
+		if (err)
+			return err;
+	}
+
+	return 0;
 }
 
 static int fpr_set(struct task_struct *target,
@@ -311,10 +309,27 @@ static int fpr_set(struct task_struct *target,
 		   unsigned int pos, unsigned int count,
 		   const void *kbuf, const void __user *ubuf)
 {
-	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-				  &target->thread.fpu,
-				  0, sizeof(elf_fpregset_t));
+	unsigned i;
+	int err;
+	u64 fpr_val;
+
 	/* XXX fcr31 */
+
+	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
+		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					  &target->thread.fpu,
+					  0, sizeof(elf_fpregset_t));
+
+	for (i = 0; i < NUM_FPU_REGS; i++) {
+		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fpr_val, i * sizeof(elf_fpreg_t),
+					 (i + 1) * sizeof(elf_fpreg_t));
+		if (err)
+			return err;
+		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+	}
+
+	return 0;
 }
 
 enum mips_regset {
@@ -408,7 +423,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
-		fpureg_t *fregs;
+		union fpureg *fregs;
 		unsigned long tmp = 0;
 
 		regs = task_pt_regs(child);
@@ -433,14 +448,12 @@ long arch_ptrace(struct task_struct *child, long request,
 				 * order bits of the values stored in the even
 				 * registers - unless we're using r2k_switch.S.
 				 */
-				if (addr & 1)
-					tmp = fregs[(addr & ~1) - 32] >> 32;
-				else
-					tmp = fregs[addr - 32];
+				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+						addr & 1);
 				break;
 			}
 #endif
-			tmp = fregs[addr - FPR_BASE];
+			tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
@@ -465,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request,
465 case FPC_CSR: 478 case FPC_CSR:
466 tmp = child->thread.fpu.fcr31; 479 tmp = child->thread.fpu.fcr31;
467 break; 480 break;
468 case FPC_EIR: { /* implementation / version register */ 481 case FPC_EIR:
469 unsigned int flags; 482 /* implementation / version register */
470#ifdef CONFIG_MIPS_MT_SMTC 483 tmp = current_cpu_data.fpu_id;
471 unsigned long irqflags;
472 unsigned int mtflags;
473#endif /* CONFIG_MIPS_MT_SMTC */
474
475 preempt_disable();
476 if (!cpu_has_fpu) {
477 preempt_enable();
478 break;
479 }
480
481#ifdef CONFIG_MIPS_MT_SMTC
482 /* Read-modify-write of Status must be atomic */
483 local_irq_save(irqflags);
484 mtflags = dmt();
485#endif /* CONFIG_MIPS_MT_SMTC */
486 if (cpu_has_mipsmt) {
487 unsigned int vpflags = dvpe();
488 flags = read_c0_status();
489 __enable_fpu(FPU_AS_IS);
490 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
491 write_c0_status(flags);
492 evpe(vpflags);
493 } else {
494 flags = read_c0_status();
495 __enable_fpu(FPU_AS_IS);
496 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
497 write_c0_status(flags);
498 }
499#ifdef CONFIG_MIPS_MT_SMTC
500 emt(mtflags);
501 local_irq_restore(irqflags);
502#endif /* CONFIG_MIPS_MT_SMTC */
503 preempt_enable();
504 break; 484 break;
505 }
506 case DSP_BASE ... DSP_BASE + 5: { 485 case DSP_BASE ... DSP_BASE + 5: {
507 dspreg_t *dregs; 486 dspreg_t *dregs;
508 487
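[Note: from user space the PTRACE_PEEKUSR semantics above are unchanged — peeking an odd FPR of a child running with 32-bit FP registers still yields the high word of the preceding even register, now via get_fpr32 rather than open-coded shifts. A minimal debugger-side sketch; FPR_BASE comes from the MIPS <asm/ptrace.h>, and the value below is an assumption for illustration:]

	#include <errno.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>

	#define FPR_BASE 32	/* assumed: matches the MIPS user-area layout */

	/* Read FP register n from a stopped tracee. */
	static long peek_fpr(pid_t pid, int n)
	{
		errno = 0;
		long val = ptrace(PTRACE_PEEKUSER, pid,
				  (void *)(long)(FPR_BASE + n), NULL);
		if (val == -1 && errno)
			perror("PTRACE_PEEKUSER");
		return val;
	}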
@@ -548,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request,
548 regs->regs[addr] = data; 527 regs->regs[addr] = data;
549 break; 528 break;
550 case FPR_BASE ... FPR_BASE + 31: { 529 case FPR_BASE ... FPR_BASE + 31: {
551 fpureg_t *fregs = get_fpu_regs(child); 530 union fpureg *fregs = get_fpu_regs(child);
552 531
553 if (!tsk_used_math(child)) { 532 if (!tsk_used_math(child)) {
554 /* FP not yet used */ 533 /* FP not yet used */
@@ -563,19 +542,12 @@ long arch_ptrace(struct task_struct *child, long request,
563 * order bits of the values stored in the even 542 * order bits of the values stored in the even
564 * registers - unless we're using r2k_switch.S. 543 * registers - unless we're using r2k_switch.S.
565 */ 544 */
566 if (addr & 1) { 545 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
567 fregs[(addr & ~1) - FPR_BASE] &= 546 addr & 1, data);
568 0xffffffff;
569 fregs[(addr & ~1) - FPR_BASE] |=
570 ((u64)data) << 32;
571 } else {
572 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
573 fregs[addr - FPR_BASE] |= data;
574 }
575 break; 547 break;
576 } 548 }
577#endif 549#endif
578 fregs[addr - FPR_BASE] = data; 550 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
579 break; 551 break;
580 } 552 }
581 case PC: 553 case PC:
@@ -662,13 +634,13 @@ long arch_ptrace(struct task_struct *child, long request,
662 * Notification of system call entry/exit 634 * Notification of system call entry/exit
663 * - triggered by current->work.syscall_trace 635 * - triggered by current->work.syscall_trace
664 */ 636 */
665asmlinkage void syscall_trace_enter(struct pt_regs *regs) 637asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
666{ 638{
667 long ret = 0; 639 long ret = 0;
668 user_exit(); 640 user_exit();
669 641
670 /* do the secure computing check first */ 642 if (secure_computing(syscall) == -1)
671 secure_computing_strict(regs->regs[2]); 643 return -1;
672 644
673 if (test_thread_flag(TIF_SYSCALL_TRACE) && 645 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
674 tracehook_report_syscall_entry(regs)) 646 tracehook_report_syscall_entry(regs))
@@ -677,10 +649,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
677 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 649 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
678 trace_sys_enter(regs, regs->regs[2]); 650 trace_sys_enter(regs, regs->regs[2]);
679 651
680 audit_syscall_entry(__syscall_get_arch(), 652 audit_syscall_entry(syscall_get_arch(current, regs),
681 regs->regs[2], 653 syscall,
682 regs->regs[4], regs->regs[5], 654 regs->regs[4], regs->regs[5],
683 regs->regs[6], regs->regs[7]); 655 regs->regs[6], regs->regs[7]);
656 return syscall;
684} 657}
685 658
686/* 659/*
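[Note: the secure_computing() call added above is what gives seccomp-BPF filters their bite on MIPS — a deny verdict makes syscall_trace_enter return -1 and the syscall is never dispatched. A userspace sketch that exercises that path by filtering getpid; this uses the standard seccomp-BPF API, and on MIPS the filtered nr is the ABI-specific number (e.g. 4020 for o32 getpid):]

	#include <errno.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct sock_filter filter[] = {
			/* load the syscall number */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			/* fail getpid with ENOSYS, allow everything else */
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, SYS_getpid, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(filter) / sizeof(filter[0]),
			.filter = filter,
		};

		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
			perror("PR_SET_SECCOMP");

		/* the kernel skips the syscall and returns -ENOSYS */
		if (syscall(SYS_getpid) == -1)
			perror("getpid");
		return 0;
	}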
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b8aa2dd5b00b..b40c3ca60ee5 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,7 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80 /* Read the word at location addr in the USER area. */ 80 /* Read the word at location addr in the USER area. */
81 case PTRACE_PEEKUSR: { 81 case PTRACE_PEEKUSR: {
82 struct pt_regs *regs; 82 struct pt_regs *regs;
83 fpureg_t *fregs; 83 union fpureg *fregs;
84 unsigned int tmp; 84 unsigned int tmp;
85 85
86 regs = task_pt_regs(child); 86 regs = task_pt_regs(child);
@@ -103,13 +103,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
103 * order bits of the values stored in the even 103 * order bits of the values stored in the even
104 * registers - unless we're using r2k_switch.S. 104 * registers - unless we're using r2k_switch.S.
105 */ 105 */
106 if (addr & 1) 106 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
107 tmp = fregs[(addr & ~1) - 32] >> 32; 107 addr & 1);
108 else
109 tmp = fregs[addr - 32];
110 break; 108 break;
111 } 109 }
112 tmp = fregs[addr - FPR_BASE]; 110 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
113 break; 111 break;
114 case PC: 112 case PC:
115 tmp = regs->cp0_epc; 113 tmp = regs->cp0_epc;
@@ -129,46 +127,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
129 case FPC_CSR: 127 case FPC_CSR:
130 tmp = child->thread.fpu.fcr31; 128 tmp = child->thread.fpu.fcr31;
131 break; 129 break;
132 case FPC_EIR: { /* implementation / version register */ 130 case FPC_EIR:
133 unsigned int flags; 131 /* implementation / version register */
134#ifdef CONFIG_MIPS_MT_SMTC 132 tmp = current_cpu_data.fpu_id;
135 unsigned int irqflags;
136 unsigned int mtflags;
137#endif /* CONFIG_MIPS_MT_SMTC */
138
139 preempt_disable();
140 if (!cpu_has_fpu) {
141 preempt_enable();
142 tmp = 0;
143 break;
144 }
145
146#ifdef CONFIG_MIPS_MT_SMTC
147 /* Read-modify-write of Status must be atomic */
148 local_irq_save(irqflags);
149 mtflags = dmt();
150#endif /* CONFIG_MIPS_MT_SMTC */
151
152 if (cpu_has_mipsmt) {
153 unsigned int vpflags = dvpe();
154 flags = read_c0_status();
155 __enable_fpu(FPU_AS_IS);
156 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
157 write_c0_status(flags);
158 evpe(vpflags);
159 } else {
160 flags = read_c0_status();
161 __enable_fpu(FPU_AS_IS);
162 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
163 write_c0_status(flags);
164 }
165#ifdef CONFIG_MIPS_MT_SMTC
166 emt(mtflags);
167 local_irq_restore(irqflags);
168#endif /* CONFIG_MIPS_MT_SMTC */
169 preempt_enable();
170 break; 133 break;
171 }
172 case DSP_BASE ... DSP_BASE + 5: { 134 case DSP_BASE ... DSP_BASE + 5: {
173 dspreg_t *dregs; 135 dspreg_t *dregs;
174 136
@@ -233,7 +195,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
233 regs->regs[addr] = data; 195 regs->regs[addr] = data;
234 break; 196 break;
235 case FPR_BASE ... FPR_BASE + 31: { 197 case FPR_BASE ... FPR_BASE + 31: {
236 fpureg_t *fregs = get_fpu_regs(child); 198 union fpureg *fregs = get_fpu_regs(child);
237 199
238 if (!tsk_used_math(child)) { 200 if (!tsk_used_math(child)) {
239 /* FP not yet used */ 201 /* FP not yet used */
@@ -247,18 +209,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
247 * order bits of the values stored in the even 209 * order bits of the values stored in the even
248 * registers - unless we're using r2k_switch.S. 210 * registers - unless we're using r2k_switch.S.
249 */ 211 */
250 if (addr & 1) { 212 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
251 fregs[(addr & ~1) - FPR_BASE] &= 213 addr & 1, data);
252 0xffffffff;
253 fregs[(addr & ~1) - FPR_BASE] |=
254 ((u64)data) << 32;
255 } else {
256 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
257 fregs[addr - FPR_BASE] |= data;
258 }
259 break; 214 break;
260 } 215 }
261 fregs[addr - FPR_BASE] = data; 216 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
262 break; 217 break;
263 } 218 }
264 case PC: 219 case PC:
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 73b0ddf910d4..71814272d148 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
14 */ 14 */
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/asmmacro.h>
16#include <asm/errno.h> 17#include <asm/errno.h>
17#include <asm/fpregdef.h> 18#include <asm/fpregdef.h>
18#include <asm/mipsregs.h> 19#include <asm/mipsregs.h>
@@ -30,7 +31,7 @@
30 .endm 31 .endm
31 32
32 .set noreorder 33 .set noreorder
33 .set mips3 34 .set arch=r4000
34 35
35LEAF(_save_fp_context) 36LEAF(_save_fp_context)
36 cfc1 t1, fcr31 37 cfc1 t1, fcr31
@@ -245,6 +246,218 @@ LEAF(_restore_fp_context32)
245 END(_restore_fp_context32) 246 END(_restore_fp_context32)
246#endif 247#endif
247 248
249#ifdef CONFIG_CPU_HAS_MSA
250
251 .macro save_sc_msareg wr, off, sc, tmp
252#ifdef CONFIG_64BIT
253 copy_u_d \tmp, \wr, 1
254 EX sd \tmp, (\off+(\wr*8))(\sc)
255#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
256 copy_u_w \tmp, \wr, 2
257 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
258 copy_u_w \tmp, \wr, 3
259 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
260#else /* CONFIG_CPU_BIG_ENDIAN */
261 copy_u_w \tmp, \wr, 2
262 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
263 copy_u_w \tmp, \wr, 3
264 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
265#endif
266 .endm
267
268/*
269 * int _save_msa_context(struct sigcontext *sc)
270 *
271 * Save the upper 64 bits of each vector register along with the MSA_CSR
272 * register into sc. Returns zero on success, else non-zero.
273 */
274LEAF(_save_msa_context)
275 save_sc_msareg 0, SC_MSAREGS, a0, t0
276 save_sc_msareg 1, SC_MSAREGS, a0, t0
277 save_sc_msareg 2, SC_MSAREGS, a0, t0
278 save_sc_msareg 3, SC_MSAREGS, a0, t0
279 save_sc_msareg 4, SC_MSAREGS, a0, t0
280 save_sc_msareg 5, SC_MSAREGS, a0, t0
281 save_sc_msareg 6, SC_MSAREGS, a0, t0
282 save_sc_msareg 7, SC_MSAREGS, a0, t0
283 save_sc_msareg 8, SC_MSAREGS, a0, t0
284 save_sc_msareg 9, SC_MSAREGS, a0, t0
285 save_sc_msareg 10, SC_MSAREGS, a0, t0
286 save_sc_msareg 11, SC_MSAREGS, a0, t0
287 save_sc_msareg 12, SC_MSAREGS, a0, t0
288 save_sc_msareg 13, SC_MSAREGS, a0, t0
289 save_sc_msareg 14, SC_MSAREGS, a0, t0
290 save_sc_msareg 15, SC_MSAREGS, a0, t0
291 save_sc_msareg 16, SC_MSAREGS, a0, t0
292 save_sc_msareg 17, SC_MSAREGS, a0, t0
293 save_sc_msareg 18, SC_MSAREGS, a0, t0
294 save_sc_msareg 19, SC_MSAREGS, a0, t0
295 save_sc_msareg 20, SC_MSAREGS, a0, t0
296 save_sc_msareg 21, SC_MSAREGS, a0, t0
297 save_sc_msareg 22, SC_MSAREGS, a0, t0
298 save_sc_msareg 23, SC_MSAREGS, a0, t0
299 save_sc_msareg 24, SC_MSAREGS, a0, t0
300 save_sc_msareg 25, SC_MSAREGS, a0, t0
301 save_sc_msareg 26, SC_MSAREGS, a0, t0
302 save_sc_msareg 27, SC_MSAREGS, a0, t0
303 save_sc_msareg 28, SC_MSAREGS, a0, t0
304 save_sc_msareg 29, SC_MSAREGS, a0, t0
305 save_sc_msareg 30, SC_MSAREGS, a0, t0
306 save_sc_msareg 31, SC_MSAREGS, a0, t0
307 jr ra
308 li v0, 0
309 END(_save_msa_context)
310
311#ifdef CONFIG_MIPS32_COMPAT
312
313/*
314 * int _save_msa_context32(struct sigcontext32 *sc)
315 *
316 * Save the upper 64 bits of each vector register along with the MSA_CSR
317 * register into sc. Returns zero on success, else non-zero.
318 */
319LEAF(_save_msa_context32)
320 save_sc_msareg 0, SC32_MSAREGS, a0, t0
321 save_sc_msareg 1, SC32_MSAREGS, a0, t0
322 save_sc_msareg 2, SC32_MSAREGS, a0, t0
323 save_sc_msareg 3, SC32_MSAREGS, a0, t0
324 save_sc_msareg 4, SC32_MSAREGS, a0, t0
325 save_sc_msareg 5, SC32_MSAREGS, a0, t0
326 save_sc_msareg 6, SC32_MSAREGS, a0, t0
327 save_sc_msareg 7, SC32_MSAREGS, a0, t0
328 save_sc_msareg 8, SC32_MSAREGS, a0, t0
329 save_sc_msareg 9, SC32_MSAREGS, a0, t0
330 save_sc_msareg 10, SC32_MSAREGS, a0, t0
331 save_sc_msareg 11, SC32_MSAREGS, a0, t0
332 save_sc_msareg 12, SC32_MSAREGS, a0, t0
333 save_sc_msareg 13, SC32_MSAREGS, a0, t0
334 save_sc_msareg 14, SC32_MSAREGS, a0, t0
335 save_sc_msareg 15, SC32_MSAREGS, a0, t0
336 save_sc_msareg 16, SC32_MSAREGS, a0, t0
337 save_sc_msareg 17, SC32_MSAREGS, a0, t0
338 save_sc_msareg 18, SC32_MSAREGS, a0, t0
339 save_sc_msareg 19, SC32_MSAREGS, a0, t0
340 save_sc_msareg 20, SC32_MSAREGS, a0, t0
341 save_sc_msareg 21, SC32_MSAREGS, a0, t0
342 save_sc_msareg 22, SC32_MSAREGS, a0, t0
343 save_sc_msareg 23, SC32_MSAREGS, a0, t0
344 save_sc_msareg 24, SC32_MSAREGS, a0, t0
345 save_sc_msareg 25, SC32_MSAREGS, a0, t0
346 save_sc_msareg 26, SC32_MSAREGS, a0, t0
347 save_sc_msareg 27, SC32_MSAREGS, a0, t0
348 save_sc_msareg 28, SC32_MSAREGS, a0, t0
349 save_sc_msareg 29, SC32_MSAREGS, a0, t0
350 save_sc_msareg 30, SC32_MSAREGS, a0, t0
351 save_sc_msareg 31, SC32_MSAREGS, a0, t0
352 jr ra
353 li v0, 0
354 END(_save_msa_context32)
355
356#endif /* CONFIG_MIPS32_COMPAT */
357
358 .macro restore_sc_msareg wr, off, sc, tmp
359#ifdef CONFIG_64BIT
360 EX ld \tmp, (\off+(\wr*8))(\sc)
361 insert_d \wr, 1, \tmp
362#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
363 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
364 insert_w \wr, 2, \tmp
365 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
366 insert_w \wr, 3, \tmp
367#else /* CONFIG_CPU_BIG_ENDIAN */
368 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
369 insert_w \wr, 2, \tmp
370 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
371 insert_w \wr, 3, \tmp
372#endif
373 .endm
374
375/*
376 * int _restore_msa_context(struct sigcontext *sc)
377 */
378LEAF(_restore_msa_context)
379 restore_sc_msareg 0, SC_MSAREGS, a0, t0
380 restore_sc_msareg 1, SC_MSAREGS, a0, t0
381 restore_sc_msareg 2, SC_MSAREGS, a0, t0
382 restore_sc_msareg 3, SC_MSAREGS, a0, t0
383 restore_sc_msareg 4, SC_MSAREGS, a0, t0
384 restore_sc_msareg 5, SC_MSAREGS, a0, t0
385 restore_sc_msareg 6, SC_MSAREGS, a0, t0
386 restore_sc_msareg 7, SC_MSAREGS, a0, t0
387 restore_sc_msareg 8, SC_MSAREGS, a0, t0
388 restore_sc_msareg 9, SC_MSAREGS, a0, t0
389 restore_sc_msareg 10, SC_MSAREGS, a0, t0
390 restore_sc_msareg 11, SC_MSAREGS, a0, t0
391 restore_sc_msareg 12, SC_MSAREGS, a0, t0
392 restore_sc_msareg 13, SC_MSAREGS, a0, t0
393 restore_sc_msareg 14, SC_MSAREGS, a0, t0
394 restore_sc_msareg 15, SC_MSAREGS, a0, t0
395 restore_sc_msareg 16, SC_MSAREGS, a0, t0
396 restore_sc_msareg 17, SC_MSAREGS, a0, t0
397 restore_sc_msareg 18, SC_MSAREGS, a0, t0
398 restore_sc_msareg 19, SC_MSAREGS, a0, t0
399 restore_sc_msareg 20, SC_MSAREGS, a0, t0
400 restore_sc_msareg 21, SC_MSAREGS, a0, t0
401 restore_sc_msareg 22, SC_MSAREGS, a0, t0
402 restore_sc_msareg 23, SC_MSAREGS, a0, t0
403 restore_sc_msareg 24, SC_MSAREGS, a0, t0
404 restore_sc_msareg 25, SC_MSAREGS, a0, t0
405 restore_sc_msareg 26, SC_MSAREGS, a0, t0
406 restore_sc_msareg 27, SC_MSAREGS, a0, t0
407 restore_sc_msareg 28, SC_MSAREGS, a0, t0
408 restore_sc_msareg 29, SC_MSAREGS, a0, t0
409 restore_sc_msareg 30, SC_MSAREGS, a0, t0
410 restore_sc_msareg 31, SC_MSAREGS, a0, t0
411 jr ra
412 li v0, 0
413 END(_restore_msa_context)
414
415#ifdef CONFIG_MIPS32_COMPAT
416
417/*
418 * int _restore_msa_context32(struct sigcontext32 *sc)
419 */
420LEAF(_restore_msa_context32)
421 restore_sc_msareg 0, SC32_MSAREGS, a0, t0
422 restore_sc_msareg 1, SC32_MSAREGS, a0, t0
423 restore_sc_msareg 2, SC32_MSAREGS, a0, t0
424 restore_sc_msareg 3, SC32_MSAREGS, a0, t0
425 restore_sc_msareg 4, SC32_MSAREGS, a0, t0
426 restore_sc_msareg 5, SC32_MSAREGS, a0, t0
427 restore_sc_msareg 6, SC32_MSAREGS, a0, t0
428 restore_sc_msareg 7, SC32_MSAREGS, a0, t0
429 restore_sc_msareg 8, SC32_MSAREGS, a0, t0
430 restore_sc_msareg 9, SC32_MSAREGS, a0, t0
431 restore_sc_msareg 10, SC32_MSAREGS, a0, t0
432 restore_sc_msareg 11, SC32_MSAREGS, a0, t0
433 restore_sc_msareg 12, SC32_MSAREGS, a0, t0
434 restore_sc_msareg 13, SC32_MSAREGS, a0, t0
435 restore_sc_msareg 14, SC32_MSAREGS, a0, t0
436 restore_sc_msareg 15, SC32_MSAREGS, a0, t0
437 restore_sc_msareg 16, SC32_MSAREGS, a0, t0
438 restore_sc_msareg 17, SC32_MSAREGS, a0, t0
439 restore_sc_msareg 18, SC32_MSAREGS, a0, t0
440 restore_sc_msareg 19, SC32_MSAREGS, a0, t0
441 restore_sc_msareg 20, SC32_MSAREGS, a0, t0
442 restore_sc_msareg 21, SC32_MSAREGS, a0, t0
443 restore_sc_msareg 22, SC32_MSAREGS, a0, t0
444 restore_sc_msareg 23, SC32_MSAREGS, a0, t0
445 restore_sc_msareg 24, SC32_MSAREGS, a0, t0
446 restore_sc_msareg 25, SC32_MSAREGS, a0, t0
447 restore_sc_msareg 26, SC32_MSAREGS, a0, t0
448 restore_sc_msareg 27, SC32_MSAREGS, a0, t0
449 restore_sc_msareg 28, SC32_MSAREGS, a0, t0
450 restore_sc_msareg 29, SC32_MSAREGS, a0, t0
451 restore_sc_msareg 30, SC32_MSAREGS, a0, t0
452 restore_sc_msareg 31, SC32_MSAREGS, a0, t0
453 jr ra
454 li v0, 0
455 END(_restore_msa_context32)
456
457#endif /* CONFIG_MIPS32_COMPAT */
458
459#endif /* CONFIG_CPU_HAS_MSA */
460
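[Note: every save_sc_msareg/restore_sc_msareg expansion above addresses the sigcontext as a flat array — SC_MSAREGS (SC32_MSAREGS for compat) is the byte offset of the MSA block, so vector register wr's upper half lands at SC_MSAREGS + wr * 8. The layout this implies, sketched as a struct with illustrative field names; the authoritative definition is the uapi struct sigcontext:]

	/* sketch of the MSA region the save/restore macros address */
	struct sigcontext_msa_sketch {
		unsigned long long	sc_msaregs[32];	/* upper 64 bits of w0..w31 */
		unsigned int		sc_msa_csr;	/* MSA control/status register */
	};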
248 .set reorder 461 .set reorder
249 462
250 .type fault@function 463 .type fault@function
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index cc78dd9a17c7..abacac7c33ef 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -29,18 +29,8 @@
29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
30 30
31/* 31/*
32 * FPU context is saved iff the process has used its FPU in the current
33 * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user
34 * space STATUS register should be 0, so that a process *always* starts its
35 * userland with FPU disabled after each context switch.
36 *
37 * FPU will be enabled as soon as the process accesses FPU again, through
38 * do_cpu() trap.
39 */
40
41/*
42 * task_struct *resume(task_struct *prev, task_struct *next, 32 * task_struct *resume(task_struct *prev, task_struct *next,
43 * struct thread_info *next_ti, int usedfpu) 33 * struct thread_info *next_ti, s32 fp_save)
44 */ 34 */
45 .align 5 35 .align 5
46 LEAF(resume) 36 LEAF(resume)
@@ -50,23 +40,37 @@
50 LONG_S ra, THREAD_REG31(a0) 40 LONG_S ra, THREAD_REG31(a0)
51 41
52 /* 42 /*
53 * check if we need to save FPU registers 43 * Check whether we need to save any FP context. FP context is saved
44 * iff the process has used the context with the scalar FPU or the MSA
45 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
46 * _TIF_USEDMSA respectively. switch_to will have set fp_save
47 * to the appropriate FP_SAVE_ enum value.
54 */ 48 */
49 beqz a3, 2f
55 50
56 beqz a3, 1f
57
58 PTR_L t3, TASK_THREAD_INFO(a0)
59 /* 51 /*
60 * clear saved user stack CU1 bit 52 * We do. Clear the saved CU1 bit for prev, such that next time it is
53 * scheduled it will start in userland with the FPU disabled. If the
54 * task uses the FPU then it will be enabled again via the do_cpu trap.
55 * This allows us to lazily restore the FP context.
61 */ 56 */
57 PTR_L t3, TASK_THREAD_INFO(a0)
62 LONG_L t0, ST_OFF(t3) 58 LONG_L t0, ST_OFF(t3)
63 li t1, ~ST0_CU1 59 li t1, ~ST0_CU1
64 and t0, t0, t1 60 and t0, t0, t1
65 LONG_S t0, ST_OFF(t3) 61 LONG_S t0, ST_OFF(t3)
66 62
63 /* Check whether we're saving scalar or vector context. */
64 bgtz a3, 1f
65
66 /* Save 128b MSA vector context. */
67 msa_save_all a0
68 b 2f
69
701: /* Save 32b/64b scalar FP context. */
67 fpu_save_double a0 t0 t1 # c0_status passed in t0 71 fpu_save_double a0 t0 t1 # c0_status passed in t0
68 # clobbers t1 72 # clobbers t1
691: 732:
70 74
71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
72 PTR_LA t8, __stack_chk_guard 76 PTR_LA t8, __stack_chk_guard
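[Note: the beqz/bgtz pair above turns a3 into a three-way switch — zero saves nothing, a positive value takes the scalar fpu_save_double path, and a negative value falls through to msa_save_all. The FP_SAVE_ values this implies, sketched as an enum; the values are assumed from the branch logic, not quoted from switch_to.h:]

	/* assumed encoding, matching the beqz/bgtz tests in resume() */
	enum fp_save_sketch {
		FP_SAVE_NONE	= 0,	/* beqz a3, 2f: no FP context live */
		FP_SAVE_SCALAR	= 1,	/* bgtz a3, 1f: fpu_save_double */
		FP_SAVE_VECTOR	= -1,	/* fall through: msa_save_all */
	};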
@@ -141,6 +145,26 @@ LEAF(_restore_fp)
141 jr ra 145 jr ra
142 END(_restore_fp) 146 END(_restore_fp)
143 147
148#ifdef CONFIG_CPU_HAS_MSA
149
150/*
151 * Save a thread's MSA vector context.
152 */
153LEAF(_save_msa)
154 msa_save_all a0
155 jr ra
156 END(_save_msa)
157
158/*
159 * Restore a thread's MSA vector context.
160 */
161LEAF(_restore_msa)
162 msa_restore_all a0
163 jr ra
164 END(_restore_msa)
165
166#endif
167
144/* 168/*
145 * Load the FPU with signalling NANS. This bit pattern we're using has 169 * Load the FPU with signalling NANS. This bit pattern we're using has
146 * the property that no matter whether considered as single or as double 170 * the property that no matter whether considered as single or as double
@@ -270,7 +294,7 @@ LEAF(_init_fpu)
2701: .set pop 2941: .set pop
271#endif /* CONFIG_CPU_MIPS32_R2 */ 295#endif /* CONFIG_CPU_MIPS32_R2 */
272#else 296#else
273 .set mips3 297 .set arch=r4000
274 dmtc1 t1, $f0 298 dmtc1 t1, $f0
275 dmtc1 t1, $f2 299 dmtc1 t1, $f2
276 dmtc1 t1, $f4 300 dmtc1 t1, $f4
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a5b14f48e1af..fdc70b400442 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -6,6 +6,7 @@
6 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> 6 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) 2001 MIPS Technologies, Inc. 7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 * Copyright (C) 2004 Thiemo Seufer 8 * Copyright (C) 2004 Thiemo Seufer
9 * Copyright (C) 2014 Imagination Technologies Ltd.
9 */ 10 */
10#include <linux/errno.h> 11#include <linux/errno.h>
11#include <asm/asm.h> 12#include <asm/asm.h>
@@ -74,10 +75,10 @@ NESTED(handle_sys, PT_SIZE, sp)
74 .set noreorder 75 .set noreorder
75 .set nomacro 76 .set nomacro
76 77
771: lw t5, 16(t0) # argument #5 from usp 781: user_lw(t5, 16(t0)) # argument #5 from usp
784: lw t6, 20(t0) # argument #6 from usp 794: user_lw(t6, 20(t0)) # argument #6 from usp
793: lw t7, 24(t0) # argument #7 from usp 803: user_lw(t7, 24(t0)) # argument #7 from usp
802: lw t8, 28(t0) # argument #8 from usp 812: user_lw(t8, 28(t0)) # argument #8 from usp
81 82
82 sw t5, 16(sp) # argument #5 to ksp 83 sw t5, 16(sp) # argument #5 to ksp
83 sw t6, 20(sp) # argument #6 to ksp 84 sw t6, 20(sp) # argument #6 to ksp
@@ -118,7 +119,18 @@ syscall_trace_entry:
118 SAVE_STATIC 119 SAVE_STATIC
119 move s0, t2 120 move s0, t2
120 move a0, sp 121 move a0, sp
121 jal syscall_trace_enter 122
123 /*
124 * syscall number is in v0 unless we called syscall(__NR_###)
125 * where the real syscall number is in a0
126 */
127 addiu a1, v0, __NR_O32_Linux
128 bnez v0, 1f /* __NR_syscall at offset 0 */
129 lw a1, PT_R4(sp)
130
1311: jal syscall_trace_enter
132
133 bltz v0, 2f # seccomp failed? Skip syscall
122 134
123 move t0, s0 135 move t0, s0
124 RESTORE_STATIC 136 RESTORE_STATIC
@@ -138,7 +150,7 @@ syscall_trace_entry:
138 sw t1, PT_R0(sp) # save it for syscall restarting 150 sw t1, PT_R0(sp) # save it for syscall restarting
1391: sw v0, PT_R2(sp) # result 1511: sw v0, PT_R2(sp) # result
140 152
141 j syscall_exit 1532: j syscall_exit
142 154
143/* ------------------------------------------------------------------------ */ 155/* ------------------------------------------------------------------------ */
144 156
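[Note: the a0 reload above handles the O32 indirect syscall convention, which is easy to demonstrate from user space — a direct call traps with the number in v0, while the indirect form traps with v0 = __NR_syscall and the real number in a0. A small sketch; SYS_syscall is assumed available, as it is on O32:]

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* direct: the number rides in v0 */
		long direct = syscall(SYS_getpid);

		/* indirect: v0 carries __NR_syscall (offset 0 in the O32
		 * table) and the real number travels in a0 - the case the
		 * "bnez v0, 1f" above special-cases */
		long indirect = syscall(SYS_syscall, SYS_getpid);

		printf("direct=%ld indirect=%ld\n", direct, indirect);
		return 0;
	}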
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b56e254beb15..dd99c3285aea 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,8 +80,11 @@ syscall_trace_entry:
80 SAVE_STATIC 80 SAVE_STATIC
81 move s0, t2 81 move s0, t2
82 move a0, sp 82 move a0, sp
83 daddiu a1, v0, __NR_64_Linux
83 jal syscall_trace_enter 84 jal syscall_trace_enter
84 85
86 bltz v0, 2f # seccomp failed? Skip syscall
87
85 move t0, s0 88 move t0, s0
86 RESTORE_STATIC 89 RESTORE_STATIC
87 ld a0, PT_R4(sp) # Restore argument registers 90 ld a0, PT_R4(sp) # Restore argument registers
@@ -102,7 +105,7 @@ syscall_trace_entry:
102 sd t1, PT_R0(sp) # save it for syscall restarting 105 sd t1, PT_R0(sp) # save it for syscall restarting
1031: sd v0, PT_R2(sp) # result 1061: sd v0, PT_R2(sp) # result
104 107
105 j syscall_exit 1082: j syscall_exit
106 109
107illegal_syscall: 110illegal_syscall:
108 /* This also isn't a 64-bit syscall, throw an error. */ 111 /* This also isn't a 64-bit syscall, throw an error. */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f7e5b72cf481..f68d2f4f0090 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,8 +72,11 @@ n32_syscall_trace_entry:
72 SAVE_STATIC 72 SAVE_STATIC
73 move s0, t2 73 move s0, t2
74 move a0, sp 74 move a0, sp
75 daddiu a1, v0, __NR_N32_Linux
75 jal syscall_trace_enter 76 jal syscall_trace_enter
76 77
78 bltz v0, 2f # seccomp failed? Skip syscall
79
77 move t0, s0 80 move t0, s0
78 RESTORE_STATIC 81 RESTORE_STATIC
79 ld a0, PT_R4(sp) # Restore argument registers 82 ld a0, PT_R4(sp) # Restore argument registers
@@ -94,7 +97,7 @@ n32_syscall_trace_entry:
94 sd t1, PT_R0(sp) # save it for syscall restarting 97 sd t1, PT_R0(sp) # save it for syscall restarting
951: sd v0, PT_R2(sp) # result 981: sd v0, PT_R2(sp) # result
96 99
97 j syscall_exit 1002: j syscall_exit
98 101
99not_n32_scall: 102not_n32_scall:
100 /* This is not an n32 compatibility syscall, pass it on to 103 /* This is not an n32 compatibility syscall, pass it on to
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6788727d91af..70f6acecd928 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -112,7 +112,20 @@ trace_a_syscall:
112 112
113 move s0, t2 # Save syscall pointer 113 move s0, t2 # Save syscall pointer
114 move a0, sp 114 move a0, sp
115 jal syscall_trace_enter 115 /*
116 * syscall number is in v0 unless we called syscall(__NR_###)
117 * where the real syscall number is in a0
118 * note: NR_syscall is the first O32 syscall but the macro is
119 * only defined when compiling with -mabi=32 (CONFIG_32BIT)
120 * therefore __NR_O32_Linux is used (4000)
121 */
122 addiu a1, v0, __NR_O32_Linux
123 bnez v0, 1f /* __NR_syscall at offset 0 */
124 lw a1, PT_R4(sp)
125
1261: jal syscall_trace_enter
127
128 bltz v0, 2f # seccomp failed? Skip syscall
116 129
117 move t0, s0 130 move t0, s0
118 RESTORE_STATIC 131 RESTORE_STATIC
@@ -136,7 +149,7 @@ trace_a_syscall:
136 sd t1, PT_R0(sp) # save it for syscall restarting 149 sd t1, PT_R0(sp) # save it for syscall restarting
1371: sd v0, PT_R2(sp) # result 1501: sd v0, PT_R2(sp) # result
138 151
139 j syscall_exit 1522: j syscall_exit
140 153
141/* ------------------------------------------------------------------------ */ 154/* ------------------------------------------------------------------------ */
142 155
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5199563c4403..33133d3df3e5 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle 7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2014, Imagination Technologies Ltd.
9 */ 10 */
10#include <linux/cache.h> 11#include <linux/cache.h>
11#include <linux/context_tracking.h> 12#include <linux/context_tracking.h>
@@ -30,6 +31,7 @@
30#include <linux/bitops.h> 31#include <linux/bitops.h>
31#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
32#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/msa.h>
33#include <asm/sim.h> 35#include <asm/sim.h>
34#include <asm/ucontext.h> 36#include <asm/ucontext.h>
35#include <asm/cpu-features.h> 37#include <asm/cpu-features.h>
@@ -46,8 +48,8 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
46extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 48extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
47extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 49extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
48 50
49extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); 51extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
50extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); 52extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
51 53
52struct sigframe { 54struct sigframe {
53 u32 sf_ass[4]; /* argument save space for o32 */ 55 u32 sf_ass[4]; /* argument save space for o32 */
@@ -64,17 +66,95 @@ struct rt_sigframe {
64}; 66};
65 67
66/* 68/*
69 * Thread saved context copy to/from a signal context presumed to be on the
70 * user stack, and therefore accessed with appropriate macros from uaccess.h.
71 */
72static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
73{
74 int i;
75 int err = 0;
76
77 for (i = 0; i < NUM_FPU_REGS; i++) {
78 err |=
79 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
80 &sc->sc_fpregs[i]);
81 }
82 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
83
84 return err;
85}
86
87static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
88{
89 int i;
90 int err = 0;
91 u64 fpr_val;
92
93 for (i = 0; i < NUM_FPU_REGS; i++) {
94 err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
95 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
96 }
97 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
98
99 return err;
100}
101
102/*
103 * These functions will save only the upper 64 bits of the vector registers,
104 * since the lower 64 bits have already been saved as the scalar FP context.
105 */
106static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
107{
108 int i;
109 int err = 0;
110
111 for (i = 0; i < NUM_FPU_REGS; i++) {
112 err |=
113 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
114 &sc->sc_msaregs[i]);
115 }
116 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
117
118 return err;
119}
120
121static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
122{
123 int i;
124 int err = 0;
125 u64 val;
126
127 for (i = 0; i < NUM_FPU_REGS; i++) {
128 err |= __get_user(val, &sc->sc_msaregs[i]);
129 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
130 }
131 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
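[Note: paired with copy_fp_{to,from}_sigcontext above, each 128-bit vector register round-trips through the signal frame as two independent u64 halves — index 0 via sc_fpregs[], index 1 via sc_msaregs[]. In outline, as a sketch composed from the helpers above:]

	/* sketch: rebuild vector register i from a delivered signal frame,
	 * reusing the helpers defined in the hunks above */
	static int restore_one_msa_reg_sketch(struct sigcontext __user *sc, int i)
	{
		u64 lo, hi;
		int err = 0;

		err |= __get_user(lo, &sc->sc_fpregs[i]);	/* scalar FP half */
		err |= __get_user(hi, &sc->sc_msaregs[i]);	/* MSA upper half */
		set_fpr64(&current->thread.fpu.fpr[i], 0, lo);
		set_fpr64(&current->thread.fpu.fpr[i], 1, hi);
		return err;
	}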
136/*
67 * Helper routines 137 * Helper routines
68 */ 138 */
69static int protected_save_fp_context(struct sigcontext __user *sc) 139static int protected_save_fp_context(struct sigcontext __user *sc,
140 unsigned used_math)
70{ 141{
71 int err; 142 int err;
143 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
144#ifndef CONFIG_EVA
72 while (1) { 145 while (1) {
73 lock_fpu_owner(); 146 lock_fpu_owner();
74 err = own_fpu_inatomic(1); 147 if (is_fpu_owner()) {
75 if (!err) 148 err = save_fp_context(sc);
76 err = save_fp_context(sc); /* this might fail */ 149 if (save_msa && !err)
77 unlock_fpu_owner(); 150 err = _save_msa_context(sc);
151 unlock_fpu_owner();
152 } else {
153 unlock_fpu_owner();
154 err = copy_fp_to_sigcontext(sc);
155 if (save_msa && !err)
156 err = copy_msa_to_sigcontext(sc);
157 }
78 if (likely(!err)) 158 if (likely(!err))
79 break; 159 break;
80 /* touch the sigcontext and try again */ 160 /* touch the sigcontext and try again */
@@ -84,18 +164,44 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 if (err) 164 if (err)
85 break; /* really bad sigcontext */ 165 break; /* really bad sigcontext */
86 } 166 }
167#else
168 /*
169 * EVA does not have FPU EVA instructions, so saving the FPU context directly
170 * does not work.
171 */
172 disable_msa();
173 lose_fpu(1);
174 err = save_fp_context(sc); /* this might fail */
175 if (save_msa && !err)
176 err = copy_msa_to_sigcontext(sc);
177#endif
87 return err; 178 return err;
88} 179}
89 180
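[Note: protected_save_fp_context above, like its restore counterpart below, keeps the pre-existing retry shape — attempt the copy with the FPU lock held, and on a fault touch the sigcontext without the lock so the page can be brought in, then retry. The skeleton, with the locked branch abstracted into a hypothetical helper:]

	/* sketch of the retry pattern shared by the protected_ helpers;
	 * try_copy_locked() is hypothetical and stands in for the
	 * is_fpu_owner()/else branch above */
	static int protected_copy_sketch(struct sigcontext __user *sc)
	{
		int err;

		while (1) {
			err = try_copy_locked(sc);
			if (likely(!err))
				break;
			/* fault the sigcontext page in, then retry */
			err = __put_user(0, &sc->sc_fpc_csr);
			if (err)
				break;	/* really bad sigcontext */
		}
		return err;
	}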
90static int protected_restore_fp_context(struct sigcontext __user *sc) 181static int protected_restore_fp_context(struct sigcontext __user *sc,
182 unsigned used_math)
91{ 183{
92 int err, tmp __maybe_unused; 184 int err, tmp __maybe_unused;
185 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
186#ifndef CONFIG_EVA
93 while (1) { 187 while (1) {
94 lock_fpu_owner(); 188 lock_fpu_owner();
95 err = own_fpu_inatomic(0); 189 if (is_fpu_owner()) {
96 if (!err) 190 err = restore_fp_context(sc);
97 err = restore_fp_context(sc); /* this might fail */ 191 if (restore_msa && !err) {
98 unlock_fpu_owner(); 192 enable_msa();
193 err = _restore_msa_context(sc);
194 } else {
195 /* signal handler may have used MSA */
196 disable_msa();
197 }
198 unlock_fpu_owner();
199 } else {
200 unlock_fpu_owner();
201 err = copy_fp_from_sigcontext(sc);
202 if (restore_msa && !err)
203 err = copy_msa_from_sigcontext(sc);
204 }
99 if (likely(!err)) 205 if (likely(!err))
100 break; 206 break;
101 /* touch the sigcontext and try again */ 207 /* touch the sigcontext and try again */
@@ -105,6 +211,17 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
105 if (err) 211 if (err)
106 break; /* really bad sigcontext */ 212 break; /* really bad sigcontext */
107 } 213 }
214#else
215 /*
216 * EVA does not have FPU EVA instructions, so restoring the FPU context
217 * directly does not work.
218 */
219 enable_msa();
220 lose_fpu(0);
221 err = restore_fp_context(sc); /* this might fail */
222 if (restore_msa && !err)
223 err = copy_msa_from_sigcontext(sc);
224#endif
108 return err; 225 return err;
109} 226}
110 227
@@ -135,7 +252,8 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
135 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 252 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
136 } 253 }
137 254
138 used_math = !!used_math(); 255 used_math = used_math() ? USEDMATH_FP : 0;
256 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
139 err |= __put_user(used_math, &sc->sc_used_math); 257 err |= __put_user(used_math, &sc->sc_used_math);
140 258
141 if (used_math) { 259 if (used_math) {
@@ -143,7 +261,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
143 * Save FPU state to signal context. Signal handler 261 * Save FPU state to signal context. Signal handler
144 * will "inherit" current FPU state. 262 * will "inherit" current FPU state.
145 */ 263 */
146 err |= protected_save_fp_context(sc); 264 err |= protected_save_fp_context(sc, used_math);
147 } 265 }
148 return err; 266 return err;
149} 267}
@@ -168,14 +286,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
168} 286}
169 287
170static int 288static int
171check_and_restore_fp_context(struct sigcontext __user *sc) 289check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
172{ 290{
173 int err, sig; 291 int err, sig;
174 292
175 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 293 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
176 if (err > 0) 294 if (err > 0)
177 err = 0; 295 err = 0;
178 err |= protected_restore_fp_context(sc); 296 err |= protected_restore_fp_context(sc, used_math);
179 return err ?: sig; 297 return err ?: sig;
180} 298}
181 299
@@ -215,9 +333,10 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
215 if (used_math) { 333 if (used_math) {
216 /* restore fpu context if we have used it before */ 334 /* restore fpu context if we have used it before */
217 if (!err) 335 if (!err)
218 err = check_and_restore_fp_context(sc); 336 err = check_and_restore_fp_context(sc, used_math);
219 } else { 337 } else {
220 /* signal handler may have used FPU. Give it up. */ 338 /* signal handler may have used FPU or MSA. Disable them. */
339 disable_msa();
221 lose_fpu(0); 340 lose_fpu(0);
222 } 341 }
223 342
@@ -591,23 +710,26 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
591} 710}
592 711
593#ifdef CONFIG_SMP 712#ifdef CONFIG_SMP
713#ifndef CONFIG_EVA
594static int smp_save_fp_context(struct sigcontext __user *sc) 714static int smp_save_fp_context(struct sigcontext __user *sc)
595{ 715{
596 return raw_cpu_has_fpu 716 return raw_cpu_has_fpu
597 ? _save_fp_context(sc) 717 ? _save_fp_context(sc)
598 : fpu_emulator_save_context(sc); 718 : copy_fp_to_sigcontext(sc);
599} 719}
600 720
601static int smp_restore_fp_context(struct sigcontext __user *sc) 721static int smp_restore_fp_context(struct sigcontext __user *sc)
602{ 722{
603 return raw_cpu_has_fpu 723 return raw_cpu_has_fpu
604 ? _restore_fp_context(sc) 724 ? _restore_fp_context(sc)
605 : fpu_emulator_restore_context(sc); 725 : copy_fp_from_sigcontext(sc);
606} 726}
727#endif /* CONFIG_EVA */
607#endif 728#endif
608 729
609static int signal_setup(void) 730static int signal_setup(void)
610{ 731{
732#ifndef CONFIG_EVA
611#ifdef CONFIG_SMP 733#ifdef CONFIG_SMP
612 /* For now just do the cpu_has_fpu check when the functions are invoked */ 734 /* For now just do the cpu_has_fpu check when the functions are invoked */
613 save_fp_context = smp_save_fp_context; 735 save_fp_context = smp_save_fp_context;
@@ -617,9 +739,13 @@ static int signal_setup(void)
617 save_fp_context = _save_fp_context; 739 save_fp_context = _save_fp_context;
618 restore_fp_context = _restore_fp_context; 740 restore_fp_context = _restore_fp_context;
619 } else { 741 } else {
620 save_fp_context = fpu_emulator_save_context; 742 save_fp_context = copy_fp_to_sigcontext;
621 restore_fp_context = fpu_emulator_restore_context; 743 restore_fp_context = copy_fp_from_sigcontext;
622 } 744 }
745#endif /* CONFIG_SMP */
746#else
747 save_fp_context = copy_fp_to_sigcontext;
748 restore_fp_context = copy_fp_from_sigcontext;
623#endif 749#endif
624 750
625 return 0; 751 return 0;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 3d60f7750fa8..299f956e4db3 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,6 +30,7 @@
30#include <asm/sim.h> 30#include <asm/sim.h>
31#include <asm/ucontext.h> 31#include <asm/ucontext.h>
32#include <asm/fpu.h> 32#include <asm/fpu.h>
33#include <asm/msa.h>
33#include <asm/war.h> 34#include <asm/war.h>
34#include <asm/vdso.h> 35#include <asm/vdso.h>
35#include <asm/dsp.h> 36#include <asm/dsp.h>
@@ -42,8 +43,8 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
42extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 43extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
43extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 44extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
44 45
45extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); 46extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
46extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); 47extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
47 48
48/* 49/*
49 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... 50
@@ -78,17 +79,96 @@ struct rt_sigframe32 {
78}; 79};
79 80
80/* 81/*
82 * Thread saved context copy to/from a signal context presumed to be on the
83 * user stack, and therefore accessed with appropriate macros from uaccess.h.
84 */
85static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
86{
87 int i;
88 int err = 0;
89 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
90
91 for (i = 0; i < NUM_FPU_REGS; i += inc) {
92 err |=
93 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
94 &sc->sc_fpregs[i]);
95 }
96 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
97
98 return err;
99}
100
101static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
102{
103 int i;
104 int err = 0;
105 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
106 u64 fpr_val;
107
108 for (i = 0; i < NUM_FPU_REGS; i += inc) {
109 err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
110 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
111 }
112 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
113
114 return err;
115}
116
117/*
118 * These functions will save only the upper 64 bits of the vector registers,
119 * since the lower 64 bits have already been saved as the scalar FP context.
120 */
121static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
122{
123 int i;
124 int err = 0;
125
126 for (i = 0; i < NUM_FPU_REGS; i++) {
127 err |=
128 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
129 &sc->sc_msaregs[i]);
130 }
131 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
137{
138 int i;
139 int err = 0;
140 u64 val;
141
142 for (i = 0; i < NUM_FPU_REGS; i++) {
143 err |= __get_user(val, &sc->sc_msaregs[i]);
144 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
145 }
146 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
147
148 return err;
149}
150
151/*
81 * sigcontext handlers 152 * sigcontext handlers
82 */ 153 */
83static int protected_save_fp_context32(struct sigcontext32 __user *sc) 154static int protected_save_fp_context32(struct sigcontext32 __user *sc,
155 unsigned used_math)
84{ 156{
85 int err; 157 int err;
158 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
86 while (1) { 159 while (1) {
87 lock_fpu_owner(); 160 lock_fpu_owner();
88 err = own_fpu_inatomic(1); 161 if (is_fpu_owner()) {
89 if (!err) 162 err = save_fp_context32(sc);
90 err = save_fp_context32(sc); /* this might fail */ 163 if (save_msa && !err)
91 unlock_fpu_owner(); 164 err = _save_msa_context32(sc);
165 unlock_fpu_owner();
166 } else {
167 unlock_fpu_owner();
168 err = copy_fp_to_sigcontext32(sc);
169 if (save_msa && !err)
170 err = copy_msa_to_sigcontext32(sc);
171 }
92 if (likely(!err)) 172 if (likely(!err))
93 break; 173 break;
94 /* touch the sigcontext and try again */ 174 /* touch the sigcontext and try again */
@@ -101,15 +181,29 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
101 return err; 181 return err;
102} 182}
103 183
104static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 184static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
185 unsigned used_math)
105{ 186{
106 int err, tmp __maybe_unused; 187 int err, tmp __maybe_unused;
188 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
107 while (1) { 189 while (1) {
108 lock_fpu_owner(); 190 lock_fpu_owner();
109 err = own_fpu_inatomic(0); 191 if (is_fpu_owner()) {
110 if (!err) 192 err = restore_fp_context32(sc);
111 err = restore_fp_context32(sc); /* this might fail */ 193 if (restore_msa && !err) {
112 unlock_fpu_owner(); 194 enable_msa();
195 err = _restore_msa_context32(sc);
196 } else {
197 /* signal handler may have used MSA */
198 disable_msa();
199 }
200 unlock_fpu_owner();
201 } else {
202 unlock_fpu_owner();
203 err = copy_fp_from_sigcontext32(sc);
204 if (restore_msa && !err)
205 err = copy_msa_from_sigcontext32(sc);
206 }
113 if (likely(!err)) 207 if (likely(!err))
114 break; 208 break;
115 /* touch the sigcontext and try again */ 209 /* touch the sigcontext and try again */
@@ -147,7 +241,8 @@ static int setup_sigcontext32(struct pt_regs *regs,
147 err |= __put_user(mflo3(), &sc->sc_lo3); 241 err |= __put_user(mflo3(), &sc->sc_lo3);
148 } 242 }
149 243
150 used_math = !!used_math(); 244 used_math = used_math() ? USEDMATH_FP : 0;
245 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
151 err |= __put_user(used_math, &sc->sc_used_math); 246 err |= __put_user(used_math, &sc->sc_used_math);
152 247
153 if (used_math) { 248 if (used_math) {
@@ -155,20 +250,21 @@ static int setup_sigcontext32(struct pt_regs *regs,
155 * Save FPU state to signal context. Signal handler 250 * Save FPU state to signal context. Signal handler
156 * will "inherit" current FPU state. 251 * will "inherit" current FPU state.
157 */ 252 */
158 err |= protected_save_fp_context32(sc); 253 err |= protected_save_fp_context32(sc, used_math);
159 } 254 }
160 return err; 255 return err;
161} 256}
162 257
163static int 258static int
164check_and_restore_fp_context32(struct sigcontext32 __user *sc) 259check_and_restore_fp_context32(struct sigcontext32 __user *sc,
260 unsigned used_math)
165{ 261{
166 int err, sig; 262 int err, sig;
167 263
168 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 264 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
169 if (err > 0) 265 if (err > 0)
170 err = 0; 266 err = 0;
171 err |= protected_restore_fp_context32(sc); 267 err |= protected_restore_fp_context32(sc, used_math);
172 return err ?: sig; 268 return err ?: sig;
173} 269}
174 270
@@ -205,9 +301,10 @@ static int restore_sigcontext32(struct pt_regs *regs,
205 if (used_math) { 301 if (used_math) {
206 /* restore fpu context if we have used it before */ 302 /* restore fpu context if we have used it before */
207 if (!err) 303 if (!err)
208 err = check_and_restore_fp_context32(sc); 304 err = check_and_restore_fp_context32(sc, used_math);
209 } else { 305 } else {
210 /* signal handler may have used FPU. Give it up. */ 306 /* signal handler may have used FPU or MSA. Disable them. */
307 disable_msa();
211 lose_fpu(0); 308 lose_fpu(0);
212 } 309 }
213 310
@@ -566,8 +663,8 @@ static int signal32_init(void)
566 save_fp_context32 = _save_fp_context32; 663 save_fp_context32 = _save_fp_context32;
567 restore_fp_context32 = _restore_fp_context32; 664 restore_fp_context32 = _restore_fp_context32;
568 } else { 665 } else {
569 save_fp_context32 = fpu_emulator_save_context32; 666 save_fp_context32 = copy_fp_to_sigcontext32;
570 restore_fp_context32 = fpu_emulator_restore_context32; 667 restore_fp_context32 = copy_fp_from_sigcontext32;
571 } 668 }
572 669
573 return 0; 670 return 0;
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1b925d8a610c..3ef55fb7ac03 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -39,57 +39,9 @@
39#include <asm/amon.h> 39#include <asm/amon.h>
40#include <asm/gic.h> 40#include <asm/gic.h>
41 41
42static void ipi_call_function(unsigned int cpu)
43{
44 pr_debug("CPU%d: %s cpu %d status %08x\n",
45 smp_processor_id(), __func__, cpu, read_c0_status());
46
47 gic_send_ipi(plat_ipi_call_int_xlate(cpu));
48}
49
50
51static void ipi_resched(unsigned int cpu)
52{
53 pr_debug("CPU%d: %s cpu %d status %08x\n",
54 smp_processor_id(), __func__, cpu, read_c0_status());
55
56 gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
57}
58
59/*
60 * FIXME: This isn't restricted to CMP
61 * The SMVP kernel could use GIC interrupts if available
62 */
63void cmp_send_ipi_single(int cpu, unsigned int action)
64{
65 unsigned long flags;
66
67 local_irq_save(flags);
68
69 switch (action) {
70 case SMP_CALL_FUNCTION:
71 ipi_call_function(cpu);
72 break;
73
74 case SMP_RESCHEDULE_YOURSELF:
75 ipi_resched(cpu);
76 break;
77 }
78
79 local_irq_restore(flags);
80}
81
82static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
83{
84 unsigned int i;
85
86 for_each_cpu(i, mask)
87 cmp_send_ipi_single(i, action);
88}
89
90static void cmp_init_secondary(void) 42static void cmp_init_secondary(void)
91{ 43{
92 struct cpuinfo_mips *c = &current_cpu_data; 44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
93 45
94 /* Assume GIC is present */ 46 /* Assume GIC is present */
95 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | 47 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
@@ -97,7 +49,6 @@ static void cmp_init_secondary(void)
97 49
98 /* Enable per-cpu interrupts: platform specific */ 50 /* Enable per-cpu interrupts: platform specific */
99 51
100 c->core = (read_c0_ebase() >> 1) & 0x1ff;
101#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 52#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
102 if (cpu_has_mipsmt) 53 if (cpu_has_mipsmt)
103 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
@@ -210,8 +161,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
210} 161}
211 162
212struct plat_smp_ops cmp_smp_ops = { 163struct plat_smp_ops cmp_smp_ops = {
213 .send_ipi_single = cmp_send_ipi_single, 164 .send_ipi_single = gic_send_ipi_single,
214 .send_ipi_mask = cmp_send_ipi_mask, 165 .send_ipi_mask = gic_send_ipi_mask,
215 .init_secondary = cmp_init_secondary, 166 .init_secondary = cmp_init_secondary,
216 .smp_finish = cmp_smp_finish, 167 .smp_finish = cmp_smp_finish,
217 .cpus_done = cmp_cpus_done, 168 .cpus_done = cmp_cpus_done,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 000000000000..536eec0d21b6
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/io.h>
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/smp.h>
15#include <linux/types.h>
16
17#include <asm/cacheflush.h>
18#include <asm/gic.h>
19#include <asm/mips-cm.h>
20#include <asm/mips-cpc.h>
21#include <asm/mips_mt.h>
22#include <asm/mipsregs.h>
23#include <asm/smp-cps.h>
24#include <asm/time.h>
25#include <asm/uasm.h>
26
27static DECLARE_BITMAP(core_power, NR_CPUS);
28
29struct boot_config mips_cps_bootcfg;
30
31static void init_core(void)
32{
33 unsigned int nvpes, t;
34 u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
35
36 if (!cpu_has_mipsmt)
37 return;
38
39 /* Enter VPE configuration state */
40 dvpe();
41 set_c0_mvpcontrol(MVPCONTROL_VPC);
42
43 /* Retrieve the count of VPEs in this core */
44 mvpconf0 = read_c0_mvpconf0();
45 nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
46 smp_num_siblings = nvpes;
47
48 for (t = 1; t < nvpes; t++) {
49 /* Use a 1:1 mapping of TC index to VPE index */
50 settc(t);
51
52 /* Bind 1 TC to this VPE */
53 tcbind = read_tc_c0_tcbind();
54 tcbind &= ~TCBIND_CURVPE;
55 tcbind |= t << TCBIND_CURVPE_SHIFT;
56 write_tc_c0_tcbind(tcbind);
57
58 /* Set exclusive TC, non-active, master */
59 vpeconf0 = read_vpe_c0_vpeconf0();
60 vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
61 vpeconf0 |= t << VPECONF0_XTC_SHIFT;
62 vpeconf0 |= VPECONF0_MVP;
63 write_vpe_c0_vpeconf0(vpeconf0);
64
65 /* Declare TC non-active, non-allocatable & interrupt exempt */
66 tcstatus = read_tc_c0_tcstatus();
67 tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
68 tcstatus |= TCSTATUS_IXMT;
69 write_tc_c0_tcstatus(tcstatus);
70
71 /* Halt the TC */
72 write_tc_c0_tchalt(TCHALT_H);
73
74 /* Allow only 1 TC to execute */
75 vpecontrol = read_vpe_c0_vpecontrol();
76 vpecontrol &= ~VPECONTROL_TE;
77 write_vpe_c0_vpecontrol(vpecontrol);
78
79 /* Copy (most of) Status from VPE 0 */
80 status = read_c0_status();
81 status &= ~(ST0_IM | ST0_IE | ST0_KSU);
82 status |= ST0_CU0;
83 write_vpe_c0_status(status);
84
85 /* Copy Config from VPE 0 */
86 write_vpe_c0_config(read_c0_config());
87 write_vpe_c0_config7(read_c0_config7());
88
89 /* Ensure no software interrupts are pending */
90 write_vpe_c0_cause(0);
91
92 /* Sync Count */
93 write_vpe_c0_count(read_c0_count());
94 }
95
96 /* Leave VPE configuration state */
97 clear_c0_mvpcontrol(MVPCONTROL_VPC);
98}
99
100static void __init cps_smp_setup(void)
101{
102 unsigned int ncores, nvpes, core_vpes;
103 int c, v;
104 u32 core_cfg, *entry_code;
105
106 /* Detect & record VPE topology */
107 ncores = mips_cm_numcores();
108 pr_info("VPE topology ");
109 for (c = nvpes = 0; c < ncores; c++) {
110 if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
111 write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
112 core_cfg = read_gcr_co_config();
113 core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
114 CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
115 } else {
116 core_vpes = 1;
117 }
118
119 pr_cont("%c%u", c ? ',' : '{', core_vpes);
120
121 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
122 cpu_data[nvpes + v].core = c;
123#ifdef CONFIG_MIPS_MT_SMP
124 cpu_data[nvpes + v].vpe_id = v;
125#endif
126 }
127
128 nvpes += core_vpes;
129 }
130 pr_cont("} total %u\n", nvpes);
131
132 /* Indicate present CPUs (CPU being synonymous with VPE) */
133 for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
134 set_cpu_possible(v, true);
135 set_cpu_present(v, true);
136 __cpu_number_map[v] = v;
137 __cpu_logical_map[v] = v;
138 }
139
140 /* Core 0 is powered up (we're running on it) */
141 bitmap_set(core_power, 0, 1);
142
143 /* Disable MT - we only want to run 1 TC per VPE */
144 if (cpu_has_mipsmt)
145 dmt();
146
147 /* Initialise core 0 */
148 init_core();
149
150 /* Patch the start of mips_cps_core_entry to provide the CM base */
151 entry_code = (u32 *)&mips_cps_core_entry;
152 UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
153
154 /* Make core 0 coherent with everything */
155 write_gcr_cl_coherence(0xff);
156}
157
158static void __init cps_prepare_cpus(unsigned int max_cpus)
159{
160 mips_mt_set_cpuoptions();
161}
162
163static void boot_core(struct boot_config *cfg)
164{
165 u32 access;
166
167 /* Select the appropriate core */
168 write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
169
170 /* Set its reset vector */
171 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
172
173 /* Ensure its coherency is disabled */
174 write_gcr_co_coherence(0);
175
176 /* Ensure the core can access the GCRs */
177 access = read_gcr_access();
178 access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
179 write_gcr_access(access);
180
181 /* Copy cfg */
182 mips_cps_bootcfg = *cfg;
183
184 if (mips_cpc_present()) {
185 /* Select the appropriate core */
186 write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
187
188 /* Reset the core */
189 write_cpc_co_cmd(CPC_Cx_CMD_RESET);
190 } else {
191 /* Take the core out of reset */
192 write_gcr_co_reset_release(0);
193 }
194
195 /* The core is now powered up */
196 bitmap_set(core_power, cfg->core, 1);
197}
198
199static void boot_vpe(void *info)
200{
201 struct boot_config *cfg = info;
202 u32 tcstatus, vpeconf0;
203
204 /* Enter VPE configuration state */
205 dvpe();
206 set_c0_mvpcontrol(MVPCONTROL_VPC);
207
208 settc(cfg->vpe);
209
210 /* Set the TC restart PC */
211 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
212
213 /* Activate the TC, allow interrupts */
214 tcstatus = read_tc_c0_tcstatus();
215 tcstatus &= ~TCSTATUS_IXMT;
216 tcstatus |= TCSTATUS_A;
217 write_tc_c0_tcstatus(tcstatus);
218
219 /* Clear the TC halt bit */
220 write_tc_c0_tchalt(0);
221
222 /* Activate the VPE */
223 vpeconf0 = read_vpe_c0_vpeconf0();
224 vpeconf0 |= VPECONF0_VPA;
225 write_vpe_c0_vpeconf0(vpeconf0);
226
227 /* Set the stack & global pointer registers */
228 write_tc_gpr_sp(cfg->sp);
229 write_tc_gpr_gp(cfg->gp);
230
231 /* Leave VPE configuration state */
232 clear_c0_mvpcontrol(MVPCONTROL_VPC);
233
234 /* Enable other VPEs to execute */
235 evpe(EVPE_ENABLE);
236}
237
238static void cps_boot_secondary(int cpu, struct task_struct *idle)
239{
240 struct boot_config cfg;
241 unsigned int remote;
242 int err;
243
244 cfg.core = cpu_data[cpu].core;
245 cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
246 cfg.pc = (unsigned long)&smp_bootstrap;
247 cfg.sp = __KSTK_TOS(idle);
248 cfg.gp = (unsigned long)task_thread_info(idle);
249
250 if (!test_bit(cfg.core, core_power)) {
251 /* Boot a VPE on a powered down core */
252 boot_core(&cfg);
253 return;
254 }
255
256 if (cfg.core != current_cpu_data.core) {
257 /* Boot a VPE on another powered up core */
258 for (remote = 0; remote < NR_CPUS; remote++) {
259 if (cpu_data[remote].core != cfg.core)
260 continue;
261 if (cpu_online(remote))
262 break;
263 }
264 BUG_ON(remote >= NR_CPUS);
265
266 err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
267 if (err)
268 panic("Failed to call remote CPU\n");
269 return;
270 }
271
272 BUG_ON(!cpu_has_mipsmt);
273
274 /* Boot a VPE on this core */
275 boot_vpe(&cfg);
276}
277
278static void cps_init_secondary(void)
279{
280 /* Disable MT - we only want to run 1 TC per VPE */
281 if (cpu_has_mipsmt)
282 dmt();
283
284 /* TODO: revisit this assumption once hotplug is implemented */
285 if (cpu_vpe_id(&current_cpu_data) == 0)
286 init_core();
287
288 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
289 STATUSF_IP6 | STATUSF_IP7);
290}
291
292static void cps_smp_finish(void)
293{
294 write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
295
296#ifdef CONFIG_MIPS_MT_FPAFF
297 /* If we have an FPU, enroll ourselves in the FPU-full mask */
298 if (cpu_has_fpu)
299 cpu_set(smp_processor_id(), mt_fpu_cpumask);
300#endif /* CONFIG_MIPS_MT_FPAFF */
301
302 local_irq_enable();
303}
304
305static void cps_cpus_done(void)
306{
307}
308
309static struct plat_smp_ops cps_smp_ops = {
310 .smp_setup = cps_smp_setup,
311 .prepare_cpus = cps_prepare_cpus,
312 .boot_secondary = cps_boot_secondary,
313 .init_secondary = cps_init_secondary,
314 .smp_finish = cps_smp_finish,
315 .send_ipi_single = gic_send_ipi_single,
316 .send_ipi_mask = gic_send_ipi_mask,
317 .cpus_done = cps_cpus_done,
318};
319
320int register_cps_smp_ops(void)
321{
322 if (!mips_cm_present()) {
323 pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
324 return -ENODEV;
325 }
326
327 /* check we have a GIC - we need one for IPIs */
328 if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
329 pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
330 return -ENODEV;
331 }
332
333 register_smp_ops(&cps_smp_ops);
334 return 0;
335}
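
For context, a minimal sketch of how a platform opts into these ops, in the
style of the Malta prom_init() probe order (the fallback chain is platform
policy rather than something this file mandates; the function name here is
purely illustrative):

	void __init plat_smp_probe_sketch(void)
	{
		/* Prefer CPS SMP; register_cps_smp_ops() fails cleanly
		 * with -ENODEV when no CM (or GIC) is present. */
		if (!register_cps_smp_ops())
			return;
		if (!register_cmp_smp_ops())
			return;
		if (!register_vsmp_smp_ops())
			return;
	}
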
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
new file mode 100644
index 000000000000..3bb1f92ab525
--- /dev/null
+++ b/arch/mips/kernel/smp-gic.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * Based on smp-cmp.c:
6 * Copyright (C) 2007 MIPS Technologies, Inc.
7 * Author: Chris Dearman (chris@mips.com)
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#include <linux/printk.h>
16
17#include <asm/gic.h>
18#include <asm/smp-ops.h>
19
20void gic_send_ipi_single(int cpu, unsigned int action)
21{
22 unsigned long flags;
23 unsigned int intr;
24
25 pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
26 smp_processor_id(), __func__, cpu, action, read_c0_status());
27
28 local_irq_save(flags);
29
30 switch (action) {
31 case SMP_CALL_FUNCTION:
32 intr = plat_ipi_call_int_xlate(cpu);
33 break;
34
35 case SMP_RESCHEDULE_YOURSELF:
36 intr = plat_ipi_resched_int_xlate(cpu);
37 break;
38
39 default:
40 BUG();
41 }
42
43 gic_send_ipi(intr);
44 local_irq_restore(flags);
45}
46
47void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
48{
49 unsigned int i;
50
51 for_each_cpu(i, mask)
52 gic_send_ipi_single(i, action);
53}
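
These hooks are reached through the plat_smp_ops registered above; roughly,
simplified from the generic arch/mips/kernel/smp.c path (not part of this
patch):

	/* Reschedule IPI: dispatched via the registered SMP ops. */
	void smp_send_reschedule(int cpu)
	{
		mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
	}
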
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 0fb8cefc9114..f8e13149604d 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -113,27 +113,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
113 write_tc_c0_tchalt(TCHALT_H); 113 write_tc_c0_tchalt(TCHALT_H);
114} 114}
115 115
116#ifdef CONFIG_IRQ_GIC
117static void mp_send_ipi_single(int cpu, unsigned int action)
118{
119 unsigned long flags;
120
121 local_irq_save(flags);
122
123 switch (action) {
124 case SMP_CALL_FUNCTION:
125 gic_send_ipi(plat_ipi_call_int_xlate(cpu));
126 break;
127
128 case SMP_RESCHEDULE_YOURSELF:
129 gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
130 break;
131 }
132
133 local_irq_restore(flags);
134}
135#endif
136
137static void vsmp_send_ipi_single(int cpu, unsigned int action) 116static void vsmp_send_ipi_single(int cpu, unsigned int action)
138{ 117{
139 int i; 118 int i;
@@ -142,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
142 121
143#ifdef CONFIG_IRQ_GIC 122#ifdef CONFIG_IRQ_GIC
144 if (gic_present) { 123 if (gic_present) {
145 mp_send_ipi_single(cpu, action); 124 gic_send_ipi_single(cpu, action);
146 return; 125 return;
147 } 126 }
148#endif 127#endif
@@ -313,3 +292,25 @@ struct plat_smp_ops vsmp_smp_ops = {
313 .smp_setup = vsmp_smp_setup, 292 .smp_setup = vsmp_smp_setup,
314 .prepare_cpus = vsmp_prepare_cpus, 293 .prepare_cpus = vsmp_prepare_cpus,
315}; 294};
295
296static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
297 unsigned long action_unused, void *data)
298{
299 struct proc_cpuinfo_notifier_args *pcn = data;
300 struct seq_file *m = pcn->m;
301 unsigned long n = pcn->n;
302
303 if (!cpu_has_mipsmt)
304 return NOTIFY_OK;
305
306 seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
307
308 return NOTIFY_OK;
309}
310
311static int __init proc_cpuinfo_notifier_init(void)
312{
313 return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
314}
315
316subsys_initcall(proc_cpuinfo_notifier_init);
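
The chain is raised from show_cpuinfo() in arch/mips/kernel/proc.c;
schematically (assuming the proc_cpuinfo notifier infrastructure added
alongside this change, with a local args variable for illustration):

	struct proc_cpuinfo_notifier_args args;

	args.m = m;	/* seq_file being filled */
	args.n = n;	/* CPU number being described */
	raw_notifier_call_chain(&proc_cpuinfo_chain, 0, &args);
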
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index c10aa84c9fa9..38635a996cbf 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -77,3 +77,26 @@ void init_smtc_stats(void)
77 77
78 proc_create("smtc", 0444, NULL, &smtc_proc_fops); 78 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
79} 79}
80
81static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
82 unsigned long action_unused, void *data)
83{
84 struct proc_cpuinfo_notifier_args *pcn = data;
85 struct seq_file *m = pcn->m;
86 unsigned long n = pcn->n;
87
88 if (!cpu_has_mipsmt)
89 return NOTIFY_OK;
90
91 seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
92 seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
93
94 return NOTIFY_OK;
95}
96
97static int __init proc_cpuinfo_notifier_init(void)
98{
99 return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
100}
101
102subsys_initcall(proc_cpuinfo_notifier_init);
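
With this callback registered, each CPU's /proc/cpuinfo entry gains lines
of this shape (illustrative values):

	VPE			: 0
	TC			: 3
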
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index b242e2c10ea0..67f2495def1c 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -197,16 +197,17 @@ static void probe_spram(char *type,
197} 197}
198void spram_config(void) 198void spram_config(void)
199{ 199{
200 struct cpuinfo_mips *c = &current_cpu_data;
201 unsigned int config0; 200 unsigned int config0;
202 201
203 switch (c->cputype) { 202 switch (current_cpu_type()) {
204 case CPU_24K: 203 case CPU_24K:
205 case CPU_34K: 204 case CPU_34K:
206 case CPU_74K: 205 case CPU_74K:
207 case CPU_1004K: 206 case CPU_1004K:
207 case CPU_1074K:
208 case CPU_INTERAPTIV: 208 case CPU_INTERAPTIV:
209 case CPU_PROAPTIV: 209 case CPU_PROAPTIV:
210 case CPU_P5600:
210 config0 = read_c0_config(); 211 config0 = read_c0_config();
211 /* FIXME: addresses are Malta specific */ 212 /* FIXME: addresses are Malta specific */
212 if (config0 & (1<<24)) { 213 if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b79d13f95bf0..4a4f9dda5658 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
110 110
111 if (cpu_has_llsc && R10000_LLSC_WAR) { 111 if (cpu_has_llsc && R10000_LLSC_WAR) {
112 __asm__ __volatile__ ( 112 __asm__ __volatile__ (
113 " .set mips3 \n" 113 " .set arch=r4000 \n"
114 " li %[err], 0 \n" 114 " li %[err], 0 \n"
115 "1: ll %[old], (%[addr]) \n" 115 "1: ll %[old], (%[addr]) \n"
116 " move %[tmp], %[new] \n" 116 " move %[tmp], %[new] \n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
135 : "memory"); 135 : "memory");
136 } else if (cpu_has_llsc) { 136 } else if (cpu_has_llsc) {
137 __asm__ __volatile__ ( 137 __asm__ __volatile__ (
138 " .set mips3 \n" 138 " .set arch=r4000 \n"
139 " li %[err], 0 \n" 139 " li %[err], 0 \n"
140 "1: ll %[old], (%[addr]) \n" 140 "1: ll %[old], (%[addr]) \n"
141 " move %[tmp], %[new] \n" 141 " move %[tmp], %[new] \n"
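
This LL/SC sequence backs the MIPS_ATOMIC_SET operation of sysmips(2); a
hedged user-space sketch of the call (constant per asm/sysmips.h):

	#include <sys/syscall.h>
	#include <unistd.h>

	#define MIPS_ATOMIC_SET	2001	/* per asm/sysmips.h */

	/* Atomically store 'new' at *addr; returns the old value. */
	static long atomic_set_word(unsigned long *addr, unsigned long new)
	{
		return syscall(SYS_sysmips, MIPS_ATOMIC_SET, addr, new);
	}
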
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e0b499694d18..074e857ced28 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -10,6 +10,7 @@
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 * Copyright (C) 2014, Imagination Technologies Ltd.
13 */ 14 */
14#include <linux/bug.h> 15#include <linux/bug.h>
15#include <linux/compiler.h> 16#include <linux/compiler.h>
@@ -47,6 +48,7 @@
47#include <asm/mipsregs.h> 48#include <asm/mipsregs.h>
48#include <asm/mipsmtregs.h> 49#include <asm/mipsmtregs.h>
49#include <asm/module.h> 50#include <asm/module.h>
51#include <asm/msa.h>
50#include <asm/pgtable.h> 52#include <asm/pgtable.h>
51#include <asm/ptrace.h> 53#include <asm/ptrace.h>
52#include <asm/sections.h> 54#include <asm/sections.h>
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
77extern asmlinkage void handle_cpu(void); 79extern asmlinkage void handle_cpu(void);
78extern asmlinkage void handle_ov(void); 80extern asmlinkage void handle_ov(void);
79extern asmlinkage void handle_tr(void); 81extern asmlinkage void handle_tr(void);
82extern asmlinkage void handle_msa_fpe(void);
80extern asmlinkage void handle_fpe(void); 83extern asmlinkage void handle_fpe(void);
81extern asmlinkage void handle_ftlb(void); 84extern asmlinkage void handle_ftlb(void);
85extern asmlinkage void handle_msa(void);
82extern asmlinkage void handle_mdmx(void); 86extern asmlinkage void handle_mdmx(void);
83extern asmlinkage void handle_watch(void); 87extern asmlinkage void handle_watch(void);
84extern asmlinkage void handle_mt(void); 88extern asmlinkage void handle_mt(void);
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs)
861 enum ctx_state prev_state; 865 enum ctx_state prev_state;
862 unsigned long epc; 866 unsigned long epc;
863 u16 instr[2]; 867 u16 instr[2];
868 mm_segment_t seg;
869
870 seg = get_fs();
871 if (!user_mode(regs))
872 set_fs(KERNEL_DS);
864 873
865 prev_state = exception_enter(); 874 prev_state = exception_enter();
866 if (get_isa16_mode(regs->cp0_epc)) { 875 if (get_isa16_mode(regs->cp0_epc)) {
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs)
870 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || 879 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
871 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) 880 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
872 goto out_sigsegv; 881 goto out_sigsegv;
873 opcode = (instr[0] << 16) | instr[1]; 882 opcode = (instr[0] << 16) | instr[1];
874 } else { 883 } else {
875 /* MIPS16e mode */ 884 /* MIPS16e mode */
876 if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) 885 if (__get_user(instr[0],
886 (u16 __user *)msk_isa16_mode(epc)))
877 goto out_sigsegv; 887 goto out_sigsegv;
878 bcode = (instr[0] >> 6) & 0x3f; 888 bcode = (instr[0] >> 6) & 0x3f;
879 do_trap_or_bp(regs, bcode, "Break"); 889 do_trap_or_bp(regs, bcode, "Break");
880 goto out; 890 goto out;
881 } 891 }
882 } else { 892 } else {
883 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 893 if (__get_user(opcode,
894 (unsigned int __user *) exception_epc(regs)))
884 goto out_sigsegv; 895 goto out_sigsegv;
885 } 896 }
886 897
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
918 do_trap_or_bp(regs, bcode, "Break"); 929 do_trap_or_bp(regs, bcode, "Break");
919 930
920out: 931out:
932 set_fs(seg);
921 exception_exit(prev_state); 933 exception_exit(prev_state);
922 return; 934 return;
923 935
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs)
931 u32 opcode, tcode = 0; 943 u32 opcode, tcode = 0;
932 enum ctx_state prev_state; 944 enum ctx_state prev_state;
933 u16 instr[2]; 945 u16 instr[2];
946 mm_segment_t seg;
934 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 947 unsigned long epc = msk_isa16_mode(exception_epc(regs));
935 948
949 seg = get_fs();
950 if (!user_mode(regs))
951 set_fs(KERNEL_DS);
952
936 prev_state = exception_enter(); 953 prev_state = exception_enter();
937 if (get_isa16_mode(regs->cp0_epc)) { 954 if (get_isa16_mode(regs->cp0_epc)) {
938 if (__get_user(instr[0], (u16 __user *)(epc + 0)) || 955 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
953 do_trap_or_bp(regs, tcode, "Trap"); 970 do_trap_or_bp(regs, tcode, "Trap");
954 971
955out: 972out:
973 set_fs(seg);
956 exception_exit(prev_state); 974 exception_exit(prev_state);
957 return; 975 return;
958 976
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1074 return NOTIFY_OK; 1092 return NOTIFY_OK;
1075} 1093}
1076 1094
1095static int enable_restore_fp_context(int msa)
1096{
1097 int err, was_fpu_owner;
1098
1099 if (!used_math()) {
1100 /* First time FP context user. */
1101 err = init_fpu();
1102 if (msa && !err)
1103 enable_msa();
1104 if (!err)
1105 set_used_math();
1106 return err;
1107 }
1108
1109 /*
1110 * This task has formerly used the FP context.
1111 *
1112 * If this thread has no live MSA vector context then we can simply
1113 * restore the scalar FP context. If it has live MSA vector context
1114 * (that is, it has or may have used MSA since last performing a
1115 * function call) then we'll need to restore the vector context. This
1116 * applies even if we're currently only executing a scalar FP
1117 * instruction. This is because if we were to later execute an MSA
1118 * instruction then we'd either have to:
1119 *
1120 * - Restore the vector context & clobber any registers modified by
1121 * scalar FP instructions between now & then.
1122 *
1123 * or
1124 *
1125 * - Not restore the vector context & lose the most significant bits
1126 * of all vector registers.
1127 *
1128 * Neither of those options is acceptable. We cannot restore the least
1129 * significant bits of the registers now & only restore the most
1130 * significant bits later because the most significant bits of any
1131 * vector registers whose aliased FP register is modified now will have
1132 * been zeroed. We'd have no way to know that when restoring the vector
1133 * context & thus may load an outdated value for the most significant
1134 * bits of a vector register.
1135 */
1136 if (!msa && !thread_msa_context_live())
1137 return own_fpu(1);
1138
1139 /*
1140 * This task is using or has previously used MSA. Thus we require
1141 * that Status.FR == 1.
1142 */
1143 was_fpu_owner = is_fpu_owner();
1144 err = own_fpu(0);
1145 if (err)
1146 return err;
1147
1148 enable_msa();
1149 write_msa_csr(current->thread.fpu.msacsr);
1150 set_thread_flag(TIF_USEDMSA);
1151
1152 /*
1153 * If this is the first time that the task is using MSA and it has
1154 * previously used scalar FP in this time slice then we already have
1155 * FP context which we shouldn't clobber.
1156 */
1157 if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
1158 return 0;
1159
1160 /* We need to restore the vector context. */
1161 restore_msa(current);
1162 return 0;
1163}
1164
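	/*
	 * (Illustrative aside, not from this patch: thread_msa_context_live()
	 * above is expected to reduce to a thread-flag test, roughly
	 *
	 *	cpu_has_msa && test_thread_flag(TIF_MSA_CTX_LIVE)
	 *
	 * with TIF_MSA_CTX_LIVE set once a task first uses MSA.)
	 */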
1077asmlinkage void do_cpu(struct pt_regs *regs) 1165asmlinkage void do_cpu(struct pt_regs *regs)
1078{ 1166{
1079 enum ctx_state prev_state; 1167 enum ctx_state prev_state;
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1153 /* Fall through. */ 1241 /* Fall through. */
1154 1242
1155 case 1: 1243 case 1:
1156 if (used_math()) /* Using the FPU again. */ 1244 err = enable_restore_fp_context(0);
1157 err = own_fpu(1);
1158 else { /* First time FPU user. */
1159 err = init_fpu();
1160 set_used_math();
1161 }
1162 1245
1163 if (!raw_cpu_has_fpu || err) { 1246 if (!raw_cpu_has_fpu || err) {
1164 int sig; 1247 int sig;
@@ -1183,6 +1266,37 @@ out:
1183 exception_exit(prev_state); 1266 exception_exit(prev_state);
1184} 1267}
1185 1268
1269asmlinkage void do_msa_fpe(struct pt_regs *regs)
1270{
1271 enum ctx_state prev_state;
1272
1273 prev_state = exception_enter();
1274 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1275 force_sig(SIGFPE, current);
1276 exception_exit(prev_state);
1277}
1278
1279asmlinkage void do_msa(struct pt_regs *regs)
1280{
1281 enum ctx_state prev_state;
1282 int err;
1283
1284 prev_state = exception_enter();
1285
1286 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1287 force_sig(SIGILL, current);
1288 goto out;
1289 }
1290
1291 die_if_kernel("do_msa invoked from kernel context!", regs);
1292
1293 err = enable_restore_fp_context(1);
1294 if (err)
1295 force_sig(SIGILL, current);
1296out:
1297 exception_exit(prev_state);
1298}
1299
1186asmlinkage void do_mdmx(struct pt_regs *regs) 1300asmlinkage void do_mdmx(struct pt_regs *regs)
1187{ 1301{
1188 enum ctx_state prev_state; 1302 enum ctx_state prev_state;
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void)
1337 case CPU_34K: 1451 case CPU_34K:
1338 case CPU_74K: 1452 case CPU_74K:
1339 case CPU_1004K: 1453 case CPU_1004K:
1454 case CPU_1074K:
1340 case CPU_INTERAPTIV: 1455 case CPU_INTERAPTIV:
1341 case CPU_PROAPTIV: 1456 case CPU_PROAPTIV:
1457 case CPU_P5600:
1342 { 1458 {
1343#define ERRCTL_PE 0x80000000 1459#define ERRCTL_PE 0x80000000
1344#define ERRCTL_L2P 0x00800000 1460#define ERRCTL_L2P 0x00800000
@@ -2017,6 +2133,7 @@ void __init trap_init(void)
2017 set_except_vector(11, handle_cpu); 2133 set_except_vector(11, handle_cpu);
2018 set_except_vector(12, handle_ov); 2134 set_except_vector(12, handle_ov);
2019 set_except_vector(13, handle_tr); 2135 set_except_vector(13, handle_tr);
2136 set_except_vector(14, handle_msa_fpe);
2020 2137
2021 if (current_cpu_type() == CPU_R6000 || 2138 if (current_cpu_type() == CPU_R6000 ||
2022 current_cpu_type() == CPU_R6000A) { 2139 current_cpu_type() == CPU_R6000A) {
@@ -2040,6 +2157,7 @@ void __init trap_init(void)
2040 set_except_vector(15, handle_fpe); 2157 set_except_vector(15, handle_fpe);
2041 2158
2042 set_except_vector(16, handle_ftlb); 2159 set_except_vector(16, handle_ftlb);
2160 set_except_vector(21, handle_msa);
2043 set_except_vector(22, handle_mdmx); 2161 set_except_vector(22, handle_mdmx);
2044 2162
2045 if (cpu_has_mcheck) 2163 if (cpu_has_mcheck)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index c369a5d35527..2b3517214d6d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
10 * 11 *
11 * This file contains exception handler for address error exception with the 12 * This file contains exception handler for address error exception with the
12 * special capability to execute faulting instructions in software. The 13 * special capability to execute faulting instructions in software. The
@@ -110,8 +111,8 @@ extern void show_registers(struct pt_regs *regs);
110#ifdef __BIG_ENDIAN 111#ifdef __BIG_ENDIAN
111#define LoadHW(addr, value, res) \ 112#define LoadHW(addr, value, res) \
112 __asm__ __volatile__ (".set\tnoat\n" \ 113 __asm__ __volatile__ (".set\tnoat\n" \
113 "1:\tlb\t%0, 0(%2)\n" \ 114 "1:\t"user_lb("%0", "0(%2)")"\n" \
114 "2:\tlbu\t$1, 1(%2)\n\t" \ 115 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
115 "sll\t%0, 0x8\n\t" \ 116 "sll\t%0, 0x8\n\t" \
116 "or\t%0, $1\n\t" \ 117 "or\t%0, $1\n\t" \
117 "li\t%1, 0\n" \ 118 "li\t%1, 0\n" \
@@ -130,8 +131,8 @@ extern void show_registers(struct pt_regs *regs);
130 131
131#define LoadW(addr, value, res) \ 132#define LoadW(addr, value, res) \
132 __asm__ __volatile__ ( \ 133 __asm__ __volatile__ ( \
133 "1:\tlwl\t%0, (%2)\n" \ 134 "1:\t"user_lwl("%0", "(%2)")"\n" \
134 "2:\tlwr\t%0, 3(%2)\n\t" \ 135 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
135 "li\t%1, 0\n" \ 136 "li\t%1, 0\n" \
136 "3:\n\t" \ 137 "3:\n\t" \
137 ".insn\n\t" \ 138 ".insn\n\t" \
@@ -149,8 +150,8 @@ extern void show_registers(struct pt_regs *regs);
149#define LoadHWU(addr, value, res) \ 150#define LoadHWU(addr, value, res) \
150 __asm__ __volatile__ ( \ 151 __asm__ __volatile__ ( \
151 ".set\tnoat\n" \ 152 ".set\tnoat\n" \
152 "1:\tlbu\t%0, 0(%2)\n" \ 153 "1:\t"user_lbu("%0", "0(%2)")"\n" \
153 "2:\tlbu\t$1, 1(%2)\n\t" \ 154 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
154 "sll\t%0, 0x8\n\t" \ 155 "sll\t%0, 0x8\n\t" \
155 "or\t%0, $1\n\t" \ 156 "or\t%0, $1\n\t" \
156 "li\t%1, 0\n" \ 157 "li\t%1, 0\n" \
@@ -170,8 +171,8 @@ extern void show_registers(struct pt_regs *regs);
170 171
171#define LoadWU(addr, value, res) \ 172#define LoadWU(addr, value, res) \
172 __asm__ __volatile__ ( \ 173 __asm__ __volatile__ ( \
173 "1:\tlwl\t%0, (%2)\n" \ 174 "1:\t"user_lwl("%0", "(%2)")"\n" \
174 "2:\tlwr\t%0, 3(%2)\n\t" \ 175 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
175 "dsll\t%0, %0, 32\n\t" \ 176 "dsll\t%0, %0, 32\n\t" \
176 "dsrl\t%0, %0, 32\n\t" \ 177 "dsrl\t%0, %0, 32\n\t" \
177 "li\t%1, 0\n" \ 178 "li\t%1, 0\n" \
@@ -209,9 +210,9 @@ extern void show_registers(struct pt_regs *regs);
209#define StoreHW(addr, value, res) \ 210#define StoreHW(addr, value, res) \
210 __asm__ __volatile__ ( \ 211 __asm__ __volatile__ ( \
211 ".set\tnoat\n" \ 212 ".set\tnoat\n" \
212 "1:\tsb\t%1, 1(%2)\n\t" \ 213 "1:\t"user_sb("%1", "1(%2)")"\n" \
213 "srl\t$1, %1, 0x8\n" \ 214 "srl\t$1, %1, 0x8\n" \
214 "2:\tsb\t$1, 0(%2)\n\t" \ 215 "2:\t"user_sb("$1", "0(%2)")"\n" \
215 ".set\tat\n\t" \ 216 ".set\tat\n\t" \
216 "li\t%0, 0\n" \ 217 "li\t%0, 0\n" \
217 "3:\n\t" \ 218 "3:\n\t" \
@@ -229,8 +230,8 @@ extern void show_registers(struct pt_regs *regs);
229 230
230#define StoreW(addr, value, res) \ 231#define StoreW(addr, value, res) \
231 __asm__ __volatile__ ( \ 232 __asm__ __volatile__ ( \
232 "1:\tswl\t%1,(%2)\n" \ 233 "1:\t"user_swl("%1", "(%2)")"\n" \
233 "2:\tswr\t%1, 3(%2)\n\t" \ 234 "2:\t"user_swr("%1", "3(%2)")"\n\t" \
234 "li\t%0, 0\n" \ 235 "li\t%0, 0\n" \
235 "3:\n\t" \ 236 "3:\n\t" \
236 ".insn\n\t" \ 237 ".insn\n\t" \
@@ -267,8 +268,8 @@ extern void show_registers(struct pt_regs *regs);
267#ifdef __LITTLE_ENDIAN 268#ifdef __LITTLE_ENDIAN
268#define LoadHW(addr, value, res) \ 269#define LoadHW(addr, value, res) \
269 __asm__ __volatile__ (".set\tnoat\n" \ 270 __asm__ __volatile__ (".set\tnoat\n" \
270 "1:\tlb\t%0, 1(%2)\n" \ 271 "1:\t"user_lb("%0", "1(%2)")"\n" \
271 "2:\tlbu\t$1, 0(%2)\n\t" \ 272 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
272 "sll\t%0, 0x8\n\t" \ 273 "sll\t%0, 0x8\n\t" \
273 "or\t%0, $1\n\t" \ 274 "or\t%0, $1\n\t" \
274 "li\t%1, 0\n" \ 275 "li\t%1, 0\n" \
@@ -287,8 +288,8 @@ extern void show_registers(struct pt_regs *regs);
287 288
288#define LoadW(addr, value, res) \ 289#define LoadW(addr, value, res) \
289 __asm__ __volatile__ ( \ 290 __asm__ __volatile__ ( \
290 "1:\tlwl\t%0, 3(%2)\n" \ 291 "1:\t"user_lwl("%0", "3(%2)")"\n" \
291 "2:\tlwr\t%0, (%2)\n\t" \ 292 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
292 "li\t%1, 0\n" \ 293 "li\t%1, 0\n" \
293 "3:\n\t" \ 294 "3:\n\t" \
294 ".insn\n\t" \ 295 ".insn\n\t" \
@@ -306,8 +307,8 @@ extern void show_registers(struct pt_regs *regs);
306#define LoadHWU(addr, value, res) \ 307#define LoadHWU(addr, value, res) \
307 __asm__ __volatile__ ( \ 308 __asm__ __volatile__ ( \
308 ".set\tnoat\n" \ 309 ".set\tnoat\n" \
309 "1:\tlbu\t%0, 1(%2)\n" \ 310 "1:\t"user_lbu("%0", "1(%2)")"\n" \
310 "2:\tlbu\t$1, 0(%2)\n\t" \ 311 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
311 "sll\t%0, 0x8\n\t" \ 312 "sll\t%0, 0x8\n\t" \
312 "or\t%0, $1\n\t" \ 313 "or\t%0, $1\n\t" \
313 "li\t%1, 0\n" \ 314 "li\t%1, 0\n" \
@@ -327,8 +328,8 @@ extern void show_registers(struct pt_regs *regs);
327 328
328#define LoadWU(addr, value, res) \ 329#define LoadWU(addr, value, res) \
329 __asm__ __volatile__ ( \ 330 __asm__ __volatile__ ( \
330 "1:\tlwl\t%0, 3(%2)\n" \ 331 "1:\t"user_lwl("%0", "3(%2)")"\n" \
331 "2:\tlwr\t%0, (%2)\n\t" \ 332 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
332 "dsll\t%0, %0, 32\n\t" \ 333 "dsll\t%0, %0, 32\n\t" \
333 "dsrl\t%0, %0, 32\n\t" \ 334 "dsrl\t%0, %0, 32\n\t" \
334 "li\t%1, 0\n" \ 335 "li\t%1, 0\n" \
@@ -366,9 +367,9 @@ extern void show_registers(struct pt_regs *regs);
366#define StoreHW(addr, value, res) \ 367#define StoreHW(addr, value, res) \
367 __asm__ __volatile__ ( \ 368 __asm__ __volatile__ ( \
368 ".set\tnoat\n" \ 369 ".set\tnoat\n" \
369 "1:\tsb\t%1, 0(%2)\n\t" \ 370 "1:\t"user_sb("%1", "0(%2)")"\n" \
370 "srl\t$1,%1, 0x8\n" \ 371 "srl\t$1,%1, 0x8\n" \
371 "2:\tsb\t$1, 1(%2)\n\t" \ 372 "2:\t"user_sb("$1", "1(%2)")"\n" \
372 ".set\tat\n\t" \ 373 ".set\tat\n\t" \
373 "li\t%0, 0\n" \ 374 "li\t%0, 0\n" \
374 "3:\n\t" \ 375 "3:\n\t" \
@@ -386,8 +387,8 @@ extern void show_registers(struct pt_regs *regs);
386 387
387#define StoreW(addr, value, res) \ 388#define StoreW(addr, value, res) \
388 __asm__ __volatile__ ( \ 389 __asm__ __volatile__ ( \
389 "1:\tswl\t%1, 3(%2)\n" \ 390 "1:\t"user_swl("%1", "3(%2)")"\n" \
390 "2:\tswr\t%1, (%2)\n\t" \ 391 "2:\t"user_swr("%1", "(%2)")"\n\t" \
391 "li\t%0, 0\n" \ 392 "li\t%0, 0\n" \
392 "3:\n\t" \ 393 "3:\n\t" \
393 ".insn\n\t" \ 394 ".insn\n\t" \
@@ -430,7 +431,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
430 unsigned long origpc; 431 unsigned long origpc;
431 unsigned long orig31; 432 unsigned long orig31;
432 void __user *fault_addr = NULL; 433 void __user *fault_addr = NULL;
433 434#ifdef CONFIG_EVA
435 mm_segment_t seg;
436#endif
434 origpc = (unsigned long)pc; 437 origpc = (unsigned long)pc;
435 orig31 = regs->regs[31]; 438 orig31 = regs->regs[31];
436 439
@@ -475,6 +478,88 @@ static void emulate_load_store_insn(struct pt_regs *regs,
475 * The remaining opcodes are the ones that are really of 478 * The remaining opcodes are the ones that are really of
476 * interest. 479 * interest.
477 */ 480 */
481#ifdef CONFIG_EVA
482 case spec3_op:
483 /*
484 * We can land here only from kernel code accessing user memory,
485 * so switch the address limit to user space so that the address
486 * checks work properly.
487 */
488 seg = get_fs();
489 set_fs(USER_DS);
490 switch (insn.spec3_format.func) {
491 case lhe_op:
492 if (!access_ok(VERIFY_READ, addr, 2)) {
493 set_fs(seg);
494 goto sigbus;
495 }
496 LoadHW(addr, value, res);
497 if (res) {
498 set_fs(seg);
499 goto fault;
500 }
501 compute_return_epc(regs);
502 regs->regs[insn.spec3_format.rt] = value;
503 break;
504 case lwe_op:
505 if (!access_ok(VERIFY_READ, addr, 4)) {
506 set_fs(seg);
507 goto sigbus;
508 }
509 LoadW(addr, value, res);
510 if (res) {
511 set_fs(seg);
512 goto fault;
513 }
514 compute_return_epc(regs);
515 regs->regs[insn.spec3_format.rt] = value;
516 break;
517 case lhue_op:
518 if (!access_ok(VERIFY_READ, addr, 2)) {
519 set_fs(seg);
520 goto sigbus;
521 }
522 LoadHWU(addr, value, res);
523 if (res) {
524 set_fs(seg);
525 goto fault;
526 }
527 compute_return_epc(regs);
528 regs->regs[insn.spec3_format.rt] = value;
529 break;
530 case she_op:
531 if (!access_ok(VERIFY_WRITE, addr, 2)) {
532 set_fs(seg);
533 goto sigbus;
534 }
535 compute_return_epc(regs);
536 value = regs->regs[insn.spec3_format.rt];
537 StoreHW(addr, value, res);
538 if (res) {
539 set_fs(seg);
540 goto fault;
541 }
542 break;
543 case swe_op:
544 if (!access_ok(VERIFY_WRITE, addr, 4)) {
545 set_fs(seg);
546 goto sigbus;
547 }
548 compute_return_epc(regs);
549 value = regs->regs[insn.spec3_format.rt];
550 StoreW(addr, value, res);
551 if (res) {
552 set_fs(seg);
553 goto fault;
554 }
555 break;
556 default:
557 set_fs(seg);
558 goto sigill;
559 }
560 set_fs(seg);
561 break;
562#endif
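	/*
	 * (Aside, assuming the <asm/asm-eva.h> wrappers used above: under
	 * CONFIG_EVA the user_lb()/user_lwl()/... macros emit the EVA
	 * instruction forms lbe/lwle/..., which reach user memory from
	 * kernel mode; without EVA they fall back to plain lb/lwl/....)
	 */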
478 case lh_op: 563 case lh_op:
479 if (!access_ok(VERIFY_READ, addr, 2)) 564 if (!access_ok(VERIFY_READ, addr, 2))
480 goto sigbus; 565 goto sigbus;