Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile          |   5
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c   |  14
-rw-r--r--  arch/mips/kernel/bmips_vec.S       |  56
-rw-r--r--  arch/mips/kernel/cpu-probe.c       | 103
-rw-r--r--  arch/mips/kernel/crash.c           |   1
-rw-r--r--  arch/mips/kernel/ftrace.c          |   5
-rw-r--r--  arch/mips/kernel/genex.S           |   1
-rw-r--r--  arch/mips/kernel/idle.c            |   2
-rw-r--r--  arch/mips/kernel/proc.c            |  48
-rw-r--r--  arch/mips/kernel/process.c         |   6
-rw-r--r--  arch/mips/kernel/ptrace.c          |  60
-rw-r--r--  arch/mips/kernel/ptrace32.c        |  53
-rw-r--r--  arch/mips/kernel/r4k_fpu.S         |  82
-rw-r--r--  arch/mips/kernel/r4k_switch.S      |  45
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c        | 119
-rw-r--r--  arch/mips/kernel/rtlx-mt.c         | 151
-rw-r--r--  arch/mips/kernel/rtlx.c            | 273
-rw-r--r--  arch/mips/kernel/scall32-o32.S     |   2
-rw-r--r--  arch/mips/kernel/scall64-64.S      |   2
-rw-r--r--  arch/mips/kernel/scall64-n32.S     |   2
-rw-r--r--  arch/mips/kernel/scall64-o32.S     |   2
-rw-r--r--  arch/mips/kernel/segment.c         | 110
-rw-r--r--  arch/mips/kernel/signal.c          |  10
-rw-r--r--  arch/mips/kernel/signal32.c        |  10
-rw-r--r--  arch/mips/kernel/smp-bmips.c       | 312
-rw-r--r--  arch/mips/kernel/smp-cmp.c         |   3
-rw-r--r--  arch/mips/kernel/smp-mt.c          |  28
-rw-r--r--  arch/mips/kernel/spram.c           |   3
-rw-r--r--  arch/mips/kernel/sync-r4k.c        |   1
-rw-r--r--  arch/mips/kernel/traps.c           |  71
-rw-r--r--  arch/mips/kernel/vpe-cmp.c         | 180
-rw-r--r--  arch/mips/kernel/vpe-mt.c          | 523
-rw-r--r--  arch/mips/kernel/vpe.c             | 910
33 files changed, 1941 insertions(+), 1252 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 1c1b71752c84..26c6175e1379 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
 obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
+obj-$(CONFIG_DEBUG_FS)		+= segment.o
 obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
 obj-$(CONFIG_MODULES)		+= mips_ksyms.o module.o
 obj-$(CONFIG_MODULES_USE_ELF_RELA) += module-rela.o
@@ -55,7 +56,11 @@ obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
 obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
 
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 202e581e6096..7faf5f2bee25 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -28,6 +28,18 @@ typedef double elf_fpreg_t;
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
 /*
+ * In order to be sure that we don't attempt to execute an O32 binary which
+ * requires 64 bit FP (FR=1) on a system which does not support it we refuse
+ * to execute any binary which has bits specified by the following macro set
+ * in its ELF header flags.
+ */
+#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
+# define __MIPS_O32_FP64_MUST_BE_ZERO	0
+#else
+# define __MIPS_O32_FP64_MUST_BE_ZERO	EF_MIPS_FP64
+#endif
+
+/*
  * This is used to ensure we don't load something for the wrong architecture.
  */
 #define elf_check_arch(hdr)						\
@@ -44,6 +56,8 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 	if (((__h->e_flags & EF_MIPS_ABI) != 0) &&			\
 	    ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32))		\
 		__res = 0;						\
+	if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO)		\
+		__res = 0;						\
 									\
 	__res;								\
 })
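For illustration, the elf_check_arch() clause added above reduces to a single bitmask test: on a kernel built without CONFIG_MIPS_O32_FP64_SUPPORT, any O32 binary whose ELF e_flags carry EF_MIPS_FP64 (a request for FR=1, 64-bit FP registers) is refused at exec time. A minimal C sketch of the same gate, outside the patch (the helper name is invented for the example; the EF_MIPS_FP64 value follows the kernel's ELF headers):

#include <stdbool.h>
#include <stdint.h>

#define EF_MIPS_FP64 0x00000200	/* e_flags bit: binary requires FR=1 */

/* Sketch of the FP64 gate in elf_check_arch() above. */
static bool o32_fp64_flags_ok(uint32_t e_flags, bool kernel_supports_o32_fp64)
{
	uint32_t must_be_zero = kernel_supports_o32_fp64 ? 0 : EF_MIPS_FP64;

	/* Loadable only if no forbidden flag bits are set. */
	return (e_flags & must_be_zero) == 0;
}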
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index bd79c4f9bff4..a5bf73d22fcc 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -8,11 +8,11 @@
  * Reset/NMI/re-entry vectors for BMIPS processors
  */
 
-#include <linux/init.h>
 
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
+#include <asm/cpu.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -91,12 +91,18 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
 	beqz	k0, bmips_smp_entry
 
 #if defined(CONFIG_CPU_BMIPS5000)
+	mfc0	k0, CP0_PRID
+	li	k1, PRID_IMP_BMIPS5000
+	andi	k0, 0xff00
+	bne	k0, k1, 1f
+
 	/* if we're not on core 0, this must be the SMP boot signal */
 	li	k1, (3 << 25)
 	mfc0	k0, $22
 	and	k0, k1
 	bnez	k0, bmips_smp_entry
-#endif
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
 #endif /* CONFIG_SMP */
 
 	/* nope, it's just a regular NMI */
@@ -139,7 +145,12 @@ bmips_smp_entry:
 	xori	k0, 0x04
 	mtc0	k0, CP0_CONFIG
 
+	mfc0	k0, CP0_PRID
+	andi	k0, 0xff00
 #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+	li	k1, PRID_IMP_BMIPS43XX
+	bne	k0, k1, 2f
+
 	/* initialize CPU1's local I-cache */
 	li	k0, 0x80000000
 	li	k1, 0x80010000
@@ -150,14 +161,21 @@ bmips_smp_entry:
 1:	cache	Index_Store_Tag_I, 0(k0)
 	addiu	k0, 16
 	bne	k0, k1, 1b
-#elif defined(CONFIG_CPU_BMIPS5000)
+
+	b	3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
 	/* set exception vector base */
+	li	k1, PRID_IMP_BMIPS5000
+	bne	k0, k1, 3f
+
 	la	k0, ebase
 	lw	k0, 0(k0)
 	mtc0	k0, $15, 1
 	BARRIER
-#endif
-
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
 	/* jump back to kseg0 in case we need to remap the kseg1 area */
 	la	k0, 1f
 	jr	k0
@@ -221,8 +239,18 @@ END(bmips_smp_int_vec)
 LEAF(bmips_enable_xks01)
 
 #if defined(CONFIG_XKS01)
-
+	mfc0	t0, CP0_PRID
+	andi	t2, t0, 0xff00
 #if defined(CONFIG_CPU_BMIPS4380)
+	li	t1, PRID_IMP_BMIPS43XX
+	bne	t2, t1, 1f
+
+	andi	t0, 0xff
+	addiu	t1, t0, -PRID_REV_BMIPS4380_HI
+	bgtz	t1, 2f
+	addiu	t0, -PRID_REV_BMIPS4380_LO
+	bltz	t0, 2f
+
 	mfc0	t0, $22, 3
 	li	t1, 0x1ff0
 	li	t2, (1 << 12) | (1 << 9)
@@ -231,7 +259,13 @@ LEAF(bmips_enable_xks01)
 	or	t0, t2
 	mtc0	t0, $22, 3
 	BARRIER
-#elif defined(CONFIG_CPU_BMIPS5000)
+	b	2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+	li	t1, PRID_IMP_BMIPS5000
+	bne	t2, t1, 2f
+
 	mfc0	t0, $22, 5
 	li	t1, 0x01ff
 	li	t2, (1 << 8) | (1 << 5)
@@ -240,12 +274,8 @@ LEAF(bmips_enable_xks01)
 	or	t0, t2
 	mtc0	t0, $22, 5
 	BARRIER
-#else
-
-#error Missing XKS01 setup
-
-#endif
-
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
 #endif /* defined(CONFIG_XKS01) */
 
 	jr	ra
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c814287bdf5d..530f832de02c 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -112,7 +112,7 @@ static inline unsigned long cpu_get_fpu_id(void)
 	unsigned long tmp, fpu_id;
 
 	tmp = read_c0_status();
-	__enable_fpu();
+	__enable_fpu(FPU_AS_IS);
 	fpu_id = read_32bit_cp1_register(CP1_REVISION);
 	write_c0_status(tmp);
 	return fpu_id;
@@ -163,6 +163,25 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
 static char unknown_isa[] = KERN_ERR \
 	"Unsupported ISA type, c0.config0: %d.";
 
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
+{
+	unsigned int config6;
+	/*
+	 * Config6 is implementation dependent and it's currently only
+	 * used by proAptiv
+	 */
+	if (c->cputype == CPU_PROAPTIV) {
+		config6 = read_c0_config6();
+		if (enable)
+			/* Enable FTLB */
+			write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+		else
+			/* Disable FTLB */
+			write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+		back_to_back_c0_hazard();
+	}
+}
+
 static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 {
 	unsigned int config0;
@@ -170,8 +189,13 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
 
 	config0 = read_c0_config();
 
-	if (((config0 & MIPS_CONF_MT) >> 7) == 1)
+	/*
+	 * Look for Standard TLB or Dual VTLB and FTLB
+	 */
+	if ((((config0 & MIPS_CONF_MT) >> 7) == 1) ||
+	    (((config0 & MIPS_CONF_MT) >> 7) == 4))
 		c->options |= MIPS_CPU_TLB;
+
 	isa = (config0 & MIPS_CONF_AT) >> 13;
 	switch (isa) {
 	case 0:
@@ -226,8 +250,11 @@ static inline unsigned int decode_config1(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_FPU;
 		c->options |= MIPS_CPU_32FPR;
 	}
-	if (cpu_has_tlb)
+	if (cpu_has_tlb) {
 		c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+		c->tlbsizevtlb = c->tlbsize;
+		c->tlbsizeftlbsets = 0;
+	}
 
 	return config1 & MIPS_CONF_M;
 }
@@ -272,6 +299,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_MICROMIPS;
 	if (config3 & MIPS_CONF3_VZ)
 		c->ases |= MIPS_ASE_VZ;
+	if (config3 & MIPS_CONF3_SC)
+		c->options |= MIPS_CPU_SEGMENTS;
 
 	return config3 & MIPS_CONF_M;
 }
@@ -279,12 +308,51 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 {
 	unsigned int config4;
+	unsigned int newcf4;
+	unsigned int mmuextdef;
+	unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
 
 	config4 = read_c0_config4();
 
-	if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
-	    && cpu_has_tlb)
-		c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+	if (cpu_has_tlb) {
+		if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+			c->options |= MIPS_CPU_TLBINV;
+		mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+		switch (mmuextdef) {
+		case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+			c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+			c->tlbsizevtlb = c->tlbsize;
+			break;
+		case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+			c->tlbsizevtlb +=
+				((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+				  MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+			c->tlbsize = c->tlbsizevtlb;
+			ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+			/* fall through */
+		case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+			newcf4 = (config4 & ~ftlb_page) |
+				(page_size_ftlb(mmuextdef) <<
+				 MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+			write_c0_config4(newcf4);
+			back_to_back_c0_hazard();
+			config4 = read_c0_config4();
+			if (config4 != newcf4) {
+				pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+				       PAGE_SIZE, config4);
+				/* Switch FTLB off */
+				set_ftlb_enable(c, 0);
+				break;
+			}
+			c->tlbsizeftlbsets = 1 <<
+				((config4 & MIPS_CONF4_FTLBSETS) >>
+				 MIPS_CONF4_FTLBSETS_SHIFT);
+			c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+					      MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+			c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+			break;
+		}
+	}
 
 	c->kscratch_mask = (config4 >> 16) & 0xff;
 
@@ -312,6 +380,9 @@ static void decode_configs(struct cpuinfo_mips *c)
 
 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;
 
+	/* Enable FTLB if present */
+	set_ftlb_enable(c, 1);
+
 	ok = decode_config0(c);		/* Read Config registers.  */
 	BUG_ON(!ok);			/* Arch spec violation!  */
 	if (ok)
@@ -675,7 +746,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
 
 static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
-	decode_configs(c);
 	switch (c->processor_id & PRID_IMP_MASK) {
 	case PRID_IMP_4KC:
 		c->cputype = CPU_4KC;
@@ -739,8 +809,26 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 		c->cputype = CPU_74K;
 		__cpu_name[cpu] = "MIPS 1074Kc";
 		break;
+	case PRID_IMP_INTERAPTIV_UP:
+		c->cputype = CPU_INTERAPTIV;
+		__cpu_name[cpu] = "MIPS interAptiv";
+		break;
+	case PRID_IMP_INTERAPTIV_MP:
+		c->cputype = CPU_INTERAPTIV;
+		__cpu_name[cpu] = "MIPS interAptiv (multi)";
+		break;
+	case PRID_IMP_PROAPTIV_UP:
+		c->cputype = CPU_PROAPTIV;
+		__cpu_name[cpu] = "MIPS proAptiv";
+		break;
+	case PRID_IMP_PROAPTIV_MP:
+		c->cputype = CPU_PROAPTIV;
+		__cpu_name[cpu] = "MIPS proAptiv (multi)";
+		break;
 	}
 
+	decode_configs(c);
+
 	spram_config();
 }
 
@@ -943,6 +1031,7 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
 
 	switch (c->processor_id & PRID_IMP_MASK) {
 	case PRID_IMP_NETLOGIC_XLP2XX:
+	case PRID_IMP_NETLOGIC_XLP9XX:
 		c->cputype = CPU_XLP;
 		__cpu_name[cpu] = "Broadcom XLPII";
 		break;
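To make the decode_config4() arithmetic above concrete: with the VTLB/FTLB MMU extension, the total TLB entry count is the VTLB size plus FTLB ways times FTLB sets, where the ways field is encoded as (ways - 2) and the sets field as a power-of-two exponent. A hedged standalone sketch of that computation (the field values below are examples, not read from real hardware):

#include <stdio.h>

/* Illustrative mirror of the FTLB sizing done in decode_config4() above. */
static unsigned int total_tlb_entries(unsigned int vtlb_entries,
				      unsigned int ftlbsets_field,
				      unsigned int ftlbways_field)
{
	unsigned int sets = 1u << ftlbsets_field;	/* sets: power of two */
	unsigned int ways = ftlbways_field + 2;		/* field encodes ways - 2 */

	return vtlb_entries + ways * sets;
}

int main(void)
{
	/* e.g. a 64-entry VTLB plus a 4-way, 128-set FTLB -> 576 entries */
	printf("%u\n", total_tlb_entries(64, 7, 2));
	return 0;
}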
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 93aa302948d7..d21264681e97 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -5,7 +5,6 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <linux/delay.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/types.h>
 #include <linux/sched.h>
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba258361b..374ed74cd516 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
 	safe_store_code(new_code1, ip, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	ip += 4;
-	safe_store_code(new_code2, ip, faulted);
+	safe_store_code(new_code2, ip + 4, faulted);
 	if (unlikely(faulted))
 		return -EFAULT;
-	flush_icache_range(ip, ip + 8); /* original ip + 12 */
+	flush_icache_range(ip, ip + 8);
 	return 0;
 }
 #endif
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 47d7583cd67f..d84f6a509502 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -476,6 +476,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER ov ov sti silent			/* #12 */
 	BUILD_HANDLER tr tr sti silent			/* #13 */
 	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
+	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
 	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
 #ifdef CONFIG_HARDWARE_WATCHPOINTS
 	/*
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index f7991d95bff9..3553243bf9d6 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -184,6 +184,8 @@ void __init check_wait(void)
 	case CPU_24K:
 	case CPU_34K:
 	case CPU_1004K:
+	case CPU_INTERAPTIV:
+	case CPU_PROAPTIV:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 8c58d8a84bf3..00d20974b3e7 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -65,26 +65,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 				      cpu_data[n].watch_reg_masks[i]);
 		seq_printf(m, "]\n");
 	}
-	if (cpu_has_mips_r) {
+
 	seq_printf(m, "isa\t\t\t: mips1");
 	if (cpu_has_mips_2)
 		seq_printf(m, "%s", " mips2");
 	if (cpu_has_mips_3)
 		seq_printf(m, "%s", " mips3");
 	if (cpu_has_mips_4)
 		seq_printf(m, "%s", " mips4");
 	if (cpu_has_mips_5)
 		seq_printf(m, "%s", " mips5");
 	if (cpu_has_mips32r1)
 		seq_printf(m, "%s", " mips32r1");
 	if (cpu_has_mips32r2)
 		seq_printf(m, "%s", " mips32r2");
 	if (cpu_has_mips64r1)
 		seq_printf(m, "%s", " mips64r1");
 	if (cpu_has_mips64r2)
 		seq_printf(m, "%s", " mips64r2");
 	seq_printf(m, "\n");
-	}
 
 	seq_printf(m, "ASEs implemented\t:");
 	if (cpu_has_mips16)	seq_printf(m, "%s", " mips16");
@@ -107,7 +106,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	seq_printf(m, "kscratch registers\t: %d\n",
 		   hweight8(cpu_data[n].kscratch_mask));
 	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
-
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+	if (cpu_has_mipsmt) {
+		seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+#if defined(CONFIG_MIPS_MT_SMTC)
+		seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
+#endif
+	}
+#endif
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
 		cpu_has_vce ?  "%u" : "not available");
 	seq_printf(m, fmt, 'D', vced_count);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ddc76103e78c..6ae540e133b2 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -60,15 +60,11 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 
 	/* New thread loses kernel privileges. */
 	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
-#ifdef CONFIG_64BIT
-	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
-#endif
 	status |= KU_USER;
 	regs->cp0_status = status;
 	clear_used_math();
 	clear_fpu_owner();
-	if (cpu_has_dsp)
-		__init_dsp();
+	init_dsp();
 	regs->cp0_epc = pc;
 	regs->regs[29] = sp;
 }
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index b52e1d2b33e0..7da9b76db4d9 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -137,13 +137,13 @@ int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
 	if (cpu_has_mipsmt) {
 		unsigned int vpflags = dvpe();
 		flags = read_c0_status();
-		__enable_fpu();
+		__enable_fpu(FPU_AS_IS);
 		__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
 		write_c0_status(flags);
 		evpe(vpflags);
 	} else {
 		flags = read_c0_status();
-		__enable_fpu();
+		__enable_fpu(FPU_AS_IS);
 		__asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
 		write_c0_status(flags);
 	}
@@ -408,6 +408,7 @@ long arch_ptrace(struct task_struct *child, long request,
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
+		fpureg_t *fregs;
 		unsigned long tmp = 0;
 
 		regs = task_pt_regs(child);
@@ -418,26 +419,28 @@ long arch_ptrace(struct task_struct *child, long request,
 			tmp = regs->regs[addr];
 			break;
 		case FPR_BASE ... FPR_BASE + 31:
-			if (tsk_used_math(child)) {
-				fpureg_t *fregs = get_fpu_regs(child);
+			if (!tsk_used_math(child)) {
+				/* FP not yet used */
+				tmp = -1;
+				break;
+			}
+			fregs = get_fpu_regs(child);
 
 #ifdef CONFIG_32BIT
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
 				 * registers - unless we're using r2k_switch.S.
 				 */
 				if (addr & 1)
-					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
+					tmp = fregs[(addr & ~1) - 32] >> 32;
 				else
-					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-#endif
-#ifdef CONFIG_64BIT
-				tmp = fregs[addr - FPR_BASE];
-#endif
-			} else {
-				tmp = -1;	/* FP not yet used */
+					tmp = fregs[addr - 32];
+				break;
 			}
+#endif
+			tmp = fregs[addr - FPR_BASE];
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
@@ -483,13 +486,13 @@ long arch_ptrace(struct task_struct *child, long request,
 		if (cpu_has_mipsmt) {
 			unsigned int vpflags = dvpe();
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 			evpe(vpflags);
 		} else {
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 		}
@@ -554,22 +557,25 @@ long arch_ptrace(struct task_struct *child, long request,
 			child->thread.fpu.fcr31 = 0;
 		}
 #ifdef CONFIG_32BIT
-			/*
-			 * The odd registers are actually the high order bits
-			 * of the values stored in the even registers - unless
-			 * we're using r2k_switch.S.
-			 */
-			if (addr & 1) {
-				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
-				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
-			} else {
-				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-				fregs[addr - FPR_BASE] |= data;
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
+				/*
+				 * The odd registers are actually the high
+				 * order bits of the values stored in the even
+				 * registers - unless we're using r2k_switch.S.
+				 */
+				if (addr & 1) {
+					fregs[(addr & ~1) - FPR_BASE] &=
+						0xffffffff;
+					fregs[(addr & ~1) - FPR_BASE] |=
+						((u64)data) << 32;
+				} else {
+					fregs[addr - FPR_BASE] &= ~0xffffffffLL;
+					fregs[addr - FPR_BASE] |= data;
+				}
+				break;
 			}
 #endif
-#ifdef CONFIG_64BIT
 			fregs[addr - FPR_BASE] = data;
-#endif
 			break;
 		}
 		case PC:
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 9486055ba660..b8aa2dd5b00b 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,6 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 	/* Read the word at location addr in the USER area. */
 	case PTRACE_PEEKUSR: {
 		struct pt_regs *regs;
+		fpureg_t *fregs;
 		unsigned int tmp;
 
 		regs = task_pt_regs(child);
@@ -90,21 +91,25 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			tmp = regs->regs[addr];
 			break;
 		case FPR_BASE ... FPR_BASE + 31:
-			if (tsk_used_math(child)) {
-				fpureg_t *fregs = get_fpu_regs(child);
-
+			if (!tsk_used_math(child)) {
+				/* FP not yet used */
+				tmp = -1;
+				break;
+			}
+			fregs = get_fpu_regs(child);
+			if (test_thread_flag(TIF_32BIT_FPREGS)) {
 				/*
 				 * The odd registers are actually the high
 				 * order bits of the values stored in the even
 				 * registers - unless we're using r2k_switch.S.
 				 */
 				if (addr & 1)
-					tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
+					tmp = fregs[(addr & ~1) - 32] >> 32;
 				else
-					tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-			} else {
-				tmp = -1;	/* FP not yet used */
+					tmp = fregs[addr - 32];
+				break;
 			}
+			tmp = fregs[addr - FPR_BASE];
 			break;
 		case PC:
 			tmp = regs->cp0_epc;
@@ -147,13 +152,13 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 		if (cpu_has_mipsmt) {
 			unsigned int vpflags = dvpe();
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 			evpe(vpflags);
 		} else {
 			flags = read_c0_status();
-			__enable_fpu();
+			__enable_fpu(FPU_AS_IS);
 			__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 			write_c0_status(flags);
 		}
@@ -236,20 +241,24 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 				       sizeof(child->thread.fpu));
 			child->thread.fpu.fcr31 = 0;
 		}
-			/*
-			 * The odd registers are actually the high order bits
-			 * of the values stored in the even registers - unless
-			 * we're using r2k_switch.S.
-			 */
-			if (addr & 1) {
-				fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
-				fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
-			} else {
-				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
-				/* Must cast, lest sign extension fill upper
-				   bits!  */
-				fregs[addr - FPR_BASE] |= (unsigned int)data;
+		if (test_thread_flag(TIF_32BIT_FPREGS)) {
+			/*
+			 * The odd registers are actually the high
+			 * order bits of the values stored in the even
+			 * registers - unless we're using r2k_switch.S.
+			 */
+			if (addr & 1) {
+				fregs[(addr & ~1) - FPR_BASE] &=
+					0xffffffff;
+				fregs[(addr & ~1) - FPR_BASE] |=
+					((u64)data) << 32;
+			} else {
+				fregs[addr - FPR_BASE] &= ~0xffffffffLL;
+				fregs[addr - FPR_BASE] |= data;
+			}
+			break;
 		}
+		fregs[addr - FPR_BASE] = data;
 		break;
 	}
 	case PC:
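The comment repeated through these ptrace hunks describes the o32 register model that the new TIF_32BIT_FPREGS path preserves: with 32-bit FP registers, odd-numbered FPRs alias the high 32 bits of the preceding even 64-bit register. A small illustrative sketch of that packing (hypothetical helper names, not from the patch):

#include <stdint.h>

typedef uint64_t fpureg_t;

/* Read o32 FPR 'n' from an array of 64-bit registers: odd regs are the
 * high halves of the even ones (the TIF_32BIT_FPREGS layout above). */
static uint32_t o32_read_fpr(const fpureg_t *fregs, unsigned int n)
{
	if (n & 1)
		return (uint32_t)(fregs[n & ~1u] >> 32);
	return (uint32_t)fregs[n];
}

/* Write o32 FPR 'n', touching only the relevant 32-bit half. */
static void o32_write_fpr(fpureg_t *fregs, unsigned int n, uint32_t data)
{
	if (n & 1) {
		fregs[n & ~1u] &= 0xffffffffull;
		fregs[n & ~1u] |= (uint64_t)data << 32;
	} else {
		fregs[n] &= ~0xffffffffull;
		fregs[n] |= data;
	}
}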
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 55ffe149dae9..73b0ddf910d4 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,7 +35,15 @@
 LEAF(_save_fp_context)
 	cfc1	t1, fcr31
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	.set	push
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	mips64r2
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip storing odd if FR=0
+	 nop
+#endif
 	/* Store the 16 odd double precision registers */
 	EX	sdc1 $f1, SC_FPREGS+8(a0)
 	EX	sdc1 $f3, SC_FPREGS+24(a0)
@@ -53,6 +61,7 @@ LEAF(_save_fp_context)
 	EX	sdc1 $f27, SC_FPREGS+216(a0)
 	EX	sdc1 $f29, SC_FPREGS+232(a0)
 	EX	sdc1 $f31, SC_FPREGS+248(a0)
+1:	.set	pop
 #endif
 
 	/* Store the 16 even double precision registers */
@@ -82,7 +91,31 @@ LEAF(_save_fp_context)
 LEAF(_save_fp_context32)
 	cfc1	t1, fcr31
 
-	EX	sdc1 $f0, SC32_FPREGS+0(a0)
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip storing odd if FR=0
+	 nop
+
+	/* Store the 16 odd double precision registers */
+	EX	sdc1 $f1, SC32_FPREGS+8(a0)
+	EX	sdc1 $f3, SC32_FPREGS+24(a0)
+	EX	sdc1 $f5, SC32_FPREGS+40(a0)
+	EX	sdc1 $f7, SC32_FPREGS+56(a0)
+	EX	sdc1 $f9, SC32_FPREGS+72(a0)
+	EX	sdc1 $f11, SC32_FPREGS+88(a0)
+	EX	sdc1 $f13, SC32_FPREGS+104(a0)
+	EX	sdc1 $f15, SC32_FPREGS+120(a0)
+	EX	sdc1 $f17, SC32_FPREGS+136(a0)
+	EX	sdc1 $f19, SC32_FPREGS+152(a0)
+	EX	sdc1 $f21, SC32_FPREGS+168(a0)
+	EX	sdc1 $f23, SC32_FPREGS+184(a0)
+	EX	sdc1 $f25, SC32_FPREGS+200(a0)
+	EX	sdc1 $f27, SC32_FPREGS+216(a0)
+	EX	sdc1 $f29, SC32_FPREGS+232(a0)
+	EX	sdc1 $f31, SC32_FPREGS+248(a0)
+
+	/* Store the 16 even double precision registers */
+1:	EX	sdc1 $f0, SC32_FPREGS+0(a0)
 	EX	sdc1 $f2, SC32_FPREGS+16(a0)
 	EX	sdc1 $f4, SC32_FPREGS+32(a0)
 	EX	sdc1 $f6, SC32_FPREGS+48(a0)
@@ -113,8 +146,17 @@ LEAF(_save_fp_context32)
  * - cp1 status/control register
  */
 LEAF(_restore_fp_context)
-	EX	lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+	EX	lw t1, SC_FPC_CSR(a0)
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+	.set	push
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	mips64r2
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip loading odd if FR=0
+	 nop
+#endif
 	EX	ldc1 $f1, SC_FPREGS+8(a0)
 	EX	ldc1 $f3, SC_FPREGS+24(a0)
 	EX	ldc1 $f5, SC_FPREGS+40(a0)
@@ -131,6 +173,7 @@ LEAF(_restore_fp_context)
 	EX	ldc1 $f27, SC_FPREGS+216(a0)
 	EX	ldc1 $f29, SC_FPREGS+232(a0)
 	EX	ldc1 $f31, SC_FPREGS+248(a0)
+1:	.set	pop
 #endif
 	EX	ldc1 $f0, SC_FPREGS+0(a0)
 	EX	ldc1 $f2, SC_FPREGS+16(a0)
@@ -148,7 +191,7 @@ LEAF(_restore_fp_context)
 	EX	ldc1 $f26, SC_FPREGS+208(a0)
 	EX	ldc1 $f28, SC_FPREGS+224(a0)
 	EX	ldc1 $f30, SC_FPREGS+240(a0)
-	ctc1	t0, fcr31
+	ctc1	t1, fcr31
 	jr	ra
 	 li	v0, 0					# success
 END(_restore_fp_context)
@@ -156,8 +199,31 @@ LEAF(_restore_fp_context)
 #ifdef CONFIG_MIPS32_COMPAT
 LEAF(_restore_fp_context32)
 	/* Restore an o32 sigcontext.  */
-	EX	lw t0, SC32_FPC_CSR(a0)
-	EX	ldc1 $f0, SC32_FPREGS+0(a0)
+	EX	lw t1, SC32_FPC_CSR(a0)
+
+	mfc0	t0, CP0_STATUS
+	sll	t0, t0, 5
+	bgez	t0, 1f			# skip loading odd if FR=0
+	 nop
+
+	EX	ldc1 $f1, SC32_FPREGS+8(a0)
+	EX	ldc1 $f3, SC32_FPREGS+24(a0)
+	EX	ldc1 $f5, SC32_FPREGS+40(a0)
+	EX	ldc1 $f7, SC32_FPREGS+56(a0)
+	EX	ldc1 $f9, SC32_FPREGS+72(a0)
+	EX	ldc1 $f11, SC32_FPREGS+88(a0)
+	EX	ldc1 $f13, SC32_FPREGS+104(a0)
+	EX	ldc1 $f15, SC32_FPREGS+120(a0)
+	EX	ldc1 $f17, SC32_FPREGS+136(a0)
+	EX	ldc1 $f19, SC32_FPREGS+152(a0)
+	EX	ldc1 $f21, SC32_FPREGS+168(a0)
+	EX	ldc1 $f23, SC32_FPREGS+184(a0)
+	EX	ldc1 $f25, SC32_FPREGS+200(a0)
+	EX	ldc1 $f27, SC32_FPREGS+216(a0)
+	EX	ldc1 $f29, SC32_FPREGS+232(a0)
+	EX	ldc1 $f31, SC32_FPREGS+248(a0)
+
+1:	EX	ldc1 $f0, SC32_FPREGS+0(a0)
 	EX	ldc1 $f2, SC32_FPREGS+16(a0)
 	EX	ldc1 $f4, SC32_FPREGS+32(a0)
 	EX	ldc1 $f6, SC32_FPREGS+48(a0)
@@ -173,7 +239,7 @@ LEAF(_restore_fp_context32)
 	EX	ldc1 $f26, SC32_FPREGS+208(a0)
 	EX	ldc1 $f28, SC32_FPREGS+224(a0)
 	EX	ldc1 $f30, SC32_FPREGS+240(a0)
-	ctc1	t0, fcr31
+	ctc1	t1, fcr31
 	jr	ra
 	 li	v0, 0					# success
 END(_restore_fp_context32)
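A note on the recurring "sll t0, t0, 5" / "bgez t0, 1f" pair added above: CP0 Status.FR is bit 26, so shifting Status left by 5 moves FR into bit 31, the sign bit, and bgez then branches (skipping the odd-register save or restore) exactly when FR=0. The same test in C, as a hedged sketch (the ST0_FR value follows the MIPS Status register layout; the helper name is invented):

#include <stdbool.h>
#include <stdint.h>

#define ST0_FR (1u << 26)	/* CP0 Status.FR: 1 = 32 x 64-bit FPRs */

/* Mirrors the assembly trick: (status << 5) puts FR in the sign bit. */
static bool fpu_has_64bit_regs(uint32_t cp0_status)
{
	return (int32_t)(cp0_status << 5) < 0;	/* bgez taken when FR=0 */
}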
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 078de5eaca8f..cc78dd9a17c7 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -123,7 +123,7 @@
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_save_double a0 t0 t1		# clobbers t1
@@ -134,7 +134,7 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
 	mfc0	t0, CP0_STATUS
 #endif
 	fpu_restore_double a0 t0 t1		# clobbers t1
@@ -228,6 +228,47 @@ LEAF(_init_fpu)
 	mtc1	t1, $f29
 	mtc1	t1, $f30
 	mtc1	t1, $f31
+
+#ifdef CONFIG_CPU_MIPS32_R2
+	.set	push
+	.set	mips64r2
+	sll	t0, t0, 5			# is Status.FR set?
+	bgez	t0, 1f				# no: skip setting upper 32b
+
+	mthc1	t1, $f0
+	mthc1	t1, $f1
+	mthc1	t1, $f2
+	mthc1	t1, $f3
+	mthc1	t1, $f4
+	mthc1	t1, $f5
+	mthc1	t1, $f6
+	mthc1	t1, $f7
+	mthc1	t1, $f8
+	mthc1	t1, $f9
+	mthc1	t1, $f10
+	mthc1	t1, $f11
+	mthc1	t1, $f12
+	mthc1	t1, $f13
+	mthc1	t1, $f14
+	mthc1	t1, $f15
+	mthc1	t1, $f16
+	mthc1	t1, $f17
+	mthc1	t1, $f18
+	mthc1	t1, $f19
+	mthc1	t1, $f20
+	mthc1	t1, $f21
+	mthc1	t1, $f22
+	mthc1	t1, $f23
+	mthc1	t1, $f24
+	mthc1	t1, $f25
+	mthc1	t1, $f26
+	mthc1	t1, $f27
+	mthc1	t1, $f28
+	mthc1	t1, $f29
+	mthc1	t1, $f30
+	mthc1	t1, $f31
+1:	.set	pop
+#endif /* CONFIG_CPU_MIPS32_R2 */
 #else
 	.set	mips3
 	dmtc1	t1, $f0
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
new file mode 100644
index 000000000000..758fb3cd2326
--- /dev/null
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_interrupt(void)
+{
+	int i;
+	struct rtlx_info *info;
+	struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
+
+	if (p == NULL || *p == NULL)
+		return;
+
+	info = *p;
+
+	if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
+		for (i = 0; i < RTLX_CHANNELS; i++) {
+			wake_up(&channel_wqs[i].lx_queue);
+			wake_up(&channel_wqs[i].rt_queue);
+		}
+		info->ap_int_pending = 0;
+	}
+}
+
+void _interrupt_sp(void)
+{
+	smp_send_reschedule(aprp_cpu_index());
+}
+
+int __init rtlx_module_init(void)
+{
+	struct device *dev;
+	int i, err;
+
+	if (!cpu_has_mipsmt) {
+		pr_warn("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (num_possible_cpus() - aprp_cpu_index() < 1) {
+		pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+			"Pass maxcpus=<n> argument as kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+	if (major < 0) {
+		pr_err("rtlx_module_init: unable to register device\n");
+		return major;
+	}
+
+	/* initialise the wait queues */
+	for (i = 0; i < RTLX_CHANNELS; i++) {
+		init_waitqueue_head(&channel_wqs[i].rt_queue);
+		init_waitqueue_head(&channel_wqs[i].lx_queue);
+		atomic_set(&channel_wqs[i].in_open, 0);
+		mutex_init(&channel_wqs[i].mutex);
+
+		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+				    "%s%d", RTLX_MODULE_NAME, i);
+		if (IS_ERR(dev)) {
+			err = PTR_ERR(dev);
+			goto out_chrdev;
+		}
+	}
+
+	/* set up notifiers */
+	rtlx_notify.start = rtlx_starting;
+	rtlx_notify.stop = rtlx_stopping;
+	vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+	if (cpu_has_vint) {
+		aprp_hook = rtlx_interrupt;
+	} else {
+		pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+		err = -ENODEV;
+		goto out_class;
+	}
+
+	return 0;
+
+out_class:
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+	int i;
+
+	for (i = 0; i < RTLX_CHANNELS; i++)
+		device_destroy(mt_class, MKDEV(major, i));
+
+	unregister_chrdev(major, RTLX_MODULE_NAME);
+
+	aprp_hook = NULL;
+}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
new file mode 100644
index 000000000000..9c1aca00fd54
--- /dev/null
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -0,0 +1,151 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
7 * Copyright (C) 2013 Imagination Technologies Ltd.
8 */
9#include <linux/device.h>
10#include <linux/fs.h>
11#include <linux/err.h>
12#include <linux/wait.h>
13#include <linux/sched.h>
14#include <linux/interrupt.h>
15#include <linux/irq.h>
16
17#include <asm/mips_mt.h>
18#include <asm/vpe.h>
19#include <asm/rtlx.h>
20
21static int major;
22
23static void rtlx_dispatch(void)
24{
25 if (read_c0_cause() & read_c0_status() & C_SW0)
26 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
27}
28
29/*
30 * Interrupt handler may be called before rtlx_init has otherwise had
31 * a chance to run.
32 */
33static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
34{
35 unsigned int vpeflags;
36 unsigned long flags;
37 int i;
38
39 /* Ought not to be strictly necessary for SMTC builds */
40 local_irq_save(flags);
41 vpeflags = dvpe();
42 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
43 irq_enable_hazard();
44 evpe(vpeflags);
45 local_irq_restore(flags);
46
47 for (i = 0; i < RTLX_CHANNELS; i++) {
48 wake_up(&channel_wqs[i].lx_queue);
49 wake_up(&channel_wqs[i].rt_queue);
50 }
51
52 return IRQ_HANDLED;
53}
54
55static struct irqaction rtlx_irq = {
56 .handler = rtlx_interrupt,
57 .name = "RTLX",
58};
59
60static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
61
62void _interrupt_sp(void)
63{
64 unsigned long flags;
65
66 local_irq_save(flags);
67 dvpe();
68 settc(1);
69 write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
70 evpe(EVPE_ENABLE);
71 local_irq_restore(flags);
72}
73
74int __init rtlx_module_init(void)
75{
76 struct device *dev;
77 int i, err;
78
79 if (!cpu_has_mipsmt) {
80 pr_warn("VPE loader: not a MIPS MT capable processor\n");
81 return -ENODEV;
82 }
83
84 if (aprp_cpu_index() == 0) {
85 pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
86 "Pass maxtcs=<n> argument as kernel argument\n");
87
88 return -ENODEV;
89 }
90
91 major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
92 if (major < 0) {
93 pr_err("rtlx_module_init: unable to register device\n");
94 return major;
95 }
96
97 /* initialise the wait queues */
98 for (i = 0; i < RTLX_CHANNELS; i++) {
99 init_waitqueue_head(&channel_wqs[i].rt_queue);
100 init_waitqueue_head(&channel_wqs[i].lx_queue);
101 atomic_set(&channel_wqs[i].in_open, 0);
102 mutex_init(&channel_wqs[i].mutex);
103
104 dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
105 "%s%d", RTLX_MODULE_NAME, i);
106 if (IS_ERR(dev)) {
107 err = PTR_ERR(dev);
108 goto out_chrdev;
109 }
110 }
111
112 /* set up notifiers */
113 rtlx_notify.start = rtlx_starting;
114 rtlx_notify.stop = rtlx_stopping;
115 vpe_notify(aprp_cpu_index(), &rtlx_notify);
116
117 if (cpu_has_vint) {
118 aprp_hook = rtlx_dispatch;
119 } else {
120 pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
121 err = -ENODEV;
122 goto out_class;
123 }
124
125 rtlx_irq.dev_id = rtlx;
126 err = setup_irq(rtlx_irq_num, &rtlx_irq);
127 if (err)
128 goto out_class;
129
130 return 0;
131
132out_class:
133 for (i = 0; i < RTLX_CHANNELS; i++)
134 device_destroy(mt_class, MKDEV(major, i));
135out_chrdev:
136 unregister_chrdev(major, RTLX_MODULE_NAME);
137
138 return err;
139}
140
141void __exit rtlx_module_exit(void)
142{
143 int i;
144
145 for (i = 0; i < RTLX_CHANNELS; i++)
146 device_destroy(mt_class, MKDEV(major, i));
147
148 unregister_chrdev(major, RTLX_MODULE_NAME);
149
150 aprp_hook = NULL;
151}
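On the MT variant above, the host kernel and the AP/SP program signal each other with CPU software interrupt 0: _interrupt_sp() sets the SW0 bit in the target VPE's Cause register, and rtlx_dispatch() only forwards to do_IRQ() when SW0 is both raised in Cause and unmasked in Status. A condensed C sketch of that gating (the mask value follows the MIPS Cause register layout; the helper name is invented):

#include <stdbool.h>
#include <stdint.h>

#define CAUSEF_IP0 (1u << 8)	/* C_SW0: software interrupt 0 */

/* Forward the RTLX IRQ only when SW0 is pending *and* enabled. */
static bool rtlx_irq_should_dispatch(uint32_t cause, uint32_t status)
{
	return (cause & status & CAUSEF_IP0) != 0;
}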
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 2c12ea1668d1..31b1b763cb29 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -1,114 +1,51 @@
1/* 1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
2 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
3 * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) 7 * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
4 * 8 * Copyright (C) 2013 Imagination Technologies Ltd.
5 * This program is free software; you can distribute it and/or modify it
6 * under the terms of the GNU General Public License (Version 2) as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17 *
18 */ 9 */
19
20#include <linux/device.h>
21#include <linux/kernel.h> 10#include <linux/kernel.h>
22#include <linux/fs.h> 11#include <linux/fs.h>
23#include <linux/init.h>
24#include <asm/uaccess.h>
25#include <linux/list.h>
26#include <linux/vmalloc.h>
27#include <linux/elf.h>
28#include <linux/seq_file.h>
29#include <linux/syscalls.h> 12#include <linux/syscalls.h>
30#include <linux/moduleloader.h> 13#include <linux/moduleloader.h>
31#include <linux/interrupt.h> 14#include <linux/atomic.h>
32#include <linux/poll.h>
33#include <linux/sched.h>
34#include <linux/wait.h>
35#include <asm/mipsmtregs.h> 15#include <asm/mipsmtregs.h>
36#include <asm/mips_mt.h> 16#include <asm/mips_mt.h>
37#include <asm/cacheflush.h>
38#include <linux/atomic.h>
39#include <asm/cpu.h>
40#include <asm/processor.h> 17#include <asm/processor.h>
41#include <asm/vpe.h>
42#include <asm/rtlx.h> 18#include <asm/rtlx.h>
43#include <asm/setup.h> 19#include <asm/setup.h>
20#include <asm/vpe.h>
44 21
45static struct rtlx_info *rtlx;
46static int major;
47static char module_name[] = "rtlx";
48
49static struct chan_waitqueues {
50 wait_queue_head_t rt_queue;
51 wait_queue_head_t lx_queue;
52 atomic_t in_open;
53 struct mutex mutex;
54} channel_wqs[RTLX_CHANNELS];
55
56static struct vpe_notifications notify;
57static int sp_stopping; 22static int sp_stopping;
58 23struct rtlx_info *rtlx;
59extern void *vpe_get_shared(int index); 24struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
60 25struct vpe_notifications rtlx_notify;
61static void rtlx_dispatch(void) 26void (*aprp_hook)(void) = NULL;
62{ 27EXPORT_SYMBOL(aprp_hook);
63 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
64}
65
66
67/* Interrupt handler may be called before rtlx_init has otherwise had
68 a chance to run.
69*/
70static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
71{
72 unsigned int vpeflags;
73 unsigned long flags;
74 int i;
75
76 /* Ought not to be strictly necessary for SMTC builds */
77 local_irq_save(flags);
78 vpeflags = dvpe();
79 set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
80 irq_enable_hazard();
81 evpe(vpeflags);
82 local_irq_restore(flags);
83
84 for (i = 0; i < RTLX_CHANNELS; i++) {
85 wake_up(&channel_wqs[i].lx_queue);
86 wake_up(&channel_wqs[i].rt_queue);
87 }
88
89 return IRQ_HANDLED;
90}
91 28
92static void __used dump_rtlx(void) 29static void __used dump_rtlx(void)
93{ 30{
94 int i; 31 int i;
95 32
96 printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); 33 pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
97 34
98 for (i = 0; i < RTLX_CHANNELS; i++) { 35 for (i = 0; i < RTLX_CHANNELS; i++) {
99 struct rtlx_channel *chan = &rtlx->channel[i]; 36 struct rtlx_channel *chan = &rtlx->channel[i];
100 37
101 printk(" rt_state %d lx_state %d buffer_size %d\n", 38 pr_info(" rt_state %d lx_state %d buffer_size %d\n",
102 chan->rt_state, chan->lx_state, chan->buffer_size); 39 chan->rt_state, chan->lx_state, chan->buffer_size);
103 40
104 printk(" rt_read %d rt_write %d\n", 41 pr_info(" rt_read %d rt_write %d\n",
105 chan->rt_read, chan->rt_write); 42 chan->rt_read, chan->rt_write);
106 43
107 printk(" lx_read %d lx_write %d\n", 44 pr_info(" lx_read %d lx_write %d\n",
108 chan->lx_read, chan->lx_write); 45 chan->lx_read, chan->lx_write);
109 46
110 printk(" rt_buffer <%s>\n", chan->rt_buffer); 47 pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
111 printk(" lx_buffer <%s>\n", chan->lx_buffer); 48 pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
112 } 49 }
113} 50}
114 51
@@ -116,8 +53,7 @@ static void __used dump_rtlx(void)
116static int rtlx_init(struct rtlx_info *rtlxi) 53static int rtlx_init(struct rtlx_info *rtlxi)
117{ 54{
118 if (rtlxi->id != RTLX_ID) { 55 if (rtlxi->id != RTLX_ID) {
119 printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", 56 pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
120 rtlxi, rtlxi->id);
121 return -ENOEXEC; 57 return -ENOEXEC;
122 } 58 }
123 59
@@ -127,20 +63,20 @@ static int rtlx_init(struct rtlx_info *rtlxi)
127} 63}
128 64
129/* notifications */ 65/* notifications */
130static void starting(int vpe) 66void rtlx_starting(int vpe)
131{ 67{
132 int i; 68 int i;
133 sp_stopping = 0; 69 sp_stopping = 0;
134 70
135 /* force a reload of rtlx */ 71 /* force a reload of rtlx */
136 rtlx=NULL; 72 rtlx = NULL;
137 73
138 /* wake up any sleeping rtlx_open's */ 74 /* wake up any sleeping rtlx_open's */
139 for (i = 0; i < RTLX_CHANNELS; i++) 75 for (i = 0; i < RTLX_CHANNELS; i++)
140 wake_up_interruptible(&channel_wqs[i].lx_queue); 76 wake_up_interruptible(&channel_wqs[i].lx_queue);
141} 77}
142 78
143static void stopping(int vpe) 79void rtlx_stopping(int vpe)
144{ 80{
145 int i; 81 int i;
146 82
@@ -158,31 +94,30 @@ int rtlx_open(int index, int can_sleep)
158 int ret = 0; 94 int ret = 0;
159 95
160 if (index >= RTLX_CHANNELS) { 96 if (index >= RTLX_CHANNELS) {
161 printk(KERN_DEBUG "rtlx_open index out of range\n"); 97 pr_debug(KERN_DEBUG "rtlx_open index out of range\n");
162 return -ENOSYS; 98 return -ENOSYS;
163 } 99 }
164 100
165 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { 101 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
166 printk(KERN_DEBUG "rtlx_open channel %d already opened\n", 102 pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index);
167 index);
168 ret = -EBUSY; 103 ret = -EBUSY;
169 goto out_fail; 104 goto out_fail;
170 } 105 }
171 106
172 if (rtlx == NULL) { 107 if (rtlx == NULL) {
173 if( (p = vpe_get_shared(tclimit)) == NULL) { 108 p = vpe_get_shared(aprp_cpu_index());
174 if (can_sleep) { 109 if (p == NULL) {
175 ret = __wait_event_interruptible( 110 if (can_sleep) {
111 ret = __wait_event_interruptible(
176 channel_wqs[index].lx_queue, 112 channel_wqs[index].lx_queue,
177 (p = vpe_get_shared(tclimit))); 113 (p = vpe_get_shared(aprp_cpu_index())));
178 if (ret) 114 if (ret)
115 goto out_fail;
116 } else {
117 pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
118 ret = -ENOSYS;
179 goto out_fail; 119 goto out_fail;
180 } else { 120 }
181 printk(KERN_DEBUG "No SP program loaded, and device "
182 "opened with O_NONBLOCK\n");
183 ret = -ENOSYS;
184 goto out_fail;
185 }
186 } 121 }
187 122
188 smp_rmb(); 123 smp_rmb();
@@ -204,24 +139,24 @@ int rtlx_open(int index, int can_sleep)
204 ret = -ERESTARTSYS; 139 ret = -ERESTARTSYS;
205 goto out_fail; 140 goto out_fail;
206 } 141 }
207 finish_wait(&channel_wqs[index].lx_queue, &wait); 142 finish_wait(&channel_wqs[index].lx_queue,
143 &wait);
208 } else { 144 } else {
209 pr_err(" *vpe_get_shared is NULL. " 145 pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
210 "Has an SP program been loaded?\n");
211 ret = -ENOSYS; 146 ret = -ENOSYS;
212 goto out_fail; 147 goto out_fail;
213 } 148 }
214 } 149 }
215 150
216 if ((unsigned int)*p < KSEG0) { 151 if ((unsigned int)*p < KSEG0) {
217 printk(KERN_WARNING "vpe_get_shared returned an " 152 pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
218 "invalid pointer maybe an error code %d\n", 153 (int)*p);
219 (int)*p);
220 ret = -ENOSYS; 154 ret = -ENOSYS;
221 goto out_fail; 155 goto out_fail;
222 } 156 }
223 157
224 if ((ret = rtlx_init(*p)) < 0) 158 ret = rtlx_init(*p);
159 if (ret < 0)
225 goto out_ret; 160 goto out_ret;
226 } 161 }
227 162
@@ -352,7 +287,7 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
352 size_t fl; 287 size_t fl;
353 288
354 if (rtlx == NULL) 289 if (rtlx == NULL)
355 return(-ENOSYS); 290 return -ENOSYS;
356 291
357 rt = &rtlx->channel[index]; 292 rt = &rtlx->channel[index];
358 293
@@ -361,8 +296,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
361 rt_read = rt->rt_read; 296 rt_read = rt->rt_read;
362 297
363 /* total number of bytes to copy */ 298 /* total number of bytes to copy */
364 count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, 299 count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
365 rt->buffer_size)); 300 rt->buffer_size));
366 301
367 /* first bit from write pointer to the end of the buffer, or count */ 302 /* first bit from write pointer to the end of the buffer, or count */
368 fl = min(count, (size_t) rt->buffer_size - rt->rt_write); 303 fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
@@ -372,9 +307,8 @@ ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
372 goto out; 307 goto out;
373 308
374 /* if there's any left copy to the beginning of the buffer */ 309 /* if there's any left copy to the beginning of the buffer */
375 if (count - fl) { 310 if (count - fl)
376 failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); 311 failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
377 }
378 312
379out: 313out:
380 count -= failed; 314 count -= failed;
@@ -384,6 +318,8 @@ out:
384 smp_wmb(); 318 smp_wmb();
385 mutex_unlock(&channel_wqs[index].mutex); 319 mutex_unlock(&channel_wqs[index].mutex);
386 320
321 _interrupt_sp();
322
387 return count; 323 return count;
388} 324}
389 325
@@ -398,7 +334,7 @@ static int file_release(struct inode *inode, struct file *filp)
398 return rtlx_release(iminor(inode)); 334 return rtlx_release(iminor(inode));
399} 335}
400 336
401static unsigned int file_poll(struct file *file, poll_table * wait) 337static unsigned int file_poll(struct file *file, poll_table *wait)
402{ 338{
403 int minor = iminor(file_inode(file)); 339 int minor = iminor(file_inode(file));
404 unsigned int mask = 0; 340 unsigned int mask = 0;
@@ -420,21 +356,20 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
420 return mask; 356 return mask;
421} 357}
422 358
423static ssize_t file_read(struct file *file, char __user * buffer, size_t count, 359static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
424 loff_t * ppos) 360 loff_t *ppos)
425{ 361{
426 int minor = iminor(file_inode(file)); 362 int minor = iminor(file_inode(file));
427 363
428 /* data available? */ 364 /* data available? */
429 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { 365 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
430 return 0; // -EAGAIN makes cat whinge 366 return 0; /* -EAGAIN makes 'cat' whine */
431 }
432 367
433 return rtlx_read(minor, buffer, count); 368 return rtlx_read(minor, buffer, count);
434} 369}
435 370
436static ssize_t file_write(struct file *file, const char __user * buffer, 371static ssize_t file_write(struct file *file, const char __user *buffer,
437 size_t count, loff_t * ppos) 372 size_t count, loff_t *ppos)
438{ 373{
439 int minor = iminor(file_inode(file)); 374 int minor = iminor(file_inode(file));
440 375
@@ -454,100 +389,16 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
454 return rtlx_write(minor, buffer, count); 389 return rtlx_write(minor, buffer, count);
455} 390}
456 391
457static const struct file_operations rtlx_fops = { 392const struct file_operations rtlx_fops = {
458 .owner = THIS_MODULE, 393 .owner = THIS_MODULE,
459 .open = file_open, 394 .open = file_open,
460 .release = file_release, 395 .release = file_release,
461 .write = file_write, 396 .write = file_write,
462 .read = file_read, 397 .read = file_read,
463 .poll = file_poll, 398 .poll = file_poll,
464 .llseek = noop_llseek, 399 .llseek = noop_llseek,
465}; 400};
466 401
467static struct irqaction rtlx_irq = {
468 .handler = rtlx_interrupt,
469 .name = "RTLX",
470};
471
472static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
473
474static char register_chrdev_failed[] __initdata =
475 KERN_ERR "rtlx_module_init: unable to register device\n";
476
477static int __init rtlx_module_init(void)
478{
479 struct device *dev;
480 int i, err;
481
482 if (!cpu_has_mipsmt) {
483 printk("VPE loader: not a MIPS MT capable processor\n");
484 return -ENODEV;
485 }
486
487 if (tclimit == 0) {
488 printk(KERN_WARNING "No TCs reserved for AP/SP, not "
489 "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
490 "argument\n");
491
492 return -ENODEV;
493 }
494
495 major = register_chrdev(0, module_name, &rtlx_fops);
496 if (major < 0) {
497 printk(register_chrdev_failed);
498 return major;
499 }
500
501 /* initialise the wait queues */
502 for (i = 0; i < RTLX_CHANNELS; i++) {
503 init_waitqueue_head(&channel_wqs[i].rt_queue);
504 init_waitqueue_head(&channel_wqs[i].lx_queue);
505 atomic_set(&channel_wqs[i].in_open, 0);
506 mutex_init(&channel_wqs[i].mutex);
507
508 dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
509 "%s%d", module_name, i);
510 if (IS_ERR(dev)) {
511 err = PTR_ERR(dev);
512 goto out_chrdev;
513 }
514 }
515
516 /* set up notifiers */
517 notify.start = starting;
518 notify.stop = stopping;
519 vpe_notify(tclimit, &notify);
520
521 if (cpu_has_vint)
522 set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
523 else {
524 pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
525 err = -ENODEV;
526 goto out_chrdev;
527 }
528
529 rtlx_irq.dev_id = rtlx;
530 setup_irq(rtlx_irq_num, &rtlx_irq);
531
532 return 0;
533
534out_chrdev:
535 for (i = 0; i < RTLX_CHANNELS; i++)
536 device_destroy(mt_class, MKDEV(major, i));
537
538 return err;
539}
540
541static void __exit rtlx_module_exit(void)
542{
543 int i;
544
545 for (i = 0; i < RTLX_CHANNELS; i++)
546 device_destroy(mt_class, MKDEV(major, i));
547
548 unregister_chrdev(major, module_name);
549}
550
551module_init(rtlx_module_init); 402module_init(rtlx_module_init);
552module_exit(rtlx_module_exit); 403module_exit(rtlx_module_exit);
553 404
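
The min_t() clamp in rtlx_write() above relies on write_spacefree() reporting how much room remains between the read and write cursors of the RTLX ring buffer. Below is a minimal, runnable userspace model of that arithmetic; write_spacefree() itself is not part of this hunk, so the one-slot-reserved ring convention shown here is an assumption.

```c
#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical stand-in for the kernel's write_spacefree(): a classic
 * ring buffer keeps one slot unused so that read == write can only
 * mean "empty".
 */
static size_t ring_space_free(size_t rd, size_t wr, size_t size)
{
	if (rd == wr)
		return size - 1;	/* empty: all but the reserved slot */
	return ((rd + size - wr) % size) - 1;
}

int main(void)
{
	size_t count = 100;	/* bytes the caller asked to write */
	size_t space = ring_space_free(3, 8, 16);

	/* the min_t(size_t, ...) clamp performed by rtlx_write() */
	if (count > space)
		count = space;

	printf("clamped write to %zu bytes\n", count);	/* prints 10 */
	return 0;
}
```
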
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b40d86..a5b14f48e1af 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
563 PTR sys_process_vm_writev 563 PTR sys_process_vm_writev
564 PTR sys_kcmp 564 PTR sys_kcmp
565 PTR sys_finit_module 565 PTR sys_finit_module
566 PTR sys_sched_setattr
567 PTR sys_sched_getattr /* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742fec59..b56e254beb15 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
425 PTR sys_kcmp 425 PTR sys_kcmp
426 PTR sys_finit_module 426 PTR sys_finit_module
427 PTR sys_getdents64 427 PTR sys_getdents64
428 PTR sys_sched_setattr
429 PTR sys_sched_getattr /* 5310 */
428 .size sys_call_table,.-sys_call_table 430 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f5934399..f7e5b72cf481 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
418 PTR compat_sys_process_vm_writev /* 6310 */ 418 PTR compat_sys_process_vm_writev /* 6310 */
419 PTR sys_kcmp 419 PTR sys_kcmp
420 PTR sys_finit_module 420 PTR sys_finit_module
421 PTR sys_sched_setattr
422 PTR sys_sched_getattr
421 .size sysn32_call_table,.-sysn32_call_table 423 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb429f4f..6788727d91af 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
541 PTR compat_sys_process_vm_writev 541 PTR compat_sys_process_vm_writev
542 PTR sys_kcmp 542 PTR sys_kcmp
543 PTR sys_finit_module 543 PTR sys_finit_module
544 PTR sys_sched_setattr
545 PTR sys_sched_getattr /* 4350 */
544 .size sys32_call_table,.-sys32_call_table 546 .size sys32_call_table,.-sys32_call_table
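
All four syscall tables gain sys_sched_setattr and sys_sched_getattr, with the trailing comments giving each ABI's numbering (o32 entries are offset from 4000, so the 4350 marker places sched_getattr at 4000 + 350). A hedged sketch of calling the new o32 entry through syscall(2) follows; the raw number and the struct sched_attr layout are assumptions taken from these tables and the contemporaneous scheduler work, so verify against <asm/unistd.h> and <linux/sched.h> on a real system.

```c
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Assumed layout of struct sched_attr as introduced alongside these
 * table entries. */
struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = { .size = sizeof(attr) };

	/* 4350 = 4000 (o32 base) + 350, per the table comment above;
	 * other ABIs use the numbers shown in their own tables. */
	long ret = syscall(4350, 0 /* pid: self */, &attr,
			   (unsigned int)sizeof(attr), 0 /* flags */);

	if (ret == 0)
		printf("policy=%u nice=%d\n", attr.sched_policy, attr.sched_nice);
	else
		perror("sched_getattr");
	return 0;
}
```
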
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 000000000000..076ead2a9859
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,110 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2013 Imagination Technologies Ltd.
7 */
8
9#include <linux/kernel.h>
10#include <linux/debugfs.h>
11#include <linux/seq_file.h>
12#include <asm/cpu.h>
13#include <asm/mipsregs.h>
14
15static void build_segment_config(char *str, unsigned int cfg)
16{
17 unsigned int am;
18 static const char * const am_str[] = {
19 "UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
20 "RSRVD", "UUSK"};
21
22 /* Segment access mode. */
23 am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
24 str += sprintf(str, "%-5s", am_str[am]);
25
26 /*
27 * Access modes MK, MSK and MUSK are mapped segments. Therefore
28 * there is no direct physical address mapping.
29 */
30 if ((am == 0) || (am > 3)) {
31 str += sprintf(str, " %03lx",
32 ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
33 str += sprintf(str, " %01ld",
34 ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
35 } else {
36 str += sprintf(str, " UND");
37 str += sprintf(str, " U");
38 }
39
40 /* Exception configuration. */
41 str += sprintf(str, " %01ld\n",
42 ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
43}
44
45static int show_segments(struct seq_file *m, void *v)
46{
47 unsigned int segcfg;
48 char str[42];
49
50 seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
51 seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
52
53 segcfg = read_c0_segctl0();
54 build_segment_config(str, segcfg);
55 seq_printf(m, " 0 e0000000 512M %s", str);
56
57 segcfg >>= 16;
58 build_segment_config(str, segcfg);
59 seq_printf(m, " 1 c0000000 512M %s", str);
60
61 segcfg = read_c0_segctl1();
62 build_segment_config(str, segcfg);
63 seq_printf(m, " 2 a0000000 512M %s", str);
64
65 segcfg >>= 16;
66 build_segment_config(str, segcfg);
67 seq_printf(m, " 3 80000000 512M %s", str);
68
69 segcfg = read_c0_segctl2();
70 build_segment_config(str, segcfg);
71 seq_printf(m, " 4 40000000 1G %s", str);
72
73 segcfg >>= 16;
74 build_segment_config(str, segcfg);
75 seq_printf(m, " 5 00000000 1G %s\n", str);
76
77 return 0;
78}
79
80static int segments_open(struct inode *inode, struct file *file)
81{
82 return single_open(file, show_segments, NULL);
83}
84
85static const struct file_operations segments_fops = {
86 .open = segments_open,
87 .read = seq_read,
88 .llseek = seq_lseek,
89 .release = single_release,
90};
91
92static int __init segments_info(void)
93{
94 extern struct dentry *mips_debugfs_dir;
95 struct dentry *segments;
96
97 if (cpu_has_segments) {
98 if (!mips_debugfs_dir)
99 return -ENODEV;
100
101 segments = debugfs_create_file("segments", S_IRUGO,
102 mips_debugfs_dir, NULL,
103 &segments_fops);
104 if (!segments)
105 return -ENOMEM;
106 }
107 return 0;
108}
109
110device_initcall(segments_info);
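
The new segment.c exposes the programmable segment configuration through a single read-only debugfs file. A small userspace reader, assuming debugfs is mounted at the usual /sys/kernel/debug and that mips_debugfs_dir names the conventional "mips" subdirectory:

```c
#include <stdio.h>

int main(void)
{
	/* Assumed path: debugfs at /sys/kernel/debug, directory "mips". */
	FILE *f = fopen("/sys/kernel/debug/mips/segments", "r");
	char line[128];

	if (!f) {
		perror("segments");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* echo the table built above */
	fclose(f);
	return 0;
}
```
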
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 2f285abc76d5..5199563c4403 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -71,8 +71,9 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
71 int err; 71 int err;
72 while (1) { 72 while (1) {
73 lock_fpu_owner(); 73 lock_fpu_owner();
74 own_fpu_inatomic(1); 74 err = own_fpu_inatomic(1);
75 err = save_fp_context(sc); /* this might fail */ 75 if (!err)
76 err = save_fp_context(sc); /* this might fail */
76 unlock_fpu_owner(); 77 unlock_fpu_owner();
77 if (likely(!err)) 78 if (likely(!err))
78 break; 79 break;
@@ -91,8 +92,9 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
91 int err, tmp __maybe_unused; 92 int err, tmp __maybe_unused;
92 while (1) { 93 while (1) {
93 lock_fpu_owner(); 94 lock_fpu_owner();
94 own_fpu_inatomic(0); 95 err = own_fpu_inatomic(0);
95 err = restore_fp_context(sc); /* this might fail */ 96 if (!err)
97 err = restore_fp_context(sc); /* this might fail */
96 unlock_fpu_owner(); 98 unlock_fpu_owner();
97 if (likely(!err)) 99 if (likely(!err))
98 break; 100 break;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 1905a419aa46..3d60f7750fa8 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -85,8 +85,9 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
85 int err; 85 int err;
86 while (1) { 86 while (1) {
87 lock_fpu_owner(); 87 lock_fpu_owner();
88 own_fpu_inatomic(1); 88 err = own_fpu_inatomic(1);
89 err = save_fp_context32(sc); /* this might fail */ 89 if (!err)
90 err = save_fp_context32(sc); /* this might fail */
90 unlock_fpu_owner(); 91 unlock_fpu_owner();
91 if (likely(!err)) 92 if (likely(!err))
92 break; 93 break;
@@ -105,8 +106,9 @@ static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
105 int err, tmp __maybe_unused; 106 int err, tmp __maybe_unused;
106 while (1) { 107 while (1) {
107 lock_fpu_owner(); 108 lock_fpu_owner();
108 own_fpu_inatomic(0); 109 err = own_fpu_inatomic(0);
109 err = restore_fp_context32(sc); /* this might fail */ 110 if (!err)
111 err = restore_fp_context32(sc); /* this might fail */
110 unlock_fpu_owner(); 112 unlock_fpu_owner();
111 if (likely(!err)) 113 if (likely(!err))
112 break; 114 break;
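
Both signal paths now check the return value of own_fpu_inatomic() before touching the FPU context, instead of saving or restoring unconditionally. The following stand-alone sketch models the corrected retry pattern, with the kernel helpers replaced by stubs; the single simulated ownership failure is an assumption made for illustration (in the kernel, a fault fixup runs between iterations).

```c
#include <stdio.h>

static int attempts;

/* Stubs for the kernel helpers; both steps can now fail. */
static void lock_fpu_owner(void)   { }
static void unlock_fpu_owner(void) { }
static int own_fpu_inatomic(int restore) { (void)restore; return attempts++ ? 0 : -1; }
static int save_fp_context(void *sc)     { (void)sc; return 0; }

/* The corrected pattern: only touch the saved context once FPU
 * ownership was actually acquired, and retry while either step fails. */
static int protected_save_fp_context(void *sc)
{
	int err;

	while (1) {
		lock_fpu_owner();
		err = own_fpu_inatomic(1);
		if (!err)
			err = save_fp_context(sc);
		unlock_fpu_owner();
		if (!err)
			break;
	}
	return err;
}

int main(void)
{
	int err = protected_save_fp_context(NULL);

	printf("err=%d after %d ownership attempts\n", err, attempts);
	return 0;
}
```
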
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 2362665ba496..ea4c2dc31692 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -49,8 +49,10 @@ cpumask_t bmips_booted_mask;
49unsigned long bmips_smp_boot_sp; 49unsigned long bmips_smp_boot_sp;
50unsigned long bmips_smp_boot_gp; 50unsigned long bmips_smp_boot_gp;
51 51
52static void bmips_send_ipi_single(int cpu, unsigned int action); 52static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
53static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id); 53static void bmips5000_send_ipi_single(int cpu, unsigned int action);
54static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
55static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
54 56
55/* SW interrupts 0,1 are used for interprocessor signaling */ 57/* SW interrupts 0,1 are used for interprocessor signaling */
56#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) 58#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
@@ -64,49 +66,58 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id);
64static void __init bmips_smp_setup(void) 66static void __init bmips_smp_setup(void)
65{ 67{
66 int i, cpu = 1, boot_cpu = 0; 68 int i, cpu = 1, boot_cpu = 0;
67
68#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
69 int cpu_hw_intr; 69 int cpu_hw_intr;
70 70
71 /* arbitration priority */ 71 switch (current_cpu_type()) {
72 clear_c0_brcm_cmt_ctrl(0x30); 72 case CPU_BMIPS4350:
73 73 case CPU_BMIPS4380:
74 /* NBK and weak order flags */ 74 /* arbitration priority */
75 set_c0_brcm_config_0(0x30000); 75 clear_c0_brcm_cmt_ctrl(0x30);
76 76
77 /* Find out if we are running on TP0 or TP1 */ 77 /* NBK and weak order flags */
78 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); 78 set_c0_brcm_config_0(0x30000);
79 79
80 /* 80 /* Find out if we are running on TP0 or TP1 */
81 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread 81 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
82 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output 82
83 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output 83 /*
84 */ 84 * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
85 if (boot_cpu == 0) 85 * thread
86 cpu_hw_intr = 0x02; 86 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
87 else 87 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
88 cpu_hw_intr = 0x1d; 88 */
89 89 if (boot_cpu == 0)
90 change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); 90 cpu_hw_intr = 0x02;
91 91 else
92 /* single core, 2 threads (2 pipelines) */ 92 cpu_hw_intr = 0x1d;
93 max_cpus = 2; 93
94#elif defined(CONFIG_CPU_BMIPS5000) 94 change_c0_brcm_cmt_intr(0xf8018000,
95 /* enable raceless SW interrupts */ 95 (cpu_hw_intr << 27) | (0x03 << 15));
96 set_c0_brcm_config(0x03 << 22); 96
97 97 /* single core, 2 threads (2 pipelines) */
98 /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ 98 max_cpus = 2;
99 change_c0_brcm_mode(0x1f << 27, 0x02 << 27); 99
100 100 break;
101 /* N cores, 2 threads per core */ 101 case CPU_BMIPS5000:
102 max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; 102 /* enable raceless SW interrupts */
103 set_c0_brcm_config(0x03 << 22);
104
105 /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
106 change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
107
108 /* N cores, 2 threads per core */
109 max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
110
111 /* clear any pending SW interrupts */
112 for (i = 0; i < max_cpus; i++) {
113 write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
114 write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
115 }
103 116
104 /* clear any pending SW interrupts */ 117 break;
105 for (i = 0; i < max_cpus; i++) { 118 default:
106 write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); 119 max_cpus = 1;
107 write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
108 } 120 }
109#endif
110 121
111 if (!bmips_smp_enabled) 122 if (!bmips_smp_enabled)
112 max_cpus = 1; 123 max_cpus = 1;
@@ -134,6 +145,20 @@ static void __init bmips_smp_setup(void)
134 */ 145 */
135static void bmips_prepare_cpus(unsigned int max_cpus) 146static void bmips_prepare_cpus(unsigned int max_cpus)
136{ 147{
148 irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
149
150 switch (current_cpu_type()) {
151 case CPU_BMIPS4350:
152 case CPU_BMIPS4380:
153 bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
154 break;
155 case CPU_BMIPS5000:
156 bmips_ipi_interrupt = bmips5000_ipi_interrupt;
157 break;
158 default:
159 return;
160 }
161
137 if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU, 162 if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, IRQF_PERCPU,
138 "smp_ipi0", NULL)) 163 "smp_ipi0", NULL))
139 panic("Can't request IPI0 interrupt"); 164 panic("Can't request IPI0 interrupt");
@@ -168,26 +193,39 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
168 193
169 pr_info("SMP: Booting CPU%d...\n", cpu); 194 pr_info("SMP: Booting CPU%d...\n", cpu);
170 195
171 if (cpumask_test_cpu(cpu, &bmips_booted_mask)) 196 if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
172 bmips_send_ipi_single(cpu, 0); 197 switch (current_cpu_type()) {
198 case CPU_BMIPS4350:
199 case CPU_BMIPS4380:
200 bmips43xx_send_ipi_single(cpu, 0);
201 break;
202 case CPU_BMIPS5000:
203 bmips5000_send_ipi_single(cpu, 0);
204 break;
205 }
206 }
173 else { 207 else {
174#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 208 switch (current_cpu_type()) {
175 /* Reset slave TP1 if booting from TP0 */ 209 case CPU_BMIPS4350:
176 if (cpu_logical_map(cpu) == 1) 210 case CPU_BMIPS4380:
177 set_c0_brcm_cmt_ctrl(0x01); 211 /* Reset slave TP1 if booting from TP0 */
178#elif defined(CONFIG_CPU_BMIPS5000) 212 if (cpu_logical_map(cpu) == 1)
179 if (cpu & 0x01) 213 set_c0_brcm_cmt_ctrl(0x01);
180 write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); 214 break;
181 else { 215 case CPU_BMIPS5000:
182 /* 216 if (cpu & 0x01)
183 * core N thread 0 was already booted; just 217 write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
184 * pulse the NMI line 218 else {
185 */ 219 /*
186 bmips_write_zscm_reg(0x210, 0xc0000000); 220 * core N thread 0 was already booted; just
187 udelay(10); 221 * pulse the NMI line
188 bmips_write_zscm_reg(0x210, 0x00); 222 */
223 bmips_write_zscm_reg(0x210, 0xc0000000);
224 udelay(10);
225 bmips_write_zscm_reg(0x210, 0x00);
226 }
227 break;
189 } 228 }
190#endif
191 cpumask_set_cpu(cpu, &bmips_booted_mask); 229 cpumask_set_cpu(cpu, &bmips_booted_mask);
192 } 230 }
193} 231}
@@ -199,26 +237,32 @@ static void bmips_init_secondary(void)
199{ 237{
200 /* move NMI vector to kseg0, in case XKS01 is enabled */ 238 /* move NMI vector to kseg0, in case XKS01 is enabled */
201 239
202#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 240 void __iomem *cbr;
203 void __iomem *cbr = BMIPS_GET_CBR();
204 unsigned long old_vec; 241 unsigned long old_vec;
205 unsigned long relo_vector; 242 unsigned long relo_vector;
206 int boot_cpu; 243 int boot_cpu;
207 244
208 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); 245 switch (current_cpu_type()) {
209 relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 : 246 case CPU_BMIPS4350:
210 BMIPS_RELO_VECTOR_CONTROL_1; 247 case CPU_BMIPS4380:
248 cbr = BMIPS_GET_CBR();
211 249
212 old_vec = __raw_readl(cbr + relo_vector); 250 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
213 __raw_writel(old_vec & ~0x20000000, cbr + relo_vector); 251 relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
252 BMIPS_RELO_VECTOR_CONTROL_1;
214 253
215 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); 254 old_vec = __raw_readl(cbr + relo_vector);
216#elif defined(CONFIG_CPU_BMIPS5000) 255 __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
217 write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
218 (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
219 256
220 write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); 257 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
221#endif 258 break;
259 case CPU_BMIPS5000:
260 write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
261 (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
262
263 write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
264 break;
265 }
222} 266}
223 267
224/* 268/*
@@ -243,8 +287,6 @@ static void bmips_cpus_done(void)
243{ 287{
244} 288}
245 289
246#if defined(CONFIG_CPU_BMIPS5000)
247
248/* 290/*
249 * BMIPS5000 raceless IPIs 291 * BMIPS5000 raceless IPIs
250 * 292 *
@@ -253,12 +295,12 @@ static void bmips_cpus_done(void)
253 * IPI1 is used for SMP_CALL_FUNCTION 295 * IPI1 is used for SMP_CALL_FUNCTION
254 */ 296 */
255 297
256static void bmips_send_ipi_single(int cpu, unsigned int action) 298static void bmips5000_send_ipi_single(int cpu, unsigned int action)
257{ 299{
258 write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); 300 write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
259} 301}
260 302
261static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) 303static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
262{ 304{
263 int action = irq - IPI0_IRQ; 305 int action = irq - IPI0_IRQ;
264 306
@@ -272,7 +314,14 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
272 return IRQ_HANDLED; 314 return IRQ_HANDLED;
273} 315}
274 316
275#else 317static void bmips5000_send_ipi_mask(const struct cpumask *mask,
318 unsigned int action)
319{
320 unsigned int i;
321
322 for_each_cpu(i, mask)
323 bmips5000_send_ipi_single(i, action);
324}
276 325
277/* 326/*
278 * BMIPS43xx racey IPIs 327 * BMIPS43xx racey IPIs
@@ -287,7 +336,7 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
287static DEFINE_SPINLOCK(ipi_lock); 336static DEFINE_SPINLOCK(ipi_lock);
288static DEFINE_PER_CPU(int, ipi_action_mask); 337static DEFINE_PER_CPU(int, ipi_action_mask);
289 338
290static void bmips_send_ipi_single(int cpu, unsigned int action) 339static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
291{ 340{
292 unsigned long flags; 341 unsigned long flags;
293 342
@@ -298,7 +347,7 @@ static void bmips_send_ipi_single(int cpu, unsigned int action)
298 spin_unlock_irqrestore(&ipi_lock, flags); 347 spin_unlock_irqrestore(&ipi_lock, flags);
299} 348}
300 349
301static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id) 350static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
302{ 351{
303 unsigned long flags; 352 unsigned long flags;
304 int action, cpu = irq - IPI0_IRQ; 353 int action, cpu = irq - IPI0_IRQ;
@@ -317,15 +366,13 @@ static irqreturn_t bmips_ipi_interrupt(int irq, void *dev_id)
317 return IRQ_HANDLED; 366 return IRQ_HANDLED;
318} 367}
319 368
320#endif /* BMIPS type */ 369static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
321
322static void bmips_send_ipi_mask(const struct cpumask *mask,
323 unsigned int action) 370 unsigned int action)
324{ 371{
325 unsigned int i; 372 unsigned int i;
326 373
327 for_each_cpu(i, mask) 374 for_each_cpu(i, mask)
328 bmips_send_ipi_single(i, action); 375 bmips43xx_send_ipi_single(i, action);
329} 376}
330 377
331#ifdef CONFIG_HOTPLUG_CPU 378#ifdef CONFIG_HOTPLUG_CPU
@@ -381,15 +428,30 @@ void __ref play_dead(void)
381 428
382#endif /* CONFIG_HOTPLUG_CPU */ 429#endif /* CONFIG_HOTPLUG_CPU */
383 430
384struct plat_smp_ops bmips_smp_ops = { 431struct plat_smp_ops bmips43xx_smp_ops = {
432 .smp_setup = bmips_smp_setup,
433 .prepare_cpus = bmips_prepare_cpus,
434 .boot_secondary = bmips_boot_secondary,
435 .smp_finish = bmips_smp_finish,
436 .init_secondary = bmips_init_secondary,
437 .cpus_done = bmips_cpus_done,
438 .send_ipi_single = bmips43xx_send_ipi_single,
439 .send_ipi_mask = bmips43xx_send_ipi_mask,
440#ifdef CONFIG_HOTPLUG_CPU
441 .cpu_disable = bmips_cpu_disable,
442 .cpu_die = bmips_cpu_die,
443#endif
444};
445
446struct plat_smp_ops bmips5000_smp_ops = {
385 .smp_setup = bmips_smp_setup, 447 .smp_setup = bmips_smp_setup,
386 .prepare_cpus = bmips_prepare_cpus, 448 .prepare_cpus = bmips_prepare_cpus,
387 .boot_secondary = bmips_boot_secondary, 449 .boot_secondary = bmips_boot_secondary,
388 .smp_finish = bmips_smp_finish, 450 .smp_finish = bmips_smp_finish,
389 .init_secondary = bmips_init_secondary, 451 .init_secondary = bmips_init_secondary,
390 .cpus_done = bmips_cpus_done, 452 .cpus_done = bmips_cpus_done,
391 .send_ipi_single = bmips_send_ipi_single, 453 .send_ipi_single = bmips5000_send_ipi_single,
392 .send_ipi_mask = bmips_send_ipi_mask, 454 .send_ipi_mask = bmips5000_send_ipi_mask,
393#ifdef CONFIG_HOTPLUG_CPU 455#ifdef CONFIG_HOTPLUG_CPU
394 .cpu_disable = bmips_cpu_disable, 456 .cpu_disable = bmips_cpu_disable,
395 .cpu_die = bmips_cpu_die, 457 .cpu_die = bmips_cpu_die,
@@ -427,43 +489,47 @@ void bmips_ebase_setup(void)
427 489
428 BUG_ON(ebase != CKSEG0); 490 BUG_ON(ebase != CKSEG0);
429 491
430#if defined(CONFIG_CPU_BMIPS4350) 492 switch (current_cpu_type()) {
431 /* 493 case CPU_BMIPS4350:
432 * BMIPS4350 cannot relocate the normal vectors, but it 494 /*
433 * can relocate the BEV=1 vectors. So CPU1 starts up at 495 * BMIPS4350 cannot relocate the normal vectors, but it
434 * the relocated BEV=1, IV=0 general exception vector @ 496 * can relocate the BEV=1 vectors. So CPU1 starts up at
435 * 0xa000_0380. 497 * the relocated BEV=1, IV=0 general exception vector @
436 * 498 * 0xa000_0380.
437 * set_uncached_handler() is used here because: 499 *
438 * - CPU1 will run this from uncached space 500 * set_uncached_handler() is used here because:
439 * - None of the cacheflush functions are set up yet 501 * - CPU1 will run this from uncached space
440 */ 502 * - None of the cacheflush functions are set up yet
441 set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, 503 */
442 &bmips_smp_int_vec, 0x80); 504 set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
443 __sync(); 505 &bmips_smp_int_vec, 0x80);
444 return; 506 __sync();
445#elif defined(CONFIG_CPU_BMIPS4380) 507 return;
446 /* 508 case CPU_BMIPS4380:
447 * 0x8000_0000: reset/NMI (initially in kseg1) 509 /*
448 * 0x8000_0400: normal vectors 510 * 0x8000_0000: reset/NMI (initially in kseg1)
449 */ 511 * 0x8000_0400: normal vectors
450 new_ebase = 0x80000400; 512 */
451 cbr = BMIPS_GET_CBR(); 513 new_ebase = 0x80000400;
452 __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0); 514 cbr = BMIPS_GET_CBR();
453 __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1); 515 __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
454#elif defined(CONFIG_CPU_BMIPS5000) 516 __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
455 /* 517 break;
456 * 0x8000_0000: reset/NMI (initially in kseg1) 518 case CPU_BMIPS5000:
457 * 0x8000_1000: normal vectors 519 /*
458 */ 520 * 0x8000_0000: reset/NMI (initially in kseg1)
459 new_ebase = 0x80001000; 521 * 0x8000_1000: normal vectors
460 write_c0_brcm_bootvec(0xa0088008); 522 */
461 write_c0_ebase(new_ebase); 523 new_ebase = 0x80001000;
462 if (max_cpus > 2) 524 write_c0_brcm_bootvec(0xa0088008);
463 bmips_write_zscm_reg(0xa0, 0xa008a008); 525 write_c0_ebase(new_ebase);
464#else 526 if (max_cpus > 2)
465 return; 527 bmips_write_zscm_reg(0xa0, 0xa008a008);
466#endif 528 break;
529 default:
530 return;
531 }
532
467 board_nmi_handler_setup = &bmips_nmi_handler_setup; 533 board_nmi_handler_setup = &bmips_nmi_handler_setup;
468 ebase = new_ebase; 534 ebase = new_ebase;
469} 535}
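
The recurring change throughout smp-bmips.c is mechanical: every #if defined(CONFIG_CPU_BMIPS...) block becomes a switch on current_cpu_type(), so a single kernel image can drive both the 43xx and 5000 families and pick the matching IPI implementation at boot. A compilable sketch of that dispatch shape; the enum values and stub bodies are placeholders, not the kernel's:

```c
#include <stdio.h>

/* Placeholder CPU types and IPI stubs; the kernel's equivalents live
 * in <asm/cpu.h> and the renamed functions above. */
enum cpu_type { CPU_BMIPS4350, CPU_BMIPS4380, CPU_BMIPS5000, CPU_OTHER };

static void bmips43xx_send_ipi_single(int cpu) { printf("43xx IPI -> CPU%d\n", cpu); }
static void bmips5000_send_ipi_single(int cpu) { printf("5000 IPI -> CPU%d\n", cpu); }

/* Boot-time dispatch replacing the old #ifdef CONFIG_CPU_BMIPS*
 * blocks: the family check moves from compile time to run time. */
static void send_ipi_single(enum cpu_type type, int cpu)
{
	switch (type) {
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
		bmips43xx_send_ipi_single(cpu);
		break;
	case CPU_BMIPS5000:
		bmips5000_send_ipi_single(cpu);
		break;
	default:
		break;	/* uniprocessor: nothing to signal */
	}
}

int main(void)
{
	send_ipi_single(CPU_BMIPS4380, 1);
	send_ipi_single(CPU_BMIPS5000, 3);
	return 0;
}
```
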
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 5969f1e9b62a..1b925d8a610c 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -199,11 +199,14 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
199 pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", 199 pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
200 smp_processor_id(), __func__, max_cpus); 200 smp_processor_id(), __func__, max_cpus);
201 201
202#ifdef CONFIG_MIPS_MT
202 /* 203 /*
203 * FIXME: some of these options are per-system, some per-core and 204 * FIXME: some of these options are per-system, some per-core and
204 * some per-cpu 205 * some per-cpu
205 */ 206 */
206 mips_mt_set_cpuoptions(); 207 mips_mt_set_cpuoptions();
208#endif
209
207} 210}
208 211
209struct plat_smp_ops cmp_smp_ops = { 212struct plat_smp_ops cmp_smp_ops = {
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 57a3f7a2b370..0fb8cefc9114 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -71,6 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
71 71
72 /* Record this as available CPU */ 72 /* Record this as available CPU */
73 set_cpu_possible(tc, true); 73 set_cpu_possible(tc, true);
74 set_cpu_present(tc, true);
74 __cpu_number_map[tc] = ++ncpu; 75 __cpu_number_map[tc] = ++ncpu;
75 __cpu_logical_map[ncpu] = tc; 76 __cpu_logical_map[ncpu] = tc;
76 } 77 }
@@ -112,12 +113,39 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
112 write_tc_c0_tchalt(TCHALT_H); 113 write_tc_c0_tchalt(TCHALT_H);
113} 114}
114 115
116#ifdef CONFIG_IRQ_GIC
117static void mp_send_ipi_single(int cpu, unsigned int action)
118{
119 unsigned long flags;
120
121 local_irq_save(flags);
122
123 switch (action) {
124 case SMP_CALL_FUNCTION:
125 gic_send_ipi(plat_ipi_call_int_xlate(cpu));
126 break;
127
128 case SMP_RESCHEDULE_YOURSELF:
129 gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
130 break;
131 }
132
133 local_irq_restore(flags);
134}
135#endif
136
115static void vsmp_send_ipi_single(int cpu, unsigned int action) 137static void vsmp_send_ipi_single(int cpu, unsigned int action)
116{ 138{
117 int i; 139 int i;
118 unsigned long flags; 140 unsigned long flags;
119 int vpflags; 141 int vpflags;
120 142
143#ifdef CONFIG_IRQ_GIC
144 if (gic_present) {
145 mp_send_ipi_single(cpu, action);
146 return;
147 }
148#endif
121 local_irq_save(flags); 149 local_irq_save(flags);
122 150
123 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ 151 vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 93f86817f20a..b242e2c10ea0 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -8,7 +8,6 @@
8 * 8 *
9 * Copyright (C) 2007, 2008 MIPS Technologies, Inc. 9 * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
10 */ 10 */
11#include <linux/init.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/ptrace.h> 12#include <linux/ptrace.h>
14#include <linux/stddef.h> 13#include <linux/stddef.h>
@@ -206,6 +205,8 @@ void spram_config(void)
206 case CPU_34K: 205 case CPU_34K:
207 case CPU_74K: 206 case CPU_74K:
208 case CPU_1004K: 207 case CPU_1004K:
208 case CPU_INTERAPTIV:
209 case CPU_PROAPTIV:
209 config0 = read_c0_config(); 210 config0 = read_c0_config();
210 /* FIXME: addresses are Malta specific */ 211 /* FIXME: addresses are Malta specific */
211 if (config0 & (1<<24)) { 212 if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 84536bf4a154..c24ad5f4b324 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -11,7 +11,6 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/irqflags.h> 14#include <linux/irqflags.h>
16#include <linux/cpumask.h> 15#include <linux/cpumask.h>
17 16
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index f9c8746be8d6..e0b499694d18 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -78,6 +78,7 @@ extern asmlinkage void handle_cpu(void);
78extern asmlinkage void handle_ov(void); 78extern asmlinkage void handle_ov(void);
79extern asmlinkage void handle_tr(void); 79extern asmlinkage void handle_tr(void);
80extern asmlinkage void handle_fpe(void); 80extern asmlinkage void handle_fpe(void);
81extern asmlinkage void handle_ftlb(void);
81extern asmlinkage void handle_mdmx(void); 82extern asmlinkage void handle_mdmx(void);
82extern asmlinkage void handle_watch(void); 83extern asmlinkage void handle_watch(void);
83extern asmlinkage void handle_mt(void); 84extern asmlinkage void handle_mt(void);
@@ -1080,7 +1081,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1080 unsigned long old_epc, old31; 1081 unsigned long old_epc, old31;
1081 unsigned int opcode; 1082 unsigned int opcode;
1082 unsigned int cpid; 1083 unsigned int cpid;
1083 int status; 1084 int status, err;
1084 unsigned long __maybe_unused flags; 1085 unsigned long __maybe_unused flags;
1085 1086
1086 prev_state = exception_enter(); 1087 prev_state = exception_enter();
@@ -1153,19 +1154,19 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1153 1154
1154 case 1: 1155 case 1:
1155 if (used_math()) /* Using the FPU again. */ 1156 if (used_math()) /* Using the FPU again. */
1156 own_fpu(1); 1157 err = own_fpu(1);
1157 else { /* First time FPU user. */ 1158 else { /* First time FPU user. */
1158 init_fpu(); 1159 err = init_fpu();
1159 set_used_math(); 1160 set_used_math();
1160 } 1161 }
1161 1162
1162 if (!raw_cpu_has_fpu) { 1163 if (!raw_cpu_has_fpu || err) {
1163 int sig; 1164 int sig;
1164 void __user *fault_addr = NULL; 1165 void __user *fault_addr = NULL;
1165 sig = fpu_emulator_cop1Handler(regs, 1166 sig = fpu_emulator_cop1Handler(regs,
1166 &current->thread.fpu, 1167 &current->thread.fpu,
1167 0, &fault_addr); 1168 0, &fault_addr);
1168 if (!process_fpemu_return(sig, fault_addr)) 1169 if (!process_fpemu_return(sig, fault_addr) && !err)
1169 mt_ase_fp_affinity(); 1170 mt_ase_fp_affinity();
1170 } 1171 }
1171 1172
@@ -1336,6 +1337,8 @@ static inline void parity_protection_init(void)
1336 case CPU_34K: 1337 case CPU_34K:
1337 case CPU_74K: 1338 case CPU_74K:
1338 case CPU_1004K: 1339 case CPU_1004K:
1340 case CPU_INTERAPTIV:
1341 case CPU_PROAPTIV:
1339 { 1342 {
1340#define ERRCTL_PE 0x80000000 1343#define ERRCTL_PE 0x80000000
1341#define ERRCTL_L2P 0x00800000 1344#define ERRCTL_L2P 0x00800000
@@ -1425,14 +1428,27 @@ asmlinkage void cache_parity_error(void)
1425 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1428 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1426 reg_val & (1<<30) ? "secondary" : "primary", 1429 reg_val & (1<<30) ? "secondary" : "primary",
1427 reg_val & (1<<31) ? "data" : "insn"); 1430 reg_val & (1<<31) ? "data" : "insn");
1428 printk("Error bits: %s%s%s%s%s%s%s\n", 1431 if (cpu_has_mips_r2 &&
1429 reg_val & (1<<29) ? "ED " : "", 1432 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1430 reg_val & (1<<28) ? "ET " : "", 1433 pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1431 reg_val & (1<<26) ? "EE " : "", 1434 reg_val & (1<<29) ? "ED " : "",
1432 reg_val & (1<<25) ? "EB " : "", 1435 reg_val & (1<<28) ? "ET " : "",
1433 reg_val & (1<<24) ? "EI " : "", 1436 reg_val & (1<<27) ? "ES " : "",
1434 reg_val & (1<<23) ? "E1 " : "", 1437 reg_val & (1<<26) ? "EE " : "",
1435 reg_val & (1<<22) ? "E0 " : ""); 1438 reg_val & (1<<25) ? "EB " : "",
1439 reg_val & (1<<24) ? "EI " : "",
1440 reg_val & (1<<23) ? "E1 " : "",
1441 reg_val & (1<<22) ? "E0 " : "");
1442 } else {
1443 pr_err("Error bits: %s%s%s%s%s%s%s\n",
1444 reg_val & (1<<29) ? "ED " : "",
1445 reg_val & (1<<28) ? "ET " : "",
1446 reg_val & (1<<26) ? "EE " : "",
1447 reg_val & (1<<25) ? "EB " : "",
1448 reg_val & (1<<24) ? "EI " : "",
1449 reg_val & (1<<23) ? "E1 " : "",
1450 reg_val & (1<<22) ? "E0 " : "");
1451 }
1436 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); 1452 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1437 1453
1438#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) 1454#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
@@ -1446,6 +1462,34 @@ asmlinkage void cache_parity_error(void)
1446 panic("Can't handle the cache error!"); 1462 panic("Can't handle the cache error!");
1447} 1463}
1448 1464
1465asmlinkage void do_ftlb(void)
1466{
1467 const int field = 2 * sizeof(unsigned long);
1468 unsigned int reg_val;
1469
1470 /* For the moment, report the problem and hang. */
1471 if (cpu_has_mips_r2 &&
1472 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1473 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1474 read_c0_ecc());
1475 pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1476 reg_val = read_c0_cacheerr();
1477 pr_err("c0_cacheerr == %08x\n", reg_val);
1478
1479 if ((reg_val & 0xc0000000) == 0xc0000000) {
1480 pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1481 } else {
1482 pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1483 reg_val & (1<<30) ? "secondary" : "primary",
1484 reg_val & (1<<31) ? "data" : "insn");
1485 }
1486 } else {
1487 pr_err("FTLB error exception\n");
1488 }
1489 /* Just print the cacheerr bits for now */
1490 cache_parity_error();
1491}
1492
1449/* 1493/*
1450 * SDBBP EJTAG debug exception handler. 1494 * SDBBP EJTAG debug exception handler.
1451 * We skip the instruction and return to the next instruction. 1495 * We skip the instruction and return to the next instruction.
@@ -1995,6 +2039,7 @@ void __init trap_init(void)
1995 if (cpu_has_fpu && !cpu_has_nofpuex) 2039 if (cpu_has_fpu && !cpu_has_nofpuex)
1996 set_except_vector(15, handle_fpe); 2040 set_except_vector(15, handle_fpe);
1997 2041
2042 set_except_vector(16, handle_ftlb);
1998 set_except_vector(22, handle_mdmx); 2043 set_except_vector(22, handle_mdmx);
1999 2044
2000 if (cpu_has_mcheck) 2045 if (cpu_has_mcheck)
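
Both cache_parity_error() and the new do_ftlb() gate the richer decode on an R2 core whose PRId company field identifies MIPS Technologies, i.e. (processor_id & 0xff0000) == PRID_COMP_MIPS, and only then report the additional ES bit (bit 27). A stand-alone decoder for the same bit layout; the sample register and PRId values below are invented for illustration.

```c
#include <stdio.h>

#define PRID_COMP_MIPS 0x010000	/* company ID field: MIPS Technologies */

/* Decode a c0_cacheerr value the way the hunks above do.  The ES bit
 * is only reported for R2 cores with a MIPS Technologies PRId. */
static void decode_cacheerr(unsigned int reg_val,
			    unsigned int prid, int has_mips_r2)
{
	printf("%s cache fault in %s reference\n",
	       reg_val & (1u << 30) ? "secondary" : "primary",
	       reg_val & (1u << 31) ? "data" : "insn");

	if (has_mips_r2 && (prid & 0xff0000) == PRID_COMP_MIPS)
		printf("ES=%d\n", !!(reg_val & (1u << 27)));
}

int main(void)
{
	/* Invented sample: primary data fault with ES set. */
	decode_cacheerr(0x88000000, 0x0001a300, 1);
	return 0;
}
```
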
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
new file mode 100644
index 000000000000..9268ebc0f61e
--- /dev/null
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -0,0 +1,180 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
7 * Copyright (C) 2013 Imagination Technologies Ltd.
8 */
9#include <linux/kernel.h>
10#include <linux/device.h>
11#include <linux/fs.h>
12#include <linux/slab.h>
13#include <linux/export.h>
14
15#include <asm/vpe.h>
16
17static int major;
18
19void cleanup_tc(struct tc *tc)
20{
21
22}
23
24static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
25 const char *buf, size_t len)
26{
27 struct vpe *vpe = get_vpe(aprp_cpu_index());
28 struct vpe_notifications *notifier;
29
30 list_for_each_entry(notifier, &vpe->notify, list)
31 notifier->stop(aprp_cpu_index());
32
33 release_progmem(vpe->load_addr);
34 vpe->state = VPE_STATE_UNUSED;
35
36 return len;
37}
38static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
39
40static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
41 char *buf)
42{
43 struct vpe *vpe = get_vpe(aprp_cpu_index());
44
45 return sprintf(buf, "%d\n", vpe->ntcs);
46}
47
48static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
49 const char *buf, size_t len)
50{
51 struct vpe *vpe = get_vpe(aprp_cpu_index());
52 unsigned long new;
53 int ret;
54
55 ret = kstrtoul(buf, 0, &new);
56 if (ret < 0)
57 return ret;
58
59 /* APRP can only reserve one TC in a VPE and no more. */
60 if (new != 1)
61 return -EINVAL;
62
63 vpe->ntcs = new;
64
65 return len;
66}
67static DEVICE_ATTR_RW(ntcs);
68
69static struct attribute *vpe_attrs[] = {
70 &dev_attr_kill.attr,
71 &dev_attr_ntcs.attr,
72 NULL,
73};
74ATTRIBUTE_GROUPS(vpe);
75
76static void vpe_device_release(struct device *cd)
77{
78 kfree(cd);
79}
80
81static struct class vpe_class = {
82 .name = "vpe",
83 .owner = THIS_MODULE,
84 .dev_release = vpe_device_release,
85 .dev_groups = vpe_groups,
86};
87
88static struct device vpe_device;
89
90int __init vpe_module_init(void)
91{
92 struct vpe *v = NULL;
93 struct tc *t;
94 int err;
95
96 if (!cpu_has_mipsmt) {
97 pr_warn("VPE loader: not a MIPS MT capable processor\n");
98 return -ENODEV;
99 }
100
101 if (num_possible_cpus() - aprp_cpu_index() < 1) {
102 pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
103 "Pass maxcpus=<n> argument as kernel argument\n");
104 return -ENODEV;
105 }
106
107 major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
108 if (major < 0) {
109 pr_warn("VPE loader: unable to register character device\n");
110 return major;
111 }
112
113 err = class_register(&vpe_class);
114 if (err) {
115 pr_err("vpe_class registration failed\n");
116 goto out_chrdev;
117 }
118
119 device_initialize(&vpe_device);
120 vpe_device.class = &vpe_class;
121 vpe_device.parent = NULL;
122 dev_set_name(&vpe_device, "vpe_sp");
123 vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
124 err = device_add(&vpe_device);
125 if (err) {
126 pr_err("Adding vpe_device failed\n");
127 goto out_class;
128 }
129
130 t = alloc_tc(aprp_cpu_index());
131 if (!t) {
132 pr_warn("VPE: unable to allocate TC\n");
133 err = -ENOMEM;
134 goto out_dev;
135 }
136
137 /* VPE */
138 v = alloc_vpe(aprp_cpu_index());
139 if (v == NULL) {
140 pr_warn("VPE: unable to allocate VPE\n");
141 kfree(t);
142 err = -ENOMEM;
143 goto out_dev;
144 }
145
146 v->ntcs = 1;
147
148 /* add the tc to the list of this vpe's tc's. */
149 list_add(&t->tc, &v->tc);
150
151 /* TC */
152 t->pvpe = v; /* set the parent vpe */
153
154 return 0;
155
156out_dev:
157 device_del(&vpe_device);
158
159out_class:
160 class_unregister(&vpe_class);
161
162out_chrdev:
163 unregister_chrdev(major, VPE_MODULE_NAME);
164
165 return err;
166}
167
168void __exit vpe_module_exit(void)
169{
170 struct vpe *v, *n;
171
172 device_del(&vpe_device);
173 class_unregister(&vpe_class);
174 unregister_chrdev(major, VPE_MODULE_NAME);
175
176 /* No locking needed here */
177 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
178 if (v->state != VPE_STATE_UNUSED)
179 release_vpe(v);
180}
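
vpe-cmp.c publishes its controls as sysfs attributes on the class device. A minimal userspace sketch that sets ntcs; the path assumes the device keeps the "vpe_sp" name given by dev_set_name() above (the MT variant further down names its device "vpe1" instead).

```c
#include <stdio.h>

int main(void)
{
	/* Assumed sysfs path for the "vpe" class device named "vpe_sp". */
	FILE *f = fopen("/sys/class/vpe/vpe_sp/ntcs", "w");

	if (!f) {
		perror("ntcs");
		return 1;
	}
	fputs("1\n", f);	/* ntcs_store() on CMP accepts exactly 1 */
	fclose(f);
	return 0;
}
```
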
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 000000000000..949ae0e17018
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,523 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
7 * Copyright (C) 2013 Imagination Technologies Ltd.
8 */
9#include <linux/kernel.h>
10#include <linux/device.h>
11#include <linux/fs.h>
12#include <linux/slab.h>
13#include <linux/export.h>
14
15#include <asm/mipsregs.h>
16#include <asm/mipsmtregs.h>
17#include <asm/mips_mt.h>
18#include <asm/vpe.h>
19
20static int major;
21
22/* The number of TCs and VPEs physically available on the core */
23static int hw_tcs, hw_vpes;
24
25/* We are prepared so configure and start the VPE... */
26int vpe_run(struct vpe *v)
27{
28 unsigned long flags, val, dmt_flag;
29 struct vpe_notifications *notifier;
30 unsigned int vpeflags;
31 struct tc *t;
32
33 /* check we are the Master VPE */
34 local_irq_save(flags);
35 val = read_c0_vpeconf0();
36 if (!(val & VPECONF0_MVP)) {
37 pr_warn("VPE loader: only Master VPE's are able to config MT\n");
38 local_irq_restore(flags);
39
40 return -1;
41 }
42
43 dmt_flag = dmt();
44 vpeflags = dvpe();
45
46 if (list_empty(&v->tc)) {
47 evpe(vpeflags);
48 emt(dmt_flag);
49 local_irq_restore(flags);
50
51 pr_warn("VPE loader: No TC's associated with VPE %d\n",
52 v->minor);
53
54 return -ENOEXEC;
55 }
56
57 t = list_first_entry(&v->tc, struct tc, tc);
58
59 /* Put MVPE's into 'configuration state' */
60 set_c0_mvpcontrol(MVPCONTROL_VPC);
61
62 settc(t->index);
63
64 /* should check it is halted, and not activated */
65 if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
66 !(read_tc_c0_tchalt() & TCHALT_H)) {
67 evpe(vpeflags);
68 emt(dmt_flag);
69 local_irq_restore(flags);
70
71 pr_warn("VPE loader: TC %d is already active!\n",
72 t->index);
73
74 return -ENOEXEC;
75 }
76
77 /*
78 * Write the address we want it to start running from in the TCPC
79 * register.
80 */
81 write_tc_c0_tcrestart((unsigned long)v->__start);
82 write_tc_c0_tccontext((unsigned long)0);
83
84 /*
85 * Mark the TC as activated, not interrupt exempt and not dynamically
86 * allocatable
87 */
88 val = read_tc_c0_tcstatus();
89 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
90 write_tc_c0_tcstatus(val);
91
92 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
93
94 /*
95 * The sde-kit passes 'memsize' to __start in $a3, so set something
96 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
97 * DFLT_HEAP_SIZE when you compile your program
98 */
99 mttgpr(6, v->ntcs);
100 mttgpr(7, physical_memsize);
101
102 /* set up VPE1 */
103 /*
104 * bind the TC to VPE 1 as late as possible so we only have the final
105 * VPE registers to set up, and so an EJTAG probe can trigger on it
106 */
107 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
108
109 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
110
111 back_to_back_c0_hazard();
112
113 /* Set up the XTC bit in vpeconf0 to point at our tc */
114 write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
115 | (t->index << VPECONF0_XTC_SHIFT));
116
117 back_to_back_c0_hazard();
118
119 /* enable this VPE */
120 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
121
122 /* clear out any left overs from a previous program */
123 write_vpe_c0_status(0);
124 write_vpe_c0_cause(0);
125
126 /* take system out of configuration state */
127 clear_c0_mvpcontrol(MVPCONTROL_VPC);
128
129 /*
130 * SMTC/SMVP kernels manage VPE enable independently,
131 * but uniprocessor kernels need to turn it on, even
132 * if that wasn't the pre-dvpe() state.
133 */
134#ifdef CONFIG_SMP
135 evpe(vpeflags);
136#else
137 evpe(EVPE_ENABLE);
138#endif
139 emt(dmt_flag);
140 local_irq_restore(flags);
141
142 list_for_each_entry(notifier, &v->notify, list)
143 notifier->start(VPE_MODULE_MINOR);
144
145 return 0;
146}
147
148void cleanup_tc(struct tc *tc)
149{
150 unsigned long flags;
151 unsigned int mtflags, vpflags;
152 int tmp;
153
154 local_irq_save(flags);
155 mtflags = dmt();
156 vpflags = dvpe();
157 /* Put MVPE's into 'configuration state' */
158 set_c0_mvpcontrol(MVPCONTROL_VPC);
159
160 settc(tc->index);
161 tmp = read_tc_c0_tcstatus();
162
163 /* mark not allocated and not dynamically allocatable */
164 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
165 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
166 write_tc_c0_tcstatus(tmp);
167
168 write_tc_c0_tchalt(TCHALT_H);
169 mips_ihb();
170
171 clear_c0_mvpcontrol(MVPCONTROL_VPC);
172 evpe(vpflags);
173 emt(mtflags);
174 local_irq_restore(flags);
175}
176
177/* module wrapper entry points */
178/* give me a vpe */
179void *vpe_alloc(void)
180{
181 int i;
182 struct vpe *v;
183
184 /* find a vpe */
185 for (i = 1; i < MAX_VPES; i++) {
186 v = get_vpe(i);
187 if (v != NULL) {
188 v->state = VPE_STATE_INUSE;
189 return v;
190 }
191 }
192 return NULL;
193}
194EXPORT_SYMBOL(vpe_alloc);
195
196/* start running from here */
197int vpe_start(void *vpe, unsigned long start)
198{
199 struct vpe *v = vpe;
200
201 v->__start = start;
202 return vpe_run(v);
203}
204EXPORT_SYMBOL(vpe_start);
205
206/* halt it for now */
207int vpe_stop(void *vpe)
208{
209 struct vpe *v = vpe;
210 struct tc *t;
211 unsigned int evpe_flags;
212
213 evpe_flags = dvpe();
214
215 t = list_entry(v->tc.next, struct tc, tc);
216 if (t != NULL) {
217 settc(t->index);
218 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
219 }
220
221 evpe(evpe_flags);
222
223 return 0;
224}
225EXPORT_SYMBOL(vpe_stop);
226
227/* I've done with it thank you */
228int vpe_free(void *vpe)
229{
230 struct vpe *v = vpe;
231 struct tc *t;
232 unsigned int evpe_flags;
233
234 t = list_entry(v->tc.next, struct tc, tc);
235 if (t == NULL)
236 return -ENOEXEC;
237
238 evpe_flags = dvpe();
239
240 /* Put MVPE's into 'configuration state' */
241 set_c0_mvpcontrol(MVPCONTROL_VPC);
242
243 settc(t->index);
244 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
245
246 /* halt the TC */
247 write_tc_c0_tchalt(TCHALT_H);
248 mips_ihb();
249
250 /* mark the TC unallocated */
251 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
252
253 v->state = VPE_STATE_UNUSED;
254
255 clear_c0_mvpcontrol(MVPCONTROL_VPC);
256 evpe(evpe_flags);
257
258 return 0;
259}
260EXPORT_SYMBOL(vpe_free);
261
262static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
263 const char *buf, size_t len)
264{
265 struct vpe *vpe = get_vpe(aprp_cpu_index());
266 struct vpe_notifications *notifier;
267
268 list_for_each_entry(notifier, &vpe->notify, list)
269 notifier->stop(aprp_cpu_index());
270
271 release_progmem(vpe->load_addr);
272 cleanup_tc(get_tc(aprp_cpu_index()));
273 vpe_stop(vpe);
274 vpe_free(vpe);
275
276 return len;
277}
278static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
279
280static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
281 char *buf)
282{
283 struct vpe *vpe = get_vpe(aprp_cpu_index());
284
285 return sprintf(buf, "%d\n", vpe->ntcs);
286}
287
288static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
289 const char *buf, size_t len)
290{
291 struct vpe *vpe = get_vpe(aprp_cpu_index());
292 unsigned long new;
293 int ret;
294
295 ret = kstrtoul(buf, 0, &new);
296 if (ret < 0)
297 return ret;
298
299 if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
300 return -EINVAL;
301
302 vpe->ntcs = new;
303
304 return len;
305}
306static DEVICE_ATTR_RW(ntcs);
307
308static struct attribute *vpe_attrs[] = {
309 &dev_attr_kill.attr,
310 &dev_attr_ntcs.attr,
311 NULL,
312};
313ATTRIBUTE_GROUPS(vpe);
314
315static void vpe_device_release(struct device *cd)
316{
317 kfree(cd);
318}
319
320static struct class vpe_class = {
321 .name = "vpe",
322 .owner = THIS_MODULE,
323 .dev_release = vpe_device_release,
324 .dev_groups = vpe_groups,
325};
326
327static struct device vpe_device;
328
329int __init vpe_module_init(void)
330{
331 unsigned int mtflags, vpflags;
332 unsigned long flags, val;
333 struct vpe *v = NULL;
334 struct tc *t;
335 int tc, err;
336
337 if (!cpu_has_mipsmt) {
338 pr_warn("VPE loader: not a MIPS MT capable processor\n");
339 return -ENODEV;
340 }
341
342 if (vpelimit == 0) {
343 pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
344 "Pass maxvpes=<n> argument as kernel argument\n");
345
346 return -ENODEV;
347 }
348
349 if (aprp_cpu_index() == 0) {
350 pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
351 "Pass maxtcs=<n> argument as kernel argument\n");
352
353 return -ENODEV;
354 }
355
356 major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
357 if (major < 0) {
358 pr_warn("VPE loader: unable to register character device\n");
359 return major;
360 }
361
362 err = class_register(&vpe_class);
363 if (err) {
364 pr_err("vpe_class registration failed\n");
365 goto out_chrdev;
366 }
367
368 device_initialize(&vpe_device);
369 vpe_device.class = &vpe_class;
370 vpe_device.parent = NULL;
371 dev_set_name(&vpe_device, "vpe1");
372 vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
373 err = device_add(&vpe_device);
374 if (err) {
375 pr_err("Adding vpe_device failed\n");
376 goto out_class;
377 }
378
379 local_irq_save(flags);
380 mtflags = dmt();
381 vpflags = dvpe();
382
383 /* Put MVPE's into 'configuration state' */
384 set_c0_mvpcontrol(MVPCONTROL_VPC);
385
386 val = read_c0_mvpconf0();
387 hw_tcs = (val & MVPCONF0_PTC) + 1;
388 hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
389
390 for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
391 /*
392 * Must re-enable multithreading temporarily or in case we
393 * reschedule send IPIs or similar we might hang.
394 */
395 clear_c0_mvpcontrol(MVPCONTROL_VPC);
396 evpe(vpflags);
397 emt(mtflags);
398 local_irq_restore(flags);
399 t = alloc_tc(tc);
400 if (!t) {
401 err = -ENOMEM;
402 goto out_dev;
403 }
404
405 local_irq_save(flags);
406 mtflags = dmt();
407 vpflags = dvpe();
408 set_c0_mvpcontrol(MVPCONTROL_VPC);
409
410 /* VPE's */
411 if (tc < hw_tcs) {
412 settc(tc);
413
414 v = alloc_vpe(tc);
415 if (v == NULL) {
416 pr_warn("VPE: unable to allocate VPE\n");
417 goto out_reenable;
418 }
419
420 v->ntcs = hw_tcs - aprp_cpu_index();
421
422 /* add the tc to the list of this vpe's tc's. */
423 list_add(&t->tc, &v->tc);
424
425 /* deactivate all but vpe0 */
426 if (tc >= aprp_cpu_index()) {
427 unsigned long tmp = read_vpe_c0_vpeconf0();
428
429 tmp &= ~VPECONF0_VPA;
430
431 /* master VPE */
432 tmp |= VPECONF0_MVP;
433 write_vpe_c0_vpeconf0(tmp);
434 }
435
436 /* disable multi-threading with TC's */
437 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
438 ~VPECONTROL_TE);
439
440 if (tc >= vpelimit) {
441 /*
442 * Set config to be the same as vpe0,
443 * particularly kseg0 coherency alg
444 */
445 write_vpe_c0_config(read_c0_config());
446 }
447 }
448
449 /* TC's */
450 t->pvpe = v; /* set the parent vpe */
451
452 if (tc >= aprp_cpu_index()) {
453 unsigned long tmp;
454
455 settc(tc);
456
457 /* Any TC that is bound to VPE0 gets left as is - in
458 * case we are running SMTC on VPE0. A TC that is bound
459 * to any other VPE gets bound to VPE0, ideally I'd like
460 * to make it homeless but it doesn't appear to let me
461 * bind a TC to a non-existent VPE. Which is perfectly
462 * reasonable.
463 *
464 * The (un)bound state is visible to an EJTAG probe so
465 * may notify GDB...
466 */
467 tmp = read_tc_c0_tcbind();
468 if (tmp & TCBIND_CURVPE) {
469 /* tc is bound >vpe0 */
470 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
471
472 t->pvpe = get_vpe(0); /* set the parent vpe */
473 }
474
475 /* halt the TC */
476 write_tc_c0_tchalt(TCHALT_H);
477 mips_ihb();
478
479 tmp = read_tc_c0_tcstatus();
480
481 /* mark not activated and not dynamically allocatable */
482 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
483 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
484 write_tc_c0_tcstatus(tmp);
485 }
486 }
487
488out_reenable:
489 /* release config state */
490 clear_c0_mvpcontrol(MVPCONTROL_VPC);
491
492 evpe(vpflags);
493 emt(mtflags);
494 local_irq_restore(flags);
495
496 return 0;
497
498out_dev:
499 device_del(&vpe_device);
500
501out_class:
502 class_unregister(&vpe_class);
503
504out_chrdev:
505 unregister_chrdev(major, VPE_MODULE_NAME);
506
507 return err;
508}
509
510void __exit vpe_module_exit(void)
511{
512 struct vpe *v, *n;
513
514 device_del(&vpe_device);
515 class_unregister(&vpe_class);
516 unregister_chrdev(major, VPE_MODULE_NAME);
517
518 /* No locking needed here */
519 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
520 if (v->state != VPE_STATE_UNUSED)
521 release_vpe(v);
522 }
523}
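
vpe-mt.c retains the exported vpe_alloc()/vpe_start()/vpe_stop()/vpe_free() wrappers for in-kernel users. The module sketch below shows a hypothetical client of that API; the firmware entry address is an assumption made for illustration, and a real caller would take it from the image it loaded.

```c
#include <linux/module.h>
#include <linux/errno.h>

#include <asm/vpe.h>

static void *vpe;

static int __init vpe_client_init(void)
{
	/* Assumption: entry point of firmware already resident in RAM. */
	unsigned long fw_entry = 0x80800000UL;

	vpe = vpe_alloc();		/* claim a spare VPE */
	if (!vpe)
		return -ENODEV;

	return vpe_start(vpe, fw_entry);	/* configure the TC and run */
}

static void __exit vpe_client_exit(void)
{
	vpe_stop(vpe);		/* clear VPA, halting the VPE */
	vpe_free(vpe);		/* mark the TC/VPE unused again */
}

module_init(vpe_client_init);
module_exit(vpe_client_exit);
MODULE_LICENSE("GPL");
```
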
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 59b2b3cd7885..11da314565cc 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1,37 +1,22 @@
1/* 1/*
2 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. 2 * This file is subject to the terms and conditions of the GNU General Public
3 * 3 * License. See the file "COPYING" in the main directory of this archive
4 * This program is free software; you can distribute it and/or modify it 4 * for more details.
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 * 5 *
13 * You should have received a copy of the GNU General Public License along 6 * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
14 * with this program; if not, write to the Free Software Foundation, Inc., 7 * Copyright (C) 2013 Imagination Technologies Ltd.
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 */
17
18/*
19 * VPE support module
20 *
21 * Provides support for loading a MIPS SP program on VPE1.
22 * The SP environment is rather simple, no tlb's. It needs to be relocatable
23 * (or partially linked). You should initialise your stack in the startup
24 * code. This loader looks for the symbol __start and sets up
25 * execution to resume from there. The MIPS SDE kit contains suitable examples.
26 * 8 *
27 * To load and run, simply cat a SP 'program file' to /dev/vpe1. 9 * VPE support module for loading a MIPS SP program into VPE1. The SP
28 * i.e cat spapp >/dev/vpe1. 10 * environment is rather simple since there are no TLBs. It needs
11 * to be relocatable (or partially linked). Initialize your stack in
12 * the startup code. The loader looks for the symbol __start and sets
13 * up the execution to resume from there. To load and run, simply
14 * cat an SP 'binary' to the /dev/vpe1 device.
29 */ 15 */
30#include <linux/kernel.h> 16#include <linux/kernel.h>
31#include <linux/device.h> 17#include <linux/device.h>
32#include <linux/fs.h> 18#include <linux/fs.h>
33#include <linux/init.h> 19#include <linux/init.h>
34#include <asm/uaccess.h>
35#include <linux/slab.h> 20#include <linux/slab.h>
36#include <linux/list.h> 21#include <linux/list.h>
37#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
@@ -46,13 +31,10 @@
46#include <asm/mipsmtregs.h> 31#include <asm/mipsmtregs.h>
47#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
48#include <linux/atomic.h> 33#include <linux/atomic.h>
49#include <asm/cpu.h>
50#include <asm/mips_mt.h> 34#include <asm/mips_mt.h>
51#include <asm/processor.h> 35#include <asm/processor.h>
52#include <asm/vpe.h> 36#include <asm/vpe.h>
53 37
54typedef void *vpe_handle;
55
56#ifndef ARCH_SHF_SMALL 38#ifndef ARCH_SHF_SMALL
57#define ARCH_SHF_SMALL 0 39#define ARCH_SHF_SMALL 0
58#endif 40#endif
@@ -60,96 +42,15 @@ typedef void *vpe_handle;
60/* If this is set, the section belongs in the init part of the module */ 42/* If this is set, the section belongs in the init part of the module */
61#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) 43#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
62 44
63/* 45struct vpe_control vpecontrol = {
64 * The number of TCs and VPEs physically available on the core
65 */
66static int hw_tcs, hw_vpes;
67static char module_name[] = "vpe";
68static int major;
69static const int minor = 1; /* fixed for now */
70
71/* grab the likely amount of memory we will need. */
72#ifdef CONFIG_MIPS_VPE_LOADER_TOM
73#define P_SIZE (2 * 1024 * 1024)
74#else
75/* add an overhead to the max kmalloc size for non-striped symbols/etc */
76#define P_SIZE (256 * 1024)
77#endif
78
79extern unsigned long physical_memsize;
80
81#define MAX_VPES 16
82#define VPE_PATH_MAX 256
83
84enum vpe_state {
85 VPE_STATE_UNUSED = 0,
86 VPE_STATE_INUSE,
87 VPE_STATE_RUNNING
88};
89
90enum tc_state {
91 TC_STATE_UNUSED = 0,
92 TC_STATE_INUSE,
93 TC_STATE_RUNNING,
94 TC_STATE_DYNAMIC
95};
96
97struct vpe {
98 enum vpe_state state;
99
100 /* (device) minor associated with this vpe */
101 int minor;
102
103 /* elfloader stuff */
104 void *load_addr;
105 unsigned long len;
106 char *pbuffer;
107 unsigned long plen;
108 unsigned int uid, gid;
109 char cwd[VPE_PATH_MAX];
110
111 unsigned long __start;
112
113 /* tc's associated with this vpe */
114 struct list_head tc;
115
116 /* The list of vpe's */
117 struct list_head list;
118
119 /* shared symbol address */
120 void *shared_ptr;
121
122 /* the list of who wants to know when something major happens */
123 struct list_head notify;
124
125 unsigned int ntcs;
126};
127
128struct tc {
129 enum tc_state state;
130 int index;
131
132 struct vpe *pvpe; /* parent VPE */
133 struct list_head tc; /* The list of TC's with this VPE */
134 struct list_head list; /* The global list of tc's */
135};
136
137struct {
138 spinlock_t vpe_list_lock;
139 struct list_head vpe_list; /* Virtual processing elements */
140 spinlock_t tc_list_lock;
141 struct list_head tc_list; /* Thread contexts */
142} vpecontrol = {
143 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), 46 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
144 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), 47 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
145 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), 48 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
146 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) 49 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
147}; 50};
148 51
149static void release_progmem(void *ptr);
150
151/* get the vpe associated with this minor */ 52/* get the vpe associated with this minor */
152static struct vpe *get_vpe(int minor) 53struct vpe *get_vpe(int minor)
153{ 54{
154 struct vpe *res, *v; 55 struct vpe *res, *v;
155 56
@@ -159,7 +60,7 @@ static struct vpe *get_vpe(int minor)
159 res = NULL; 60 res = NULL;
160 spin_lock(&vpecontrol.vpe_list_lock); 61 spin_lock(&vpecontrol.vpe_list_lock);
161 list_for_each_entry(v, &vpecontrol.vpe_list, list) { 62 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
162 if (v->minor == minor) { 63 if (v->minor == VPE_MODULE_MINOR) {
163 res = v; 64 res = v;
164 break; 65 break;
165 } 66 }
@@ -170,7 +71,7 @@ static struct vpe *get_vpe(int minor)
170} 71}
171 72
172/* get the tc associated with this index */ 73/* get the tc associated with this index */
173static struct tc *get_tc(int index) 74struct tc *get_tc(int index)
174{ 75{
175 struct tc *res, *t; 76 struct tc *res, *t;
176 77
@@ -188,12 +89,13 @@ static struct tc *get_tc(int index)
188} 89}
189 90
190/* allocate a vpe and associate it with this minor (or index) */ 91/* allocate a vpe and associate it with this minor (or index) */
191static struct vpe *alloc_vpe(int minor) 92struct vpe *alloc_vpe(int minor)
192{ 93{
193 struct vpe *v; 94 struct vpe *v;
194 95
195 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) 96 v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
196 return NULL; 97 if (v == NULL)
98 goto out;
197 99
198 INIT_LIST_HEAD(&v->tc); 100 INIT_LIST_HEAD(&v->tc);
199 spin_lock(&vpecontrol.vpe_list_lock); 101 spin_lock(&vpecontrol.vpe_list_lock);
@@ -201,17 +103,19 @@ static struct vpe *alloc_vpe(int minor)
201 spin_unlock(&vpecontrol.vpe_list_lock); 103 spin_unlock(&vpecontrol.vpe_list_lock);
202 104
203 INIT_LIST_HEAD(&v->notify); 105 INIT_LIST_HEAD(&v->notify);
204 v->minor = minor; 106 v->minor = VPE_MODULE_MINOR;
205 107
108out:
206 return v; 109 return v;
207} 110}
208 111
209/* allocate a tc. At startup only tc0 is running, all others can be halted. */ 112/* allocate a tc. At startup only tc0 is running, all others can be halted. */
210static struct tc *alloc_tc(int index) 113struct tc *alloc_tc(int index)
211{ 114{
212 struct tc *tc; 115 struct tc *tc;
213 116
214 if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) 117 tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
118 if (tc == NULL)
215 goto out; 119 goto out;
216 120
217 INIT_LIST_HEAD(&tc->tc); 121 INIT_LIST_HEAD(&tc->tc);
@@ -226,7 +130,7 @@ out:
226} 130}
227 131
228/* clean up and free everything */ 132/* clean up and free everything */
229static void release_vpe(struct vpe *v) 133void release_vpe(struct vpe *v)
230{ 134{
231 list_del(&v->list); 135 list_del(&v->list);
232 if (v->load_addr) 136 if (v->load_addr)
@@ -234,28 +138,8 @@ static void release_vpe(struct vpe *v)
234 kfree(v); 138 kfree(v);
235} 139}
236 140
237static void __maybe_unused dump_mtregs(void) 141/* Find some VPE program space */
238{ 142void *alloc_progmem(unsigned long len)
239 unsigned long val;
240
241 val = read_c0_config3();
242 printk("config3 0x%lx MT %ld\n", val,
243 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
244
245 val = read_c0_mvpcontrol();
246 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
247 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
248 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
249 (val & MVPCONTROL_EVP));
250
251 val = read_c0_mvpconf0();
252 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
253 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
254 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
255}
256
257/* Find some VPE program space */
258static void *alloc_progmem(unsigned long len)
259{ 143{
260 void *addr; 144 void *addr;
261 145
@@ -274,7 +158,7 @@ static void *alloc_progmem(unsigned long len)
274 return addr; 158 return addr;
275} 159}
276 160
277static void release_progmem(void *ptr) 161void release_progmem(void *ptr)
278{ 162{
279#ifndef CONFIG_MIPS_VPE_LOADER_TOM 163#ifndef CONFIG_MIPS_VPE_LOADER_TOM
280 kfree(ptr); 164 kfree(ptr);
@@ -282,7 +166,7 @@ static void release_progmem(void *ptr)
282} 166}
283 167
284/* Update size with this section: return offset. */ 168/* Update size with this section: return offset. */
285static long get_offset(unsigned long *size, Elf_Shdr * sechdr) 169static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
286{ 170{
287 long ret; 171 long ret;
288 172
@@ -295,8 +179,8 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
295 might -- code, read-only data, read-write data, small data. Tally 179 might -- code, read-only data, read-write data, small data. Tally
296 sizes, and place the offsets into sh_entsize fields: high bit means it 180 sizes, and place the offsets into sh_entsize fields: high bit means it
297 belongs in init. */ 181 belongs in init. */
298static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, 182static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
299 Elf_Shdr * sechdrs, const char *secstrings) 183 Elf_Shdr *sechdrs, const char *secstrings)
300{ 184{
301 static unsigned long const masks[][2] = { 185 static unsigned long const masks[][2] = {
302 /* NOTE: all executable code must be the first section 186 /* NOTE: all executable code must be the first section
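The tallying described in the comment above reduces to a few lines per section: round the running image size up to the section's alignment, let the section land there, and grow the total. A sketch of the usual implementation (the full body of get_offset() lies outside this hunk; ALIGN is the kernel macro, and the ?: guards a zero sh_addralign):

    static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
    {
            long ret;

            /* round the running size up to this section's alignment */
            ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
            /* the section is placed at 'ret'; the image grows past it */
            *size = ret + sechdr->sh_size;
            return ret;
    }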
@@ -316,7 +200,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
316 for (i = 0; i < hdr->e_shnum; ++i) { 200 for (i = 0; i < hdr->e_shnum; ++i) {
317 Elf_Shdr *s = &sechdrs[i]; 201 Elf_Shdr *s = &sechdrs[i];
318 202
319 // || strncmp(secstrings + s->sh_name, ".init", 5) == 0)
320 if ((s->sh_flags & masks[m][0]) != masks[m][0] 203 if ((s->sh_flags & masks[m][0]) != masks[m][0]
321 || (s->sh_flags & masks[m][1]) 204 || (s->sh_flags & masks[m][1])
322 || s->sh_entsize != ~0UL) 205 || s->sh_entsize != ~0UL)
@@ -331,7 +214,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
331 } 214 }
332} 215}
333 216
334
335/* from module-elf32.c, but subverted a little */ 217/* from module-elf32.c, but subverted a little */
336 218
337struct mips_hi16 { 219struct mips_hi16 {
@@ -354,20 +236,18 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
354{ 236{
355 int rel; 237 int rel;
356 238
357 if( !(*location & 0xffff) ) { 239 if (!(*location & 0xffff)) {
358 rel = (int)v - gp_addr; 240 rel = (int)v - gp_addr;
359 } 241 } else {
360 else {
361 /* .sbss + gp(relative) + offset */ 242 /* .sbss + gp(relative) + offset */
362 /* kludge! */ 243 /* kludge! */
363 rel = (int)(short)((int)v + gp_offs + 244 rel = (int)(short)((int)v + gp_offs +
364 (int)(short)(*location & 0xffff) - gp_addr); 245 (int)(short)(*location & 0xffff) - gp_addr);
365 } 246 }
366 247
367 if( (rel > 32768) || (rel < -32768) ) { 248 if ((rel > 32768) || (rel < -32768)) {
368 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: " 249 pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
369 "relative address 0x%x out of range of gp register\n", 250 rel);
370 rel);
371 return -ENOEXEC; 251 return -ENOEXEC;
372 } 252 }
373 253
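The double cast in the kludge above is doing real work: the 16-bit field already sitting in the instruction is a signed addend, so it must be sign-extended before the distance from gp is recomputed. In isolation (a standalone sketch; the helper name is illustrative):

    #include <stdint.h>

    /* Recompute a gp-relative addend the way apply_r_mips_gprel16()
     * does: (int)(short) sign-extends the in-place field, so 0x8004
     * contributes -32764, not +32772, to the new distance from gp. */
    static int gprel16_addend(uint32_t insn, int v, int gp_offs, int gp_addr)
    {
            return (int)(short)(v + gp_offs +
                                (int)(short)(insn & 0xffff) - gp_addr);
    }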
@@ -381,12 +261,12 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
381{ 261{
382 int rel; 262 int rel;
383 rel = (((unsigned int)v - (unsigned int)location)); 263 rel = (((unsigned int)v - (unsigned int)location));
384 rel >>= 2; // because the offset is in _instructions_ not bytes. 264 rel >>= 2; /* because the offset is in _instructions_ not bytes. */
385 rel -= 1; // and one instruction less due to the branch delay slot. 265 rel -= 1; /* and one instruction less due to the branch delay slot. */
386 266
387 if( (rel > 32768) || (rel < -32768) ) { 267 if ((rel > 32768) || (rel < -32768)) {
388 printk(KERN_DEBUG "VPE loader: " 268 pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
389 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); 269 rel);
390 return -ENOEXEC; 270 return -ENOEXEC;
391 } 271 }
392 272
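The two adjustments above check out with concrete numbers: a branch at 0x1000 targeting 0x1010 is 16 bytes ahead, i.e. 4 instruction words, and the hardware counts from the delay slot, so the encoded field is 3. As a sketch:

    #include <stdint.h>

    static int pc16_field(uint32_t target, uint32_t location)
    {
            int rel = (int)(target - location);

            rel >>= 2;      /* the offset is counted in instructions */
            rel -= 1;       /* the delay-slot instruction is implicit */
            return rel;     /* pc16_field(0x1010, 0x1000) == 3 */
    }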
@@ -407,8 +287,7 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
407 Elf32_Addr v) 287 Elf32_Addr v)
408{ 288{
409 if (v % 4) { 289 if (v % 4) {
410 printk(KERN_DEBUG "VPE loader: apply_r_mips_26 " 290 pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
411 " unaligned relocation\n");
412 return -ENOEXEC; 291 return -ENOEXEC;
413 } 292 }
414 293
@@ -439,7 +318,7 @@ static int apply_r_mips_hi16(struct module *me, uint32_t *location,
439 * the carry we need to add. Save the information, and let LO16 do the 318 * the carry we need to add. Save the information, and let LO16 do the
440 * actual relocation. 319 * actual relocation.
441 */ 320 */
442 n = kmalloc(sizeof *n, GFP_KERNEL); 321 n = kmalloc(sizeof(*n), GFP_KERNEL);
443 if (!n) 322 if (!n)
444 return -ENOMEM; 323 return -ENOMEM;
445 324
@@ -471,9 +350,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
471 * The value for the HI16 had best be the same. 350 * The value for the HI16 had best be the same.
472 */ 351 */
473 if (v != l->value) { 352 if (v != l->value) {
474 printk(KERN_DEBUG "VPE loader: " 353 pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
475 "apply_r_mips_lo16/hi16: \t"
476 "inconsistent value information\n");
477 goto out_free; 354 goto out_free;
478 } 355 }
479 356
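The deferred-carry dance above exists because the CPU sign-extends the LO16 half when it reassembles the address, so the HI16 half must absorb a carry whenever bit 15 of the final value is set. The net effect in isolation (helper names illustrative):

    #include <stdint.h>

    static uint16_t hi16_of(uint32_t v)
    {
            /* adding 0x8000 folds in the carry from the signed low half */
            return (uint16_t)((v + 0x8000) >> 16);
    }

    static uint16_t lo16_of(uint32_t v)
    {
            return (uint16_t)(v & 0xffff);
    }

    /* Check with v = 0x00018000: hi = 0x0002, lo = 0x8000, and the CPU
     * computes (0x0002 << 16) + (int16_t)0x8000 = 0x20000 - 0x8000 = v. */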
@@ -569,20 +446,19 @@ static int apply_relocations(Elf32_Shdr *sechdrs,
569 + ELF32_R_SYM(r_info); 446 + ELF32_R_SYM(r_info);
570 447
571 if (!sym->st_value) { 448 if (!sym->st_value) {
572 printk(KERN_DEBUG "%s: undefined weak symbol %s\n", 449 pr_debug("%s: undefined weak symbol %s\n",
573 me->name, strtab + sym->st_name); 450 me->name, strtab + sym->st_name);
574 /* just print the warning, don't barf */ 451 /* just print the warning, don't barf */
575 } 452 }
576 453
577 v = sym->st_value; 454 v = sym->st_value;
578 455
579 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); 456 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
580 if( res ) { 457 if (res) {
581 char *r = rstrs[ELF32_R_TYPE(r_info)]; 458 char *r = rstrs[ELF32_R_TYPE(r_info)];
582 printk(KERN_WARNING "VPE loader: .text+0x%x " 459 pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
583 "relocation type %s for symbol \"%s\" failed\n", 460 rel[i].r_offset, r ? r : "UNKNOWN",
584 rel[i].r_offset, r ? r : "UNKNOWN", 461 strtab + sym->st_name);
585 strtab + sym->st_name);
586 return res; 462 return res;
587 } 463 }
588 } 464 }
@@ -597,10 +473,8 @@ static inline void save_gp_address(unsigned int secbase, unsigned int rel)
597} 473}
598/* end module-elf32.c */ 474/* end module-elf32.c */
599 475
600
601
602/* Change all symbols so that sh_value encodes the pointer directly. */ 476/* Change all symbols so that sh_value encodes the pointer directly. */
603static void simplify_symbols(Elf_Shdr * sechdrs, 477static void simplify_symbols(Elf_Shdr *sechdrs,
604 unsigned int symindex, 478 unsigned int symindex,
605 const char *strtab, 479 const char *strtab,
606 const char *secstrings, 480 const char *secstrings,
@@ -641,18 +515,16 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
641 break; 515 break;
642 516
643 case SHN_MIPS_SCOMMON: 517 case SHN_MIPS_SCOMMON:
644 printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON " 518 pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
645 "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name, 519 strtab + sym[i].st_name, sym[i].st_shndx);
646 sym[i].st_shndx); 520 /* .sbss section */
647 // .sbss section
648 break; 521 break;
649 522
650 default: 523 default:
651 secbase = sechdrs[sym[i].st_shndx].sh_addr; 524 secbase = sechdrs[sym[i].st_shndx].sh_addr;
652 525
653 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { 526 if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
654 save_gp_address(secbase, sym[i].st_value); 527 save_gp_address(secbase, sym[i].st_value);
655 }
656 528
657 sym[i].st_value += secbase; 529 sym[i].st_value += secbase;
658 break; 530 break;
@@ -661,142 +533,21 @@ static void simplify_symbols(Elf_Shdr * sechdrs,
661} 533}
662 534
663#ifdef DEBUG_ELFLOADER 535#ifdef DEBUG_ELFLOADER
664static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, 536static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
665 const char *strtab, struct module *mod) 537 const char *strtab, struct module *mod)
666{ 538{
667 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; 539 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
668 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); 540 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
669 541
670 printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); 542 pr_debug("dump_elfsymbols: n %d\n", n);
671 for (i = 1; i < n; i++) { 543 for (i = 1; i < n; i++) {
672 printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, 544 pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
673 strtab + sym[i].st_name, sym[i].st_value); 545 sym[i].st_value);
674 } 546 }
675} 547}
676#endif 548#endif
677 549
678/* We are prepared so configure and start the VPE... */ 550static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
679static int vpe_run(struct vpe * v)
680{
681 unsigned long flags, val, dmt_flag;
682 struct vpe_notifications *n;
683 unsigned int vpeflags;
684 struct tc *t;
685
686 /* check we are the Master VPE */
687 local_irq_save(flags);
688 val = read_c0_vpeconf0();
689 if (!(val & VPECONF0_MVP)) {
690 printk(KERN_WARNING
691 "VPE loader: only Master VPE's are allowed to configure MT\n");
692 local_irq_restore(flags);
693
694 return -1;
695 }
696
697 dmt_flag = dmt();
698 vpeflags = dvpe();
699
700 if (list_empty(&v->tc)) {
701 evpe(vpeflags);
702 emt(dmt_flag);
703 local_irq_restore(flags);
704
705 printk(KERN_WARNING
706 "VPE loader: No TC's associated with VPE %d\n",
707 v->minor);
708
709 return -ENOEXEC;
710 }
711
712 t = list_first_entry(&v->tc, struct tc, tc);
713
714 /* Put MVPE's into 'configuration state' */
715 set_c0_mvpcontrol(MVPCONTROL_VPC);
716
717 settc(t->index);
718
719 /* should check it is halted, and not activated */
720 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
721 evpe(vpeflags);
722 emt(dmt_flag);
723 local_irq_restore(flags);
724
725 printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
726 t->index);
727
728 return -ENOEXEC;
729 }
730
731 /* Write the address we want it to start running from in the TCPC register. */
732 write_tc_c0_tcrestart((unsigned long)v->__start);
733 write_tc_c0_tccontext((unsigned long)0);
734
735 /*
736 * Mark the TC as activated, not interrupt exempt and not dynamically
737 * allocatable
738 */
739 val = read_tc_c0_tcstatus();
740 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
741 write_tc_c0_tcstatus(val);
742
743 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
744
745 /*
746 * The sde-kit passes 'memsize' to __start in $a3, so set something
747 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
748 * DFLT_HEAP_SIZE when you compile your program
749 */
750 mttgpr(6, v->ntcs);
751 mttgpr(7, physical_memsize);
752
753 /* set up VPE1 */
754 /*
755 * bind the TC to VPE 1 as late as possible so we only have the final
756 * VPE registers to set up, and so an EJTAG probe can trigger on it
757 */
758 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
759
760 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
761
762 back_to_back_c0_hazard();
763
764 /* Set up the XTC bit in vpeconf0 to point at our tc */
765 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
766 | (t->index << VPECONF0_XTC_SHIFT));
767
768 back_to_back_c0_hazard();
769
770 /* enable this VPE */
771 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
772
773 /* clear out any left overs from a previous program */
774 write_vpe_c0_status(0);
775 write_vpe_c0_cause(0);
776
777 /* take system out of configuration state */
778 clear_c0_mvpcontrol(MVPCONTROL_VPC);
779
780 /*
781 * SMTC/SMVP kernels manage VPE enable independently,
782 * but uniprocessor kernels need to turn it on, even
783 * if that wasn't the pre-dvpe() state.
784 */
785#ifdef CONFIG_SMP
786 evpe(vpeflags);
787#else
788 evpe(EVPE_ENABLE);
789#endif
790 emt(dmt_flag);
791 local_irq_restore(flags);
792
793 list_for_each_entry(n, &v->notify, list)
794 n->start(minor);
795
796 return 0;
797}
798
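The vpe_run() deleted here (it is relocated to the MT-specific loader in this series) boils down to a fixed register sequence once the safety checks pass. Condensed, with hazard barriers, IRQ masking and error paths elided (a sketch of the removed code, not a drop-in replacement):

    /* enter MVPE configuration state and address the target TC */
    set_c0_mvpcontrol(MVPCONTROL_VPC);
    settc(t->index);

    write_tc_c0_tcrestart(v->__start);      /* entry point */
    write_tc_c0_tcstatus((read_tc_c0_tcstatus() &
                          ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A);
    write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);    /* unhalt */

    /* bind the TC to VPE 1, then enable that VPE and leave config state */
    write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
    write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
    clear_c0_mvpcontrol(MVPCONTROL_VPC);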
799static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
800 unsigned int symindex, const char *strtab, 551 unsigned int symindex, const char *strtab,
801 struct module *mod) 552 struct module *mod)
802{ 553{
@@ -804,16 +555,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
804 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); 555 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
805 556
806 for (i = 1; i < n; i++) { 557 for (i = 1; i < n; i++) {
807 if (strcmp(strtab + sym[i].st_name, "__start") == 0) { 558 if (strcmp(strtab + sym[i].st_name, "__start") == 0)
808 v->__start = sym[i].st_value; 559 v->__start = sym[i].st_value;
809 }
810 560
811 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { 561 if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
812 v->shared_ptr = (void *)sym[i].st_value; 562 v->shared_ptr = (void *)sym[i].st_value;
813 }
814 } 563 }
815 564
816 if ( (v->__start == 0) || (v->shared_ptr == NULL)) 565 if ((v->__start == 0) || (v->shared_ptr == NULL))
817 return -1; 566 return -1;
818 567
819 return 0; 568 return 0;
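Read from the SP program's side, the scan above dictates exactly two names: a __start entry point, which the run path writes into the TC's restart register, and an optional vpe_shared buffer exported through vpe_get_shared(). A skeleton of a conforming image (bodies illustrative):

    /* found by find_vpe_symbols() via the "vpe_shared" symbol */
    char vpe_shared[1024];

    /* execution resumes here once the loader releases the TC */
    void __start(void)
    {
            for (;;) {
                    /* poll vpe_shared[] and do the SP-side work */
            }
    }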
@@ -824,14 +573,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
824 * contents of the program (p)buffer performing relocations/etc, frees it 573 * contents of the program (p)buffer performing relocations/etc, frees it
825 * when finished. 574 * when finished.
826 */ 575 */
827static int vpe_elfload(struct vpe * v) 576static int vpe_elfload(struct vpe *v)
828{ 577{
829 Elf_Ehdr *hdr; 578 Elf_Ehdr *hdr;
830 Elf_Shdr *sechdrs; 579 Elf_Shdr *sechdrs;
831 long err = 0; 580 long err = 0;
832 char *secstrings, *strtab = NULL; 581 char *secstrings, *strtab = NULL;
833 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; 582 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
834 struct module mod; // so we can re-use the relocations code 583 struct module mod; /* so we can re-use the relocations code */
835 584
836 memset(&mod, 0, sizeof(struct module)); 585 memset(&mod, 0, sizeof(struct module));
837 strcpy(mod.name, "VPE loader"); 586 strcpy(mod.name, "VPE loader");
@@ -845,8 +594,7 @@ static int vpe_elfload(struct vpe * v)
845 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) 594 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
846 || !elf_check_arch(hdr) 595 || !elf_check_arch(hdr)
847 || hdr->e_shentsize != sizeof(*sechdrs)) { 596 || hdr->e_shentsize != sizeof(*sechdrs)) {
848 printk(KERN_WARNING 597 pr_warn("VPE loader: program wrong arch or weird elf version\n");
849 "VPE loader: program wrong arch or weird elf version\n");
850 598
851 return -ENOEXEC; 599 return -ENOEXEC;
852 } 600 }
@@ -855,8 +603,7 @@ static int vpe_elfload(struct vpe * v)
855 relocate = 1; 603 relocate = 1;
856 604
857 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { 605 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
858 printk(KERN_ERR "VPE loader: program length %u truncated\n", 606 pr_err("VPE loader: program length %u truncated\n", len);
859 len);
860 607
861 return -ENOEXEC; 608 return -ENOEXEC;
862 } 609 }
@@ -871,22 +618,24 @@ static int vpe_elfload(struct vpe * v)
871 618
872 if (relocate) { 619 if (relocate) {
873 for (i = 1; i < hdr->e_shnum; i++) { 620 for (i = 1; i < hdr->e_shnum; i++) {
874 if (sechdrs[i].sh_type != SHT_NOBITS 621 if ((sechdrs[i].sh_type != SHT_NOBITS) &&
875 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { 622 (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
876 printk(KERN_ERR "VPE program length %u truncated\n", 623 pr_err("VPE program length %u truncated\n",
877 len); 624 len);
878 return -ENOEXEC; 625 return -ENOEXEC;
879 } 626 }
880 627
881 /* Mark all sections sh_addr with their address in the 628 /* Mark all sections sh_addr with their address in the
882 temporary image. */ 629 temporary image. */
883 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; 630 sechdrs[i].sh_addr = (size_t) hdr +
631 sechdrs[i].sh_offset;
884 632
885 /* Internal symbols and strings. */ 633 /* Internal symbols and strings. */
886 if (sechdrs[i].sh_type == SHT_SYMTAB) { 634 if (sechdrs[i].sh_type == SHT_SYMTAB) {
887 symindex = i; 635 symindex = i;
888 strindex = sechdrs[i].sh_link; 636 strindex = sechdrs[i].sh_link;
889 strtab = (char *)hdr + sechdrs[strindex].sh_offset; 637 strtab = (char *)hdr +
638 sechdrs[strindex].sh_offset;
890 } 639 }
891 } 640 }
892 layout_sections(&mod, hdr, sechdrs, secstrings); 641 layout_sections(&mod, hdr, sechdrs, secstrings);
@@ -913,8 +662,9 @@ static int vpe_elfload(struct vpe * v)
913 /* Update sh_addr to point to copy in image. */ 662 /* Update sh_addr to point to copy in image. */
914 sechdrs[i].sh_addr = (unsigned long)dest; 663 sechdrs[i].sh_addr = (unsigned long)dest;
915 664
916 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n", 665 pr_debug(" section sh_name %s sh_addr 0x%x\n",
917 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); 666 secstrings + sechdrs[i].sh_name,
667 sechdrs[i].sh_addr);
918 } 668 }
919 669
920 /* Fix up syms, so that st_value is a pointer to location. */ 670 /* Fix up syms, so that st_value is a pointer to location. */
@@ -935,17 +685,18 @@ static int vpe_elfload(struct vpe * v)
935 continue; 685 continue;
936 686
937 if (sechdrs[i].sh_type == SHT_REL) 687 if (sechdrs[i].sh_type == SHT_REL)
938 err = apply_relocations(sechdrs, strtab, symindex, i, 688 err = apply_relocations(sechdrs, strtab,
939 &mod); 689 symindex, i, &mod);
940 else if (sechdrs[i].sh_type == SHT_RELA) 690 else if (sechdrs[i].sh_type == SHT_RELA)
941 err = apply_relocate_add(sechdrs, strtab, symindex, i, 691 err = apply_relocate_add(sechdrs, strtab,
942 &mod); 692 symindex, i, &mod);
943 if (err < 0) 693 if (err < 0)
944 return err; 694 return err;
945 695
946 } 696 }
947 } else { 697 } else {
948 struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); 698 struct elf_phdr *phdr = (struct elf_phdr *)
699 ((char *)hdr + hdr->e_phoff);
949 700
950 for (i = 0; i < hdr->e_phnum; i++) { 701 for (i = 0; i < hdr->e_phnum; i++) {
951 if (phdr->p_type == PT_LOAD) { 702 if (phdr->p_type == PT_LOAD) {
@@ -963,11 +714,15 @@ static int vpe_elfload(struct vpe * v)
963 if (sechdrs[i].sh_type == SHT_SYMTAB) { 714 if (sechdrs[i].sh_type == SHT_SYMTAB) {
964 symindex = i; 715 symindex = i;
965 strindex = sechdrs[i].sh_link; 716 strindex = sechdrs[i].sh_link;
966 strtab = (char *)hdr + sechdrs[strindex].sh_offset; 717 strtab = (char *)hdr +
718 sechdrs[strindex].sh_offset;
967 719
968 /* mark the symtab's address for when we try to find the 720 /*
969 magic symbols */ 721 * mark symtab's address for when we try
970 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; 722 * to find the magic symbols
723 */
724 sechdrs[i].sh_addr = (size_t) hdr +
725 sechdrs[i].sh_offset;
971 } 726 }
972 } 727 }
973 } 728 }
@@ -978,53 +733,19 @@ static int vpe_elfload(struct vpe * v)
978 733
979 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { 734 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
980 if (v->__start == 0) { 735 if (v->__start == 0) {
981 printk(KERN_WARNING "VPE loader: program does not contain " 736 pr_warn("VPE loader: program does not contain a __start symbol\n");
982 "a __start symbol\n");
983 return -ENOEXEC; 737 return -ENOEXEC;
984 } 738 }
985 739
986 if (v->shared_ptr == NULL) 740 if (v->shared_ptr == NULL)
987 printk(KERN_WARNING "VPE loader: " 741 pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
988 "program does not contain vpe_shared symbol.\n" 742 " Unable to use AMVP (AP/SP) facilities.\n");
989 " Unable to use AMVP (AP/SP) facilities.\n");
990 } 743 }
991 744
992 printk(" elf loaded\n"); 745 pr_info(" elf loaded\n");
993 return 0; 746 return 0;
994} 747}
995 748
996static void cleanup_tc(struct tc *tc)
997{
998 unsigned long flags;
999 unsigned int mtflags, vpflags;
1000 int tmp;
1001
1002 local_irq_save(flags);
1003 mtflags = dmt();
1004 vpflags = dvpe();
1005 /* Put MVPE's into 'configuration state' */
1006 set_c0_mvpcontrol(MVPCONTROL_VPC);
1007
1008 settc(tc->index);
1009 tmp = read_tc_c0_tcstatus();
1010
1011 /* mark not allocated and not dynamically allocatable */
1012 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1013 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1014 write_tc_c0_tcstatus(tmp);
1015
1016 write_tc_c0_tchalt(TCHALT_H);
1017 mips_ihb();
1018
1019 /* bind it to anything other than VPE1 */
1020// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1021
1022 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1023 evpe(vpflags);
1024 emt(mtflags);
1025 local_irq_restore(flags);
1026}
1027
1028static int getcwd(char *buff, int size) 749static int getcwd(char *buff, int size)
1029{ 750{
1030 mm_segment_t old_fs; 751 mm_segment_t old_fs;
@@ -1044,52 +765,49 @@ static int getcwd(char *buff, int size)
1044static int vpe_open(struct inode *inode, struct file *filp) 765static int vpe_open(struct inode *inode, struct file *filp)
1045{ 766{
1046 enum vpe_state state; 767 enum vpe_state state;
1047 struct vpe_notifications *not; 768 struct vpe_notifications *notifier;
1048 struct vpe *v; 769 struct vpe *v;
1049 int ret; 770 int ret;
1050 771
1051 if (minor != iminor(inode)) { 772 if (VPE_MODULE_MINOR != iminor(inode)) {
1052 /* assume only 1 device at the moment. */ 773 /* assume only 1 device at the moment. */
1053 pr_warning("VPE loader: only vpe1 is supported\n"); 774 pr_warn("VPE loader: only vpe1 is supported\n");
1054 775
1055 return -ENODEV; 776 return -ENODEV;
1056 } 777 }
1057 778
1058 if ((v = get_vpe(tclimit)) == NULL) { 779 v = get_vpe(aprp_cpu_index());
1059 pr_warning("VPE loader: unable to get vpe\n"); 780 if (v == NULL) {
781 pr_warn("VPE loader: unable to get vpe\n");
1060 782
1061 return -ENODEV; 783 return -ENODEV;
1062 } 784 }
1063 785
1064 state = xchg(&v->state, VPE_STATE_INUSE); 786 state = xchg(&v->state, VPE_STATE_INUSE);
1065 if (state != VPE_STATE_UNUSED) { 787 if (state != VPE_STATE_UNUSED) {
1066 printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); 788 pr_debug("VPE loader: tc in use dumping regs\n");
1067 789
1068 list_for_each_entry(not, &v->notify, list) { 790 list_for_each_entry(notifier, &v->notify, list)
1069 not->stop(tclimit); 791 notifier->stop(aprp_cpu_index());
1070 }
1071 792
1072 release_progmem(v->load_addr); 793 release_progmem(v->load_addr);
1073 cleanup_tc(get_tc(tclimit)); 794 cleanup_tc(get_tc(aprp_cpu_index()));
1074 } 795 }
1075 796
1076 /* this of course trashes what was there before... */ 797 /* this of course trashes what was there before... */
1077 v->pbuffer = vmalloc(P_SIZE); 798 v->pbuffer = vmalloc(P_SIZE);
1078 if (!v->pbuffer) { 799 if (!v->pbuffer) {
1079 pr_warning("VPE loader: unable to allocate memory\n"); 800 pr_warn("VPE loader: unable to allocate memory\n");
1080 return -ENOMEM; 801 return -ENOMEM;
1081 } 802 }
1082 v->plen = P_SIZE; 803 v->plen = P_SIZE;
1083 v->load_addr = NULL; 804 v->load_addr = NULL;
1084 v->len = 0; 805 v->len = 0;
1085 806
1086 v->uid = filp->f_cred->fsuid;
1087 v->gid = filp->f_cred->fsgid;
1088
1089 v->cwd[0] = 0; 807 v->cwd[0] = 0;
1090 ret = getcwd(v->cwd, VPE_PATH_MAX); 808 ret = getcwd(v->cwd, VPE_PATH_MAX);
1091 if (ret < 0) 809 if (ret < 0)
1092 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret); 810 pr_warn("VPE loader: open, getcwd returned %d\n", ret);
1093 811
1094 v->shared_ptr = NULL; 812 v->shared_ptr = NULL;
1095 v->__start = 0; 813 v->__start = 0;
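Worth noting in the open path above: the single xchg() both claims the VPE and reports the state it was in, so a stale user can be torn down without a separate lock around the test. Distilled (the teardown helper is illustrative):

    static void claim_vpe(struct vpe *v, void (*teardown)(struct vpe *))
    {
            enum vpe_state old = xchg(&v->state, VPE_STATE_INUSE);

            /* whatever 'old' was, the VPE is now ours; clean up leftovers */
            if (old != VPE_STATE_UNUSED)
                    teardown(v);
    }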
@@ -1103,20 +821,20 @@ static int vpe_release(struct inode *inode, struct file *filp)
1103 Elf_Ehdr *hdr; 821 Elf_Ehdr *hdr;
1104 int ret = 0; 822 int ret = 0;
1105 823
1106 v = get_vpe(tclimit); 824 v = get_vpe(aprp_cpu_index());
1107 if (v == NULL) 825 if (v == NULL)
1108 return -ENODEV; 826 return -ENODEV;
1109 827
1110 hdr = (Elf_Ehdr *) v->pbuffer; 828 hdr = (Elf_Ehdr *) v->pbuffer;
1111 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { 829 if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
1112 if (vpe_elfload(v) >= 0) { 830 if ((vpe_elfload(v) >= 0) && vpe_run) {
1113 vpe_run(v); 831 vpe_run(v);
1114 } else { 832 } else {
1115 printk(KERN_WARNING "VPE loader: ELF load failed.\n"); 833 pr_warn("VPE loader: ELF load failed.\n");
1116 ret = -ENOEXEC; 834 ret = -ENOEXEC;
1117 } 835 }
1118 } else { 836 } else {
1119 printk(KERN_WARNING "VPE loader: only elf files are supported\n"); 837 pr_warn("VPE loader: only elf files are supported\n");
1120 ret = -ENOEXEC; 838 ret = -ENOEXEC;
1121 } 839 }
1122 840
@@ -1134,22 +852,22 @@ static int vpe_release(struct inode *inode, struct file *filp)
1134 return ret; 852 return ret;
1135} 853}
1136 854
1137static ssize_t vpe_write(struct file *file, const char __user * buffer, 855static ssize_t vpe_write(struct file *file, const char __user *buffer,
1138 size_t count, loff_t * ppos) 856 size_t count, loff_t *ppos)
1139{ 857{
1140 size_t ret = count; 858 size_t ret = count;
1141 struct vpe *v; 859 struct vpe *v;
1142 860
1143 if (iminor(file_inode(file)) != minor) 861 if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
1144 return -ENODEV; 862 return -ENODEV;
1145 863
1146 v = get_vpe(tclimit); 864 v = get_vpe(aprp_cpu_index());
865
1147 if (v == NULL) 866 if (v == NULL)
1148 return -ENODEV; 867 return -ENODEV;
1149 868
1150 if ((count + v->len) > v->plen) { 869 if ((count + v->len) > v->plen) {
1151 printk(KERN_WARNING 870 pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
1152 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
1153 return -ENOMEM; 871 return -ENOMEM;
1154 } 872 }
1155 873
@@ -1161,7 +879,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
1161 return ret; 879 return ret;
1162} 880}
1163 881
1164static const struct file_operations vpe_fops = { 882const struct file_operations vpe_fops = {
1165 .owner = THIS_MODULE, 883 .owner = THIS_MODULE,
1166 .open = vpe_open, 884 .open = vpe_open,
1167 .release = vpe_release, 885 .release = vpe_release,
@@ -1169,420 +887,40 @@ static const struct file_operations vpe_fops = {
1169 .llseek = noop_llseek, 887 .llseek = noop_llseek,
1170}; 888};
1171 889
1172/* module wrapper entry points */
1173/* give me a vpe */
1174vpe_handle vpe_alloc(void)
1175{
1176 int i;
1177 struct vpe *v;
1178
1179 /* find a vpe */
1180 for (i = 1; i < MAX_VPES; i++) {
1181 if ((v = get_vpe(i)) != NULL) {
1182 v->state = VPE_STATE_INUSE;
1183 return v;
1184 }
1185 }
1186 return NULL;
1187}
1188
1189EXPORT_SYMBOL(vpe_alloc);
1190
1191/* start running from here */
1192int vpe_start(vpe_handle vpe, unsigned long start)
1193{
1194 struct vpe *v = vpe;
1195
1196 v->__start = start;
1197 return vpe_run(v);
1198}
1199
1200EXPORT_SYMBOL(vpe_start);
1201
1202/* halt it for now */
1203int vpe_stop(vpe_handle vpe)
1204{
1205 struct vpe *v = vpe;
1206 struct tc *t;
1207 unsigned int evpe_flags;
1208
1209 evpe_flags = dvpe();
1210
1211 if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) {
1212
1213 settc(t->index);
1214 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1215 }
1216
1217 evpe(evpe_flags);
1218
1219 return 0;
1220}
1221
1222EXPORT_SYMBOL(vpe_stop);
1223
1224/* I've done with it thank you */
1225int vpe_free(vpe_handle vpe)
1226{
1227 struct vpe *v = vpe;
1228 struct tc *t;
1229 unsigned int evpe_flags;
1230
1231 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
1232 return -ENOEXEC;
1233 }
1234
1235 evpe_flags = dvpe();
1236
1237 /* Put MVPE's into 'configuration state' */
1238 set_c0_mvpcontrol(MVPCONTROL_VPC);
1239
1240 settc(t->index);
1241 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
1242
1243 /* halt the TC */
1244 write_tc_c0_tchalt(TCHALT_H);
1245 mips_ihb();
1246
1247 /* mark the TC unallocated */
1248 write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
1249
1250 v->state = VPE_STATE_UNUSED;
1251
1252 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1253 evpe(evpe_flags);
1254
1255 return 0;
1256}
1257
1258EXPORT_SYMBOL(vpe_free);
1259
1260void *vpe_get_shared(int index) 890void *vpe_get_shared(int index)
1261{ 891{
1262 struct vpe *v; 892 struct vpe *v = get_vpe(index);
1263 893
1264 if ((v = get_vpe(index)) == NULL) 894 if (v == NULL)
1265 return NULL; 895 return NULL;
1266 896
1267 return v->shared_ptr; 897 return v->shared_ptr;
1268} 898}
1269
1270EXPORT_SYMBOL(vpe_get_shared); 899EXPORT_SYMBOL(vpe_get_shared);
1271 900
1272int vpe_getuid(int index)
1273{
1274 struct vpe *v;
1275
1276 if ((v = get_vpe(index)) == NULL)
1277 return -1;
1278
1279 return v->uid;
1280}
1281
1282EXPORT_SYMBOL(vpe_getuid);
1283
1284int vpe_getgid(int index)
1285{
1286 struct vpe *v;
1287
1288 if ((v = get_vpe(index)) == NULL)
1289 return -1;
1290
1291 return v->gid;
1292}
1293
1294EXPORT_SYMBOL(vpe_getgid);
1295
1296int vpe_notify(int index, struct vpe_notifications *notify) 901int vpe_notify(int index, struct vpe_notifications *notify)
1297{ 902{
1298 struct vpe *v; 903 struct vpe *v = get_vpe(index);
1299 904
1300 if ((v = get_vpe(index)) == NULL) 905 if (v == NULL)
1301 return -1; 906 return -1;
1302 907
1303 list_add(&notify->list, &v->notify); 908 list_add(&notify->list, &v->notify);
1304 return 0; 909 return 0;
1305} 910}
1306
1307EXPORT_SYMBOL(vpe_notify); 911EXPORT_SYMBOL(vpe_notify);
1308 912
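A consumer-side sketch of the notification API retained above, in the style the RTLX driver uses it: supply start/stop callbacks and register them against the AP/SP index (callback bodies illustrative):

    #include <linux/kernel.h>
    #include <asm/vpe.h>

    static void my_start(int index)
    {
            pr_info("SP program on VPE %d started\n", index);
    }

    static void my_stop(int index)
    {
            pr_info("SP program on VPE %d stopped\n", index);
    }

    static struct vpe_notifications my_notify = {
            .start = my_start,
            .stop  = my_stop,
    };

    /* at init time: vpe_notify(aprp_cpu_index(), &my_notify); */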
1309char *vpe_getcwd(int index) 913char *vpe_getcwd(int index)
1310{ 914{
1311 struct vpe *v; 915 struct vpe *v = get_vpe(index);
1312 916
1313 if ((v = get_vpe(index)) == NULL) 917 if (v == NULL)
1314 return NULL; 918 return NULL;
1315 919
1316 return v->cwd; 920 return v->cwd;
1317} 921}
1318
1319EXPORT_SYMBOL(vpe_getcwd); 922EXPORT_SYMBOL(vpe_getcwd);
1320 923
1321static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1322 const char *buf, size_t len)
1323{
1324 struct vpe *vpe = get_vpe(tclimit);
1325 struct vpe_notifications *not;
1326
1327 list_for_each_entry(not, &vpe->notify, list) {
1328 not->stop(tclimit);
1329 }
1330
1331 release_progmem(vpe->load_addr);
1332 cleanup_tc(get_tc(tclimit));
1333 vpe_stop(vpe);
1334 vpe_free(vpe);
1335
1336 return len;
1337}
1338static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
1339
1340static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
1341 char *buf)
1342{
1343 struct vpe *vpe = get_vpe(tclimit);
1344
1345 return sprintf(buf, "%d\n", vpe->ntcs);
1346}
1347
1348static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
1349 const char *buf, size_t len)
1350{
1351 struct vpe *vpe = get_vpe(tclimit);
1352 unsigned long new;
1353 char *endp;
1354
1355 new = simple_strtoul(buf, &endp, 0);
1356 if (endp == buf)
1357 goto out_einval;
1358
1359 if (new == 0 || new > (hw_tcs - tclimit))
1360 goto out_einval;
1361
1362 vpe->ntcs = new;
1363
1364 return len;
1365
1366out_einval:
1367 return -EINVAL;
1368}
1369static DEVICE_ATTR_RW(ntcs);
1370
1371static struct attribute *vpe_attrs[] = {
1372 &dev_attr_kill.attr,
1373 &dev_attr_ntcs.attr,
1374 NULL,
1375};
1376ATTRIBUTE_GROUPS(vpe);
1377
1378static void vpe_device_release(struct device *cd)
1379{
1380 kfree(cd);
1381}
1382
1383struct class vpe_class = {
1384 .name = "vpe",
1385 .owner = THIS_MODULE,
1386 .dev_release = vpe_device_release,
1387 .dev_groups = vpe_groups,
1388};
1389
1390struct device vpe_device;
1391
1392static int __init vpe_module_init(void)
1393{
1394 unsigned int mtflags, vpflags;
1395 unsigned long flags, val;
1396 struct vpe *v = NULL;
1397 struct tc *t;
1398 int tc, err;
1399
1400 if (!cpu_has_mipsmt) {
1401 printk("VPE loader: not a MIPS MT capable processor\n");
1402 return -ENODEV;
1403 }
1404
1405 if (vpelimit == 0) {
1406 printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
1407 "initializing VPE loader.\nPass maxvpes=<n> argument as "
1408 "kernel argument\n");
1409
1410 return -ENODEV;
1411 }
1412
1413 if (tclimit == 0) {
1414 printk(KERN_WARNING "No TCs reserved for AP/SP, not "
1415 "initializing VPE loader.\nPass maxtcs=<n> argument as "
1416 "kernel argument\n");
1417
1418 return -ENODEV;
1419 }
1420
1421 major = register_chrdev(0, module_name, &vpe_fops);
1422 if (major < 0) {
1423 printk("VPE loader: unable to register character device\n");
1424 return major;
1425 }
1426
1427 err = class_register(&vpe_class);
1428 if (err) {
1429 printk(KERN_ERR "vpe_class registration failed\n");
1430 goto out_chrdev;
1431 }
1432
1433 device_initialize(&vpe_device);
1434 vpe_device.class = &vpe_class,
1435 vpe_device.parent = NULL,
1436 dev_set_name(&vpe_device, "vpe1");
1437 vpe_device.devt = MKDEV(major, minor);
1438 err = device_add(&vpe_device);
1439 if (err) {
1440 printk(KERN_ERR "Adding vpe_device failed\n");
1441 goto out_class;
1442 }
1443
1444 local_irq_save(flags);
1445 mtflags = dmt();
1446 vpflags = dvpe();
1447
1448 /* Put MVPE's into 'configuration state' */
1449 set_c0_mvpcontrol(MVPCONTROL_VPC);
1450
1451 /* dump_mtregs(); */
1452
1453 val = read_c0_mvpconf0();
1454 hw_tcs = (val & MVPCONF0_PTC) + 1;
1455 hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
1456
1457 for (tc = tclimit; tc < hw_tcs; tc++) {
1458 /*
1459 * Must re-enable multithreading temporarily or in case we
1460 * reschedule send IPIs or similar we might hang.
1461 */
1462 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1463 evpe(vpflags);
1464 emt(mtflags);
1465 local_irq_restore(flags);
1466 t = alloc_tc(tc);
1467 if (!t) {
1468 err = -ENOMEM;
1469 goto out;
1470 }
1471
1472 local_irq_save(flags);
1473 mtflags = dmt();
1474 vpflags = dvpe();
1475 set_c0_mvpcontrol(MVPCONTROL_VPC);
1476
1477 /* VPE's */
1478 if (tc < hw_tcs) {
1479 settc(tc);
1480
1481 if ((v = alloc_vpe(tc)) == NULL) {
1482 printk(KERN_WARNING "VPE: unable to allocate VPE\n");
1483
1484 goto out_reenable;
1485 }
1486
1487 v->ntcs = hw_tcs - tclimit;
1488
1489 /* add the tc to the list of this vpe's tc's. */
1490 list_add(&t->tc, &v->tc);
1491
1492 /* deactivate all but vpe0 */
1493 if (tc >= tclimit) {
1494 unsigned long tmp = read_vpe_c0_vpeconf0();
1495
1496 tmp &= ~VPECONF0_VPA;
1497
1498 /* master VPE */
1499 tmp |= VPECONF0_MVP;
1500 write_vpe_c0_vpeconf0(tmp);
1501 }
1502
1503 /* disable multi-threading with TC's */
1504 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
1505
1506 if (tc >= vpelimit) {
1507 /*
1508 * Set config to be the same as vpe0,
1509 * particularly kseg0 coherency alg
1510 */
1511 write_vpe_c0_config(read_c0_config());
1512 }
1513 }
1514
1515 /* TC's */
1516 t->pvpe = v; /* set the parent vpe */
1517
1518 if (tc >= tclimit) {
1519 unsigned long tmp;
1520
1521 settc(tc);
1522
1523 /* Any TC that is bound to VPE0 gets left as is - in case
1524 we are running SMTC on VPE0. A TC that is bound to any
1525 other VPE gets bound to VPE0, ideally I'd like to make
1526 it homeless but it doesn't appear to let me bind a TC
1527 to a non-existent VPE. Which is perfectly reasonable.
1528
1529 The (un)bound state is visible to an EJTAG probe so may
1530 notify GDB...
1531 */
1532
1533 if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
1534 /* tc is bound >vpe0 */
1535 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
1536
1537 t->pvpe = get_vpe(0); /* set the parent vpe */
1538 }
1539
1540 /* halt the TC */
1541 write_tc_c0_tchalt(TCHALT_H);
1542 mips_ihb();
1543
1544 tmp = read_tc_c0_tcstatus();
1545
1546 /* mark not activated and not dynamically allocatable */
1547 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1548 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1549 write_tc_c0_tcstatus(tmp);
1550 }
1551 }
1552
1553out_reenable:
1554 /* release config state */
1555 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1556
1557 evpe(vpflags);
1558 emt(mtflags);
1559 local_irq_restore(flags);
1560
1561 return 0;
1562
1563out_class:
1564 class_unregister(&vpe_class);
1565out_chrdev:
1566 unregister_chrdev(major, module_name);
1567
1568out:
1569 return err;
1570}
1571
1572static void __exit vpe_module_exit(void)
1573{
1574 struct vpe *v, *n;
1575
1576 device_del(&vpe_device);
1577 unregister_chrdev(major, module_name);
1578
1579 /* No locking needed here */
1580 list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
1581 if (v->state != VPE_STATE_UNUSED)
1582 release_vpe(v);
1583 }
1584}
1585
1586module_init(vpe_module_init); 924module_init(vpe_module_init);
1587module_exit(vpe_module_exit); 925module_exit(vpe_module_exit);
1588MODULE_DESCRIPTION("MIPS VPE Loader"); 926MODULE_DESCRIPTION("MIPS VPE Loader");