author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-02 16:40:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-02 16:40:50 -0400
commit		bdfc7cbdeef8cadba0e5793079ac0130b8e2220c (patch)
tree		82af0cae4898e259edcc6cbdad639087dc1189a8 /arch/mips/include/asm
parent		62d1a3ba5adc5653d43f6cd3a90758bb6ad5d5bd (diff)
parent		ade63aada79c61bcd5f51cbd310f237399892268 (diff)
Merge branch 'mips-for-linux-next' of git://git.linux-mips.org/pub/scm/ralf/upstream-sfr
Pull MIPS updates from Ralf Baechle:

 - Support for Imgtec's Aptiv family of MIPS cores.
 - Improved detection of BCM47xx configurations.
 - Fix hibernation for certain configurations.
 - Add support for the Chinese Loongson 3 CPU, a MIPS64 R2 core, and systems built on it.
 - Detection and support for the MIPS P5600 core.
 - A few more random fixes that didn't make 3.14.
 - Support for EVA, the Extended Virtual Addressing scheme.
 - Switch Alchemy to the platform PATA driver.
 - Complete unification of Alchemy support.
 - Allow availability of I/O cache coherency to be runtime detected.
 - Improvements to multiprocessing support for Imgtec platforms.
 - A few micro-optimizations.
 - Cleanups of FPU support.
 - Paul Gortmaker's fixes for the init stuff.
 - Support for seccomp.

* 'mips-for-linux-next' of git://git.linux-mips.org/pub/scm/ralf/upstream-sfr: (165 commits)
  MIPS: CPC: Use __raw_ memory access functions
  MIPS: CM: use __raw_ memory access functions
  MIPS: Fix warning when including smp-ops.h with CONFIG_SMP=n
  MIPS: Malta: GIC IPIs may be used without MT
  MIPS: smp-mt: Use common GIC IPI implementation
  MIPS: smp-cmp: Remove incorrect core number probe
  MIPS: Fix gigaton of warning building with microMIPS.
  MIPS: Fix core number detection for MT cores
  MIPS: MT: core_nvpes function to retrieve VPE count
  MIPS: Provide empty mips_mt_set_cpuoptions when CONFIG_MIPS_MT=n
  MIPS: Lasat: Replace del_timer by del_timer_sync
  MIPS: Malta: Setup PM I/O region on boot
  MIPS: Loongson: Add a Loongson-3 default config file
  MIPS: Loongson 3: Add CPU hotplug support
  MIPS: Loongson 3: Add Loongson-3 SMP support
  MIPS: Loongson: Add Loongson-3 Kconfig options
  MIPS: Loongson: Add swiotlb to support All-Memory DMA
  MIPS: Loongson 3: Add serial port support
  MIPS: Loongson 3: Add IRQ init and dispatch support
  MIPS: Loongson 3: Add HT-linked PCI support
  ...
Diffstat (limited to 'arch/mips/include/asm')
-rw-r--r--	arch/mips/include/asm/asm-eva.h	135
-rw-r--r--	arch/mips/include/asm/asm.h	13
-rw-r--r--	arch/mips/include/asm/asmmacro-32.h	128
-rw-r--r--	arch/mips/include/asm/asmmacro.h	330
-rw-r--r--	arch/mips/include/asm/atomic.h	40
-rw-r--r--	arch/mips/include/asm/bitops.h	28
-rw-r--r--	arch/mips/include/asm/bootinfo.h	26
-rw-r--r--	arch/mips/include/asm/checksum.h	44
-rw-r--r--	arch/mips/include/asm/cmpxchg.h	20
-rw-r--r--	arch/mips/include/asm/cpu-features.h	10
-rw-r--r--	arch/mips/include/asm/cpu-info.h	28
-rw-r--r--	arch/mips/include/asm/cpu-type.h	6
-rw-r--r--	arch/mips/include/asm/cpu.h	15
-rw-r--r--	arch/mips/include/asm/dma-mapping.h	5
-rw-r--r--	arch/mips/include/asm/fpu.h	2
-rw-r--r--	arch/mips/include/asm/futex.h	25
-rw-r--r--	arch/mips/include/asm/fw/fw.h	2
-rw-r--r--	arch/mips/include/asm/gcmpregs.h	125
-rw-r--r--	arch/mips/include/asm/gic.h	3
-rw-r--r--	arch/mips/include/asm/io.h	8
-rw-r--r--	arch/mips/include/asm/local.h	8
-rw-r--r--	arch/mips/include/asm/mach-au1x00/au1000.h	12
-rw-r--r--	arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h	8
-rw-r--r--	arch/mips/include/asm/mach-db1x00/db1200.h	91
-rw-r--r--	arch/mips/include/asm/mach-db1x00/db1300.h	40
-rw-r--r--	arch/mips/include/asm/mach-loongson/boot_param.h	163
-rw-r--r--	arch/mips/include/asm/mach-loongson/dma-coherence.h	22
-rw-r--r--	arch/mips/include/asm/mach-loongson/irq.h	44
-rw-r--r--	arch/mips/include/asm/mach-loongson/loongson.h	28
-rw-r--r--	arch/mips/include/asm/mach-loongson/machine.h	6
-rw-r--r--	arch/mips/include/asm/mach-loongson/pci.h	5
-rw-r--r--	arch/mips/include/asm/mach-loongson/spaces.h	9
-rw-r--r--	arch/mips/include/asm/mach-malta/kernel-entry-init.h	115
-rw-r--r--	arch/mips/include/asm/mach-malta/spaces.h	46
-rw-r--r--	arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h	12
-rw-r--r--	arch/mips/include/asm/mips-boards/malta.h	5
-rw-r--r--	arch/mips/include/asm/mips-boards/piix4.h	5
-rw-r--r--	arch/mips/include/asm/mips-cm.h	322
-rw-r--r--	arch/mips/include/asm/mips-cpc.h	150
-rw-r--r--	arch/mips/include/asm/mips_mt.h	5
-rw-r--r--	arch/mips/include/asm/mipsmtregs.h	11
-rw-r--r--	arch/mips/include/asm/mipsregs.h	22
-rw-r--r--	arch/mips/include/asm/module.h	2
-rw-r--r--	arch/mips/include/asm/msa.h	203
-rw-r--r--	arch/mips/include/asm/page.h	2
-rw-r--r--	arch/mips/include/asm/pgtable-bits.h	9
-rw-r--r--	arch/mips/include/asm/processor.h	45
-rw-r--r--	arch/mips/include/asm/ptrace.h	2
-rw-r--r--	arch/mips/include/asm/r4kcache.h	175
-rw-r--r--	arch/mips/include/asm/sigcontext.h	2
-rw-r--r--	arch/mips/include/asm/smp-cps.h	33
-rw-r--r--	arch/mips/include/asm/smp-ops.h	17
-rw-r--r--	arch/mips/include/asm/smp.h	1
-rw-r--r--	arch/mips/include/asm/stackframe.h	2
-rw-r--r--	arch/mips/include/asm/switch_to.h	22
-rw-r--r--	arch/mips/include/asm/syscall.h	32
-rw-r--r--	arch/mips/include/asm/thread_info.h	7
-rw-r--r--	arch/mips/include/asm/uaccess.h	559
58 files changed, 2587 insertions, 648 deletions
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
new file mode 100644
index 000000000000..e41c56e375b1
--- /dev/null
+++ b/arch/mips/include/asm/asm-eva.h
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef __ASM_ASM_EVA_H
+#define __ASM_ASM_EVA_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr)	\
+	"	.set	push\n"			\
+	"	.set	mips0\n"		\
+	"	.set	eva\n"			\
+	"	"insn" "reg", "addr "\n"	\
+	"	.set	pop\n"
+
+#define user_cache(op, base)	__BUILD_EVA_INSN("cachee", op, base)
+#define user_ll(reg, addr)	__BUILD_EVA_INSN("lle", reg, addr)
+#define user_sc(reg, addr)	__BUILD_EVA_INSN("sce", reg, addr)
+#define user_lw(reg, addr)	__BUILD_EVA_INSN("lwe", reg, addr)
+#define user_lwl(reg, addr)	__BUILD_EVA_INSN("lwle", reg, addr)
+#define user_lwr(reg, addr)	__BUILD_EVA_INSN("lwre", reg, addr)
+#define user_lh(reg, addr)	__BUILD_EVA_INSN("lhe", reg, addr)
+#define user_lb(reg, addr)	__BUILD_EVA_INSN("lbe", reg, addr)
+#define user_lbu(reg, addr)	__BUILD_EVA_INSN("lbue", reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)	user_lw(reg, addr)
+#define user_sw(reg, addr)	__BUILD_EVA_INSN("swe", reg, addr)
+#define user_swl(reg, addr)	__BUILD_EVA_INSN("swle", reg, addr)
+#define user_swr(reg, addr)	__BUILD_EVA_INSN("swre", reg, addr)
+#define user_sh(reg, addr)	__BUILD_EVA_INSN("she", reg, addr)
+#define user_sb(reg, addr)	__BUILD_EVA_INSN("sbe", reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr)	user_sw(reg, addr)
+
+#else
+
+#define user_cache(op, base)	"cache " op ", " base "\n"
+#define user_ll(reg, addr)	"ll " reg ", " addr "\n"
+#define user_sc(reg, addr)	"sc " reg ", " addr "\n"
+#define user_lw(reg, addr)	"lw " reg ", " addr "\n"
+#define user_lwl(reg, addr)	"lwl " reg ", " addr "\n"
+#define user_lwr(reg, addr)	"lwr " reg ", " addr "\n"
+#define user_lh(reg, addr)	"lh " reg ", " addr "\n"
+#define user_lb(reg, addr)	"lb " reg ", " addr "\n"
+#define user_lbu(reg, addr)	"lbu " reg ", " addr "\n"
+#define user_sw(reg, addr)	"sw " reg ", " addr "\n"
+#define user_swl(reg, addr)	"swl " reg ", " addr "\n"
+#define user_swr(reg, addr)	"swr " reg ", " addr "\n"
+#define user_sh(reg, addr)	"sh " reg ", " addr "\n"
+#define user_sb(reg, addr)	"sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)	user_sw(reg, addr)
+#define user_ld(reg, addr)	user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)	"sd " reg ", " addr "\n"
+#define user_ld(reg, addr)	"ld " reg ", " addr "\n"
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr)	\
+	.set	push;				\
+	.set	mips0;				\
+	.set	eva;				\
+	insn	reg, addr;			\
+	.set	pop;
+
+#define user_cache(op, base)	__BUILD_EVA_INSN(cachee, op, base)
+#define user_ll(reg, addr)	__BUILD_EVA_INSN(lle, reg, addr)
+#define user_sc(reg, addr)	__BUILD_EVA_INSN(sce, reg, addr)
+#define user_lw(reg, addr)	__BUILD_EVA_INSN(lwe, reg, addr)
+#define user_lwl(reg, addr)	__BUILD_EVA_INSN(lwle, reg, addr)
+#define user_lwr(reg, addr)	__BUILD_EVA_INSN(lwre, reg, addr)
+#define user_lh(reg, addr)	__BUILD_EVA_INSN(lhe, reg, addr)
+#define user_lb(reg, addr)	__BUILD_EVA_INSN(lbe, reg, addr)
+#define user_lbu(reg, addr)	__BUILD_EVA_INSN(lbue, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)	user_lw(reg, addr)
+#define user_sw(reg, addr)	__BUILD_EVA_INSN(swe, reg, addr)
+#define user_swl(reg, addr)	__BUILD_EVA_INSN(swle, reg, addr)
+#define user_swr(reg, addr)	__BUILD_EVA_INSN(swre, reg, addr)
+#define user_sh(reg, addr)	__BUILD_EVA_INSN(she, reg, addr)
+#define user_sb(reg, addr)	__BUILD_EVA_INSN(sbe, reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr)	user_sw(reg, addr)
+#else
+
+#define user_cache(op, base)	cache op, base
+#define user_ll(reg, addr)	ll reg, addr
+#define user_sc(reg, addr)	sc reg, addr
+#define user_lw(reg, addr)	lw reg, addr
+#define user_lwl(reg, addr)	lwl reg, addr
+#define user_lwr(reg, addr)	lwr reg, addr
+#define user_lh(reg, addr)	lh reg, addr
+#define user_lb(reg, addr)	lb reg, addr
+#define user_lbu(reg, addr)	lbu reg, addr
+#define user_sw(reg, addr)	sw reg, addr
+#define user_swl(reg, addr)	swl reg, addr
+#define user_swr(reg, addr)	swr reg, addr
+#define user_sh(reg, addr)	sh reg, addr
+#define user_sb(reg, addr)	sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)	user_sw(reg, addr)
+#define user_ld(reg, addr)	user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)	sd reg, addr
+#define user_ld(reg, addr)	ld reg, addr
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EVA_H */
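Note: a minimal usage sketch, illustrative only and not part of this commit. Under CONFIG_EVA the user_* accessors expand to the EVA instruction forms (lwe, swe, ...), which reach the user address segment while the CPU runs in kernel mode; otherwise they degenerate to the ordinary instructions. The helper name and constraints below are invented for the example, and the exception-table entry a real uaccess routine needs is omitted:

	/* Hypothetical example: read one word from userspace via user_lw().
	 * Expands to "lwe %0, %1" with CONFIG_EVA, plain "lw" without. */
	static inline int demo_read_user_word(const int __user *uaddr)
	{
		int val;

		__asm__ __volatile__(
		"1:	" user_lw("%0", "%1")
		: "=&r" (val)
		: "m" (*uaddr));

		return val;
	}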
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 879691d194af..7c26b28bf252 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -18,6 +18,7 @@
 #define __ASM_ASM_H
 
 #include <asm/sgidefs.h>
+#include <asm/asm-eva.h>
 
 #ifndef CAT
 #ifdef __STDC__
@@ -145,19 +146,27 @@ symbol = value
 
 #define PREF(hint,addr)					\
 		.set	push;				\
-		.set	mips4;				\
+		.set	arch=r5000;			\
 		pref	hint, addr;			\
 		.set	pop
 
+#define PREFE(hint, addr)				\
+		.set	push;				\
+		.set	mips0;				\
+		.set	eva;				\
+		prefe	hint, addr;			\
+		.set	pop
+
 #define PREFX(hint,addr)				\
 		.set	push;				\
-		.set	mips4;				\
+		.set	arch=r5000;			\
 		prefx	hint, addr;			\
 		.set	pop
 
 #else /* !CONFIG_CPU_HAS_PREFETCH */
 
 #define PREF(hint, addr)
+#define PREFE(hint, addr)
 #define PREFX(hint, addr)
 
 #endif /* !CONFIG_CPU_HAS_PREFETCH */
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 70e1f176f123..e38c2811d4e2 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -14,75 +14,75 @@
 
 	.macro	fpu_save_single thread tmp=t0
 	cfc1	\tmp, fcr31
-	swc1	$f0, THREAD_FPR0(\thread)
-	swc1	$f1, THREAD_FPR1(\thread)
-	swc1	$f2, THREAD_FPR2(\thread)
-	swc1	$f3, THREAD_FPR3(\thread)
-	swc1	$f4, THREAD_FPR4(\thread)
-	swc1	$f5, THREAD_FPR5(\thread)
-	swc1	$f6, THREAD_FPR6(\thread)
-	swc1	$f7, THREAD_FPR7(\thread)
-	swc1	$f8, THREAD_FPR8(\thread)
-	swc1	$f9, THREAD_FPR9(\thread)
-	swc1	$f10, THREAD_FPR10(\thread)
-	swc1	$f11, THREAD_FPR11(\thread)
-	swc1	$f12, THREAD_FPR12(\thread)
-	swc1	$f13, THREAD_FPR13(\thread)
-	swc1	$f14, THREAD_FPR14(\thread)
-	swc1	$f15, THREAD_FPR15(\thread)
-	swc1	$f16, THREAD_FPR16(\thread)
-	swc1	$f17, THREAD_FPR17(\thread)
-	swc1	$f18, THREAD_FPR18(\thread)
-	swc1	$f19, THREAD_FPR19(\thread)
-	swc1	$f20, THREAD_FPR20(\thread)
-	swc1	$f21, THREAD_FPR21(\thread)
-	swc1	$f22, THREAD_FPR22(\thread)
-	swc1	$f23, THREAD_FPR23(\thread)
-	swc1	$f24, THREAD_FPR24(\thread)
-	swc1	$f25, THREAD_FPR25(\thread)
-	swc1	$f26, THREAD_FPR26(\thread)
-	swc1	$f27, THREAD_FPR27(\thread)
-	swc1	$f28, THREAD_FPR28(\thread)
-	swc1	$f29, THREAD_FPR29(\thread)
-	swc1	$f30, THREAD_FPR30(\thread)
-	swc1	$f31, THREAD_FPR31(\thread)
+	swc1	$f0, THREAD_FPR0_LS64(\thread)
+	swc1	$f1, THREAD_FPR1_LS64(\thread)
+	swc1	$f2, THREAD_FPR2_LS64(\thread)
+	swc1	$f3, THREAD_FPR3_LS64(\thread)
+	swc1	$f4, THREAD_FPR4_LS64(\thread)
+	swc1	$f5, THREAD_FPR5_LS64(\thread)
+	swc1	$f6, THREAD_FPR6_LS64(\thread)
+	swc1	$f7, THREAD_FPR7_LS64(\thread)
+	swc1	$f8, THREAD_FPR8_LS64(\thread)
+	swc1	$f9, THREAD_FPR9_LS64(\thread)
+	swc1	$f10, THREAD_FPR10_LS64(\thread)
+	swc1	$f11, THREAD_FPR11_LS64(\thread)
+	swc1	$f12, THREAD_FPR12_LS64(\thread)
+	swc1	$f13, THREAD_FPR13_LS64(\thread)
+	swc1	$f14, THREAD_FPR14_LS64(\thread)
+	swc1	$f15, THREAD_FPR15_LS64(\thread)
+	swc1	$f16, THREAD_FPR16_LS64(\thread)
+	swc1	$f17, THREAD_FPR17_LS64(\thread)
+	swc1	$f18, THREAD_FPR18_LS64(\thread)
+	swc1	$f19, THREAD_FPR19_LS64(\thread)
+	swc1	$f20, THREAD_FPR20_LS64(\thread)
+	swc1	$f21, THREAD_FPR21_LS64(\thread)
+	swc1	$f22, THREAD_FPR22_LS64(\thread)
+	swc1	$f23, THREAD_FPR23_LS64(\thread)
+	swc1	$f24, THREAD_FPR24_LS64(\thread)
+	swc1	$f25, THREAD_FPR25_LS64(\thread)
+	swc1	$f26, THREAD_FPR26_LS64(\thread)
+	swc1	$f27, THREAD_FPR27_LS64(\thread)
+	swc1	$f28, THREAD_FPR28_LS64(\thread)
+	swc1	$f29, THREAD_FPR29_LS64(\thread)
+	swc1	$f30, THREAD_FPR30_LS64(\thread)
+	swc1	$f31, THREAD_FPR31_LS64(\thread)
 	sw	\tmp, THREAD_FCR31(\thread)
 	.endm
 
 	.macro	fpu_restore_single thread tmp=t0
 	lw	\tmp, THREAD_FCR31(\thread)
-	lwc1	$f0, THREAD_FPR0(\thread)
-	lwc1	$f1, THREAD_FPR1(\thread)
-	lwc1	$f2, THREAD_FPR2(\thread)
-	lwc1	$f3, THREAD_FPR3(\thread)
-	lwc1	$f4, THREAD_FPR4(\thread)
-	lwc1	$f5, THREAD_FPR5(\thread)
-	lwc1	$f6, THREAD_FPR6(\thread)
-	lwc1	$f7, THREAD_FPR7(\thread)
-	lwc1	$f8, THREAD_FPR8(\thread)
-	lwc1	$f9, THREAD_FPR9(\thread)
-	lwc1	$f10, THREAD_FPR10(\thread)
-	lwc1	$f11, THREAD_FPR11(\thread)
-	lwc1	$f12, THREAD_FPR12(\thread)
-	lwc1	$f13, THREAD_FPR13(\thread)
-	lwc1	$f14, THREAD_FPR14(\thread)
-	lwc1	$f15, THREAD_FPR15(\thread)
-	lwc1	$f16, THREAD_FPR16(\thread)
-	lwc1	$f17, THREAD_FPR17(\thread)
-	lwc1	$f18, THREAD_FPR18(\thread)
-	lwc1	$f19, THREAD_FPR19(\thread)
-	lwc1	$f20, THREAD_FPR20(\thread)
-	lwc1	$f21, THREAD_FPR21(\thread)
-	lwc1	$f22, THREAD_FPR22(\thread)
-	lwc1	$f23, THREAD_FPR23(\thread)
-	lwc1	$f24, THREAD_FPR24(\thread)
-	lwc1	$f25, THREAD_FPR25(\thread)
-	lwc1	$f26, THREAD_FPR26(\thread)
-	lwc1	$f27, THREAD_FPR27(\thread)
-	lwc1	$f28, THREAD_FPR28(\thread)
-	lwc1	$f29, THREAD_FPR29(\thread)
-	lwc1	$f30, THREAD_FPR30(\thread)
-	lwc1	$f31, THREAD_FPR31(\thread)
+	lwc1	$f0, THREAD_FPR0_LS64(\thread)
+	lwc1	$f1, THREAD_FPR1_LS64(\thread)
+	lwc1	$f2, THREAD_FPR2_LS64(\thread)
+	lwc1	$f3, THREAD_FPR3_LS64(\thread)
+	lwc1	$f4, THREAD_FPR4_LS64(\thread)
+	lwc1	$f5, THREAD_FPR5_LS64(\thread)
+	lwc1	$f6, THREAD_FPR6_LS64(\thread)
+	lwc1	$f7, THREAD_FPR7_LS64(\thread)
+	lwc1	$f8, THREAD_FPR8_LS64(\thread)
+	lwc1	$f9, THREAD_FPR9_LS64(\thread)
+	lwc1	$f10, THREAD_FPR10_LS64(\thread)
+	lwc1	$f11, THREAD_FPR11_LS64(\thread)
+	lwc1	$f12, THREAD_FPR12_LS64(\thread)
+	lwc1	$f13, THREAD_FPR13_LS64(\thread)
+	lwc1	$f14, THREAD_FPR14_LS64(\thread)
+	lwc1	$f15, THREAD_FPR15_LS64(\thread)
+	lwc1	$f16, THREAD_FPR16_LS64(\thread)
+	lwc1	$f17, THREAD_FPR17_LS64(\thread)
+	lwc1	$f18, THREAD_FPR18_LS64(\thread)
+	lwc1	$f19, THREAD_FPR19_LS64(\thread)
+	lwc1	$f20, THREAD_FPR20_LS64(\thread)
+	lwc1	$f21, THREAD_FPR21_LS64(\thread)
+	lwc1	$f22, THREAD_FPR22_LS64(\thread)
+	lwc1	$f23, THREAD_FPR23_LS64(\thread)
+	lwc1	$f24, THREAD_FPR24_LS64(\thread)
+	lwc1	$f25, THREAD_FPR25_LS64(\thread)
+	lwc1	$f26, THREAD_FPR26_LS64(\thread)
+	lwc1	$f27, THREAD_FPR27_LS64(\thread)
+	lwc1	$f28, THREAD_FPR28_LS64(\thread)
+	lwc1	$f29, THREAD_FPR29_LS64(\thread)
+	lwc1	$f30, THREAD_FPR30_LS64(\thread)
+	lwc1	$f31, THREAD_FPR31_LS64(\thread)
 	ctc1	\tmp, fcr31
 	.endm
 
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 4225e99bd7bf..b464b8b1147a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -75,44 +75,44 @@
 
 	.macro	fpu_save_16even thread tmp=t0
 	cfc1	\tmp, fcr31
-	sdc1	$f0, THREAD_FPR0(\thread)
-	sdc1	$f2, THREAD_FPR2(\thread)
-	sdc1	$f4, THREAD_FPR4(\thread)
-	sdc1	$f6, THREAD_FPR6(\thread)
-	sdc1	$f8, THREAD_FPR8(\thread)
-	sdc1	$f10, THREAD_FPR10(\thread)
-	sdc1	$f12, THREAD_FPR12(\thread)
-	sdc1	$f14, THREAD_FPR14(\thread)
-	sdc1	$f16, THREAD_FPR16(\thread)
-	sdc1	$f18, THREAD_FPR18(\thread)
-	sdc1	$f20, THREAD_FPR20(\thread)
-	sdc1	$f22, THREAD_FPR22(\thread)
-	sdc1	$f24, THREAD_FPR24(\thread)
-	sdc1	$f26, THREAD_FPR26(\thread)
-	sdc1	$f28, THREAD_FPR28(\thread)
-	sdc1	$f30, THREAD_FPR30(\thread)
+	sdc1	$f0, THREAD_FPR0_LS64(\thread)
+	sdc1	$f2, THREAD_FPR2_LS64(\thread)
+	sdc1	$f4, THREAD_FPR4_LS64(\thread)
+	sdc1	$f6, THREAD_FPR6_LS64(\thread)
+	sdc1	$f8, THREAD_FPR8_LS64(\thread)
+	sdc1	$f10, THREAD_FPR10_LS64(\thread)
+	sdc1	$f12, THREAD_FPR12_LS64(\thread)
+	sdc1	$f14, THREAD_FPR14_LS64(\thread)
+	sdc1	$f16, THREAD_FPR16_LS64(\thread)
+	sdc1	$f18, THREAD_FPR18_LS64(\thread)
+	sdc1	$f20, THREAD_FPR20_LS64(\thread)
+	sdc1	$f22, THREAD_FPR22_LS64(\thread)
+	sdc1	$f24, THREAD_FPR24_LS64(\thread)
+	sdc1	$f26, THREAD_FPR26_LS64(\thread)
+	sdc1	$f28, THREAD_FPR28_LS64(\thread)
+	sdc1	$f30, THREAD_FPR30_LS64(\thread)
 	sw	\tmp, THREAD_FCR31(\thread)
 	.endm
 
 	.macro	fpu_save_16odd thread
 	.set	push
 	.set	mips64r2
-	sdc1	$f1, THREAD_FPR1(\thread)
-	sdc1	$f3, THREAD_FPR3(\thread)
-	sdc1	$f5, THREAD_FPR5(\thread)
-	sdc1	$f7, THREAD_FPR7(\thread)
-	sdc1	$f9, THREAD_FPR9(\thread)
-	sdc1	$f11, THREAD_FPR11(\thread)
-	sdc1	$f13, THREAD_FPR13(\thread)
-	sdc1	$f15, THREAD_FPR15(\thread)
-	sdc1	$f17, THREAD_FPR17(\thread)
-	sdc1	$f19, THREAD_FPR19(\thread)
-	sdc1	$f21, THREAD_FPR21(\thread)
-	sdc1	$f23, THREAD_FPR23(\thread)
-	sdc1	$f25, THREAD_FPR25(\thread)
-	sdc1	$f27, THREAD_FPR27(\thread)
-	sdc1	$f29, THREAD_FPR29(\thread)
-	sdc1	$f31, THREAD_FPR31(\thread)
+	sdc1	$f1, THREAD_FPR1_LS64(\thread)
+	sdc1	$f3, THREAD_FPR3_LS64(\thread)
+	sdc1	$f5, THREAD_FPR5_LS64(\thread)
+	sdc1	$f7, THREAD_FPR7_LS64(\thread)
+	sdc1	$f9, THREAD_FPR9_LS64(\thread)
+	sdc1	$f11, THREAD_FPR11_LS64(\thread)
+	sdc1	$f13, THREAD_FPR13_LS64(\thread)
+	sdc1	$f15, THREAD_FPR15_LS64(\thread)
+	sdc1	$f17, THREAD_FPR17_LS64(\thread)
+	sdc1	$f19, THREAD_FPR19_LS64(\thread)
+	sdc1	$f21, THREAD_FPR21_LS64(\thread)
+	sdc1	$f23, THREAD_FPR23_LS64(\thread)
+	sdc1	$f25, THREAD_FPR25_LS64(\thread)
+	sdc1	$f27, THREAD_FPR27_LS64(\thread)
+	sdc1	$f29, THREAD_FPR29_LS64(\thread)
+	sdc1	$f31, THREAD_FPR31_LS64(\thread)
 	.set	pop
 	.endm
 
@@ -128,44 +128,44 @@
 
 	.macro	fpu_restore_16even thread tmp=t0
 	lw	\tmp, THREAD_FCR31(\thread)
-	ldc1	$f0, THREAD_FPR0(\thread)
-	ldc1	$f2, THREAD_FPR2(\thread)
-	ldc1	$f4, THREAD_FPR4(\thread)
-	ldc1	$f6, THREAD_FPR6(\thread)
-	ldc1	$f8, THREAD_FPR8(\thread)
-	ldc1	$f10, THREAD_FPR10(\thread)
-	ldc1	$f12, THREAD_FPR12(\thread)
-	ldc1	$f14, THREAD_FPR14(\thread)
-	ldc1	$f16, THREAD_FPR16(\thread)
-	ldc1	$f18, THREAD_FPR18(\thread)
-	ldc1	$f20, THREAD_FPR20(\thread)
-	ldc1	$f22, THREAD_FPR22(\thread)
-	ldc1	$f24, THREAD_FPR24(\thread)
-	ldc1	$f26, THREAD_FPR26(\thread)
-	ldc1	$f28, THREAD_FPR28(\thread)
-	ldc1	$f30, THREAD_FPR30(\thread)
+	ldc1	$f0, THREAD_FPR0_LS64(\thread)
+	ldc1	$f2, THREAD_FPR2_LS64(\thread)
+	ldc1	$f4, THREAD_FPR4_LS64(\thread)
+	ldc1	$f6, THREAD_FPR6_LS64(\thread)
+	ldc1	$f8, THREAD_FPR8_LS64(\thread)
+	ldc1	$f10, THREAD_FPR10_LS64(\thread)
+	ldc1	$f12, THREAD_FPR12_LS64(\thread)
+	ldc1	$f14, THREAD_FPR14_LS64(\thread)
+	ldc1	$f16, THREAD_FPR16_LS64(\thread)
+	ldc1	$f18, THREAD_FPR18_LS64(\thread)
+	ldc1	$f20, THREAD_FPR20_LS64(\thread)
+	ldc1	$f22, THREAD_FPR22_LS64(\thread)
+	ldc1	$f24, THREAD_FPR24_LS64(\thread)
+	ldc1	$f26, THREAD_FPR26_LS64(\thread)
+	ldc1	$f28, THREAD_FPR28_LS64(\thread)
+	ldc1	$f30, THREAD_FPR30_LS64(\thread)
 	ctc1	\tmp, fcr31
 	.endm
 
 	.macro	fpu_restore_16odd thread
 	.set	push
 	.set	mips64r2
-	ldc1	$f1, THREAD_FPR1(\thread)
-	ldc1	$f3, THREAD_FPR3(\thread)
-	ldc1	$f5, THREAD_FPR5(\thread)
-	ldc1	$f7, THREAD_FPR7(\thread)
-	ldc1	$f9, THREAD_FPR9(\thread)
-	ldc1	$f11, THREAD_FPR11(\thread)
-	ldc1	$f13, THREAD_FPR13(\thread)
-	ldc1	$f15, THREAD_FPR15(\thread)
-	ldc1	$f17, THREAD_FPR17(\thread)
-	ldc1	$f19, THREAD_FPR19(\thread)
-	ldc1	$f21, THREAD_FPR21(\thread)
-	ldc1	$f23, THREAD_FPR23(\thread)
-	ldc1	$f25, THREAD_FPR25(\thread)
-	ldc1	$f27, THREAD_FPR27(\thread)
-	ldc1	$f29, THREAD_FPR29(\thread)
-	ldc1	$f31, THREAD_FPR31(\thread)
+	ldc1	$f1, THREAD_FPR1_LS64(\thread)
+	ldc1	$f3, THREAD_FPR3_LS64(\thread)
+	ldc1	$f5, THREAD_FPR5_LS64(\thread)
+	ldc1	$f7, THREAD_FPR7_LS64(\thread)
+	ldc1	$f9, THREAD_FPR9_LS64(\thread)
+	ldc1	$f11, THREAD_FPR11_LS64(\thread)
+	ldc1	$f13, THREAD_FPR13_LS64(\thread)
+	ldc1	$f15, THREAD_FPR15_LS64(\thread)
+	ldc1	$f17, THREAD_FPR17_LS64(\thread)
+	ldc1	$f19, THREAD_FPR19_LS64(\thread)
+	ldc1	$f21, THREAD_FPR21_LS64(\thread)
+	ldc1	$f23, THREAD_FPR23_LS64(\thread)
+	ldc1	$f25, THREAD_FPR25_LS64(\thread)
+	ldc1	$f27, THREAD_FPR27_LS64(\thread)
+	ldc1	$f29, THREAD_FPR29_LS64(\thread)
+	ldc1	$f31, THREAD_FPR31_LS64(\thread)
 	.set	pop
 	.endm
 
@@ -180,6 +180,17 @@
 	fpu_restore_16even \thread \tmp
 	.endm
 
+#ifdef CONFIG_CPU_MIPSR2
+	.macro	_EXT	rd, rs, p, s
+	ext	\rd, \rs, \p, \s
+	.endm
+#else /* !CONFIG_CPU_MIPSR2 */
+	.macro	_EXT	rd, rs, p, s
+	srl	\rd, \rs, \p
+	andi	\rd, \rd, (1 << \s) - 1
+	.endm
+#endif /* !CONFIG_CPU_MIPSR2 */
+
 /*
  * Temporary until all gas have MT ASE support
  */
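Note: the _EXT fallback above is the standard shift-and-mask idiom for pre-R2 cores. A plain C rendering of what both variants compute (my paraphrase, not from the patch), with the caveat that the fallback's andi only accepts a 16-bit immediate, so it can handle fields up to 16 bits wide:

	/* Extract an s-bit field of rs starting at bit p,
	 * like the MIPS R2 "ext rd, rs, p, s" instruction. */
	static inline unsigned long demo_ext(unsigned long rs,
					     unsigned int p, unsigned int s)
	{
		return (rs >> p) & ((1UL << s) - 1);
	}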
@@ -207,4 +218,195 @@
 	.word	0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
 	.endm
 
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+	.macro	ld_d	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	ld.d	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	st_d	wd, off, base
+	.set	push
+	.set	mips32r2
+	.set	msa
+	st.d	$w\wd, \off(\base)
+	.set	pop
+	.endm
+
+	.macro	copy_u_w	rd, ws, n
+	.set	push
+	.set	mips32r2
+	.set	msa
+	copy_u.w \rd, $w\ws[\n]
+	.set	pop
+	.endm
+
+	.macro	copy_u_d	rd, ws, n
+	.set	push
+	.set	mips64r2
+	.set	msa
+	copy_u.d \rd, $w\ws[\n]
+	.set	pop
+	.endm
+
+	.macro	insert_w	wd, n, rs
+	.set	push
+	.set	mips32r2
+	.set	msa
+	insert.w $w\wd[\n], \rs
+	.set	pop
+	.endm
+
+	.macro	insert_d	wd, n, rs
+	.set	push
+	.set	mips64r2
+	.set	msa
+	insert.d $w\wd[\n], \rs
+	.set	pop
+	.endm
+#else
+	/*
+	 * Temporary until all toolchains in use include MSA support.
+	 */
+	.macro	cfcmsa	rd, cs
+	.set	push
+	.set	noat
+	.word	0x787e0059 | (\cs << 11)
+	move	\rd, $1
+	.set	pop
+	.endm
+
+	.macro	ctcmsa	cd, rs
+	.set	push
+	.set	noat
+	move	$1, \rs
+	.word	0x783e0819 | (\cd << 6)
+	.set	pop
+	.endm
+
+	.macro	ld_d	wd, off, base
+	.set	push
+	.set	noat
+	add	$1, \base, \off
+	.word	0x78000823 | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	st_d	wd, off, base
+	.set	push
+	.set	noat
+	add	$1, \base, \off
+	.word	0x78000827 | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	copy_u_w	rd, ws, n
+	.set	push
+	.set	noat
+	.word	0x78f00059 | (\n << 16) | (\ws << 11)
+	/* move triggers an assembler bug... */
+	or	\rd, $1, zero
+	.set	pop
+	.endm
+
+	.macro	copy_u_d	rd, ws, n
+	.set	push
+	.set	noat
+	.word	0x78f80059 | (\n << 16) | (\ws << 11)
+	/* move triggers an assembler bug... */
+	or	\rd, $1, zero
+	.set	pop
+	.endm
+
+	.macro	insert_w	wd, n, rs
+	.set	push
+	.set	noat
+	/* move triggers an assembler bug... */
+	or	$1, \rs, zero
+	.word	0x79300819 | (\n << 16) | (\wd << 6)
+	.set	pop
+	.endm
+
+	.macro	insert_d	wd, n, rs
+	.set	push
+	.set	noat
+	/* move triggers an assembler bug... */
+	or	$1, \rs, zero
+	.word	0x79380819 | (\n << 16) | (\wd << 6)
+	.set	pop
+	.endm
+#endif
+
+	.macro	msa_save_all	thread
+	st_d	0, THREAD_FPR0, \thread
+	st_d	1, THREAD_FPR1, \thread
+	st_d	2, THREAD_FPR2, \thread
+	st_d	3, THREAD_FPR3, \thread
+	st_d	4, THREAD_FPR4, \thread
+	st_d	5, THREAD_FPR5, \thread
+	st_d	6, THREAD_FPR6, \thread
+	st_d	7, THREAD_FPR7, \thread
+	st_d	8, THREAD_FPR8, \thread
+	st_d	9, THREAD_FPR9, \thread
+	st_d	10, THREAD_FPR10, \thread
+	st_d	11, THREAD_FPR11, \thread
+	st_d	12, THREAD_FPR12, \thread
+	st_d	13, THREAD_FPR13, \thread
+	st_d	14, THREAD_FPR14, \thread
+	st_d	15, THREAD_FPR15, \thread
+	st_d	16, THREAD_FPR16, \thread
+	st_d	17, THREAD_FPR17, \thread
+	st_d	18, THREAD_FPR18, \thread
+	st_d	19, THREAD_FPR19, \thread
+	st_d	20, THREAD_FPR20, \thread
+	st_d	21, THREAD_FPR21, \thread
+	st_d	22, THREAD_FPR22, \thread
+	st_d	23, THREAD_FPR23, \thread
+	st_d	24, THREAD_FPR24, \thread
+	st_d	25, THREAD_FPR25, \thread
+	st_d	26, THREAD_FPR26, \thread
+	st_d	27, THREAD_FPR27, \thread
+	st_d	28, THREAD_FPR28, \thread
+	st_d	29, THREAD_FPR29, \thread
+	st_d	30, THREAD_FPR30, \thread
+	st_d	31, THREAD_FPR31, \thread
+	.endm
+
+	.macro	msa_restore_all	thread
+	ld_d	0, THREAD_FPR0, \thread
+	ld_d	1, THREAD_FPR1, \thread
+	ld_d	2, THREAD_FPR2, \thread
+	ld_d	3, THREAD_FPR3, \thread
+	ld_d	4, THREAD_FPR4, \thread
+	ld_d	5, THREAD_FPR5, \thread
+	ld_d	6, THREAD_FPR6, \thread
+	ld_d	7, THREAD_FPR7, \thread
+	ld_d	8, THREAD_FPR8, \thread
+	ld_d	9, THREAD_FPR9, \thread
+	ld_d	10, THREAD_FPR10, \thread
+	ld_d	11, THREAD_FPR11, \thread
+	ld_d	12, THREAD_FPR12, \thread
+	ld_d	13, THREAD_FPR13, \thread
+	ld_d	14, THREAD_FPR14, \thread
+	ld_d	15, THREAD_FPR15, \thread
+	ld_d	16, THREAD_FPR16, \thread
+	ld_d	17, THREAD_FPR17, \thread
+	ld_d	18, THREAD_FPR18, \thread
+	ld_d	19, THREAD_FPR19, \thread
+	ld_d	20, THREAD_FPR20, \thread
+	ld_d	21, THREAD_FPR21, \thread
+	ld_d	22, THREAD_FPR22, \thread
+	ld_d	23, THREAD_FPR23, \thread
+	ld_d	24, THREAD_FPR24, \thread
+	ld_d	25, THREAD_FPR25, \thread
+	ld_d	26, THREAD_FPR26, \thread
+	ld_d	27, THREAD_FPR27, \thread
+	ld_d	28, THREAD_FPR28, \thread
+	ld_d	29, THREAD_FPR29, \thread
+	ld_d	30, THREAD_FPR30, \thread
+	ld_d	31, THREAD_FPR31, \thread
+	.endm
+
 #endif /* _ASM_ASMMACRO_H */
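Note: in the !TOOLCHAIN_SUPPORTS_MSA branch the MSA instructions are hand-encoded as .word constants, with the register number and element index OR-ed into a fixed opcode template. A small illustrative C helper (mine, not kernel code) showing how, for example, the insert_w fallback's word is formed:

	#include <stdint.h>

	/* Mirror of the insert_w fallback: element index n lands at bit 16,
	 * destination MSA register wd at bit 6; the source GPR is fixed to $1. */
	static uint32_t demo_encode_insert_w(unsigned int wd, unsigned int n)
	{
		return 0x79300819u | (n << 16) | (wd << 6);
	}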
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 7eed2f261710..e8eb3d53a241 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%0, %1		# atomic_add	\n"
 		"	addu	%0, %2				\n"
 		"	sc	%0, %1				\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%0, %1	# atomic_add	\n"
 			"	addu	%0, %2			\n"
 			"	sc	%0, %1			\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%0, %1		# atomic_sub	\n"
 		"	subu	%0, %2				\n"
 		"	sc	%0, %1				\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%0, %1	# atomic_sub	\n"
 			"	subu	%0, %2			\n"
 			"	sc	%0, %1			\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2	# atomic_add_return	\n"
 		"	addu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%1, %2	# atomic_add_return	\n"
 			"	addu	%0, %1, %3		\n"
 			"	sc	%0, %2			\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2	# atomic_sub_return	\n"
 		"	subu	%0, %1, %3			\n"
 		"	sc	%0, %2				\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	ll	%1, %2	# atomic_sub_return	\n"
 			"	subu	%0, %1, %3		\n"
 			"	sc	%0, %2			\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%0, %1		# atomic64_add	\n"
 		"	daddu	%0, %2				\n"
 		"	scd	%0, %1				\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%0, %1	# atomic64_add	\n"
 			"	daddu	%0, %2			\n"
 			"	scd	%0, %1			\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%0, %1		# atomic64_sub	\n"
 		"	dsubu	%0, %2				\n"
 		"	scd	%0, %1				\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%0, %1	# atomic64_sub	\n"
 			"	dsubu	%0, %2			\n"
 			"	scd	%0, %1			\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2	# atomic64_add_return	\n"
 		"	daddu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%1, %2	# atomic64_add_return	\n"
 			"	daddu	%0, %1, %3		\n"
 			"	scd	%0, %2			\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2	# atomic64_sub_return	\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	scd	%0, %2				\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	lld	%1, %2	# atomic64_sub_return	\n"
 			"	dsubu	%0, %1, %3		\n"
 			"	scd	%0, %2			\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
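Note: every sequence in this file follows the same load-linked/store-conditional retry shape; the .set arch=r4000 change only relaxes which ISA level the assembler accepts for ll/sc. Setting aside the R10000 beqzl workaround, the llsc branch of atomic_add is equivalent to this sketch, which directly mirrors the code above:

	static inline void demo_atomic_add(int i, atomic_t *v)
	{
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000		\n"
			"	ll	%0, %1	# demo_add	\n"
			"	addu	%0, %2			\n"
			"	sc	%0, %1			\n"	/* temp: 1 on success, 0 if contended */
			"	.set	mips0			\n"
			: "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!temp));	/* retry until sc succeeds */
	}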
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 71305a8b3d78..6a65d49e2c0d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# set_bit	\n"
 		"	or	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# set_bit	\n"
 			"	or	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# clear_bit	\n"
 		"	and	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# clear_bit	\n"
 			"	and	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# change_bit	\n"
 		"	xor	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# change_bit	\n"
 			"	xor	%0, %2			\n"
 			"	" __SC	"%0, %1			\n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	" __SC	"%2, %1			\n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_set_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	" __SC	"%2, %1			\n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_clear_bit	\n"
 		"	or	%2, %0, %3			\n"
 		"	xor	%2, %3				\n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_clear_bit	\n"
 			"	or	%2, %0, %3		\n"
 			"	xor	%2, %3			\n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1	# test_and_change_bit	\n"
 		"	xor	%2, %0, %3			\n"
 		"	" __SC	"%2, %1				\n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3			\n"
+			"	.set	arch=r4000		\n"
 			"	" __LL "%0, %1	# test_and_change_bit	\n"
 			"	xor	%2, %0, %3		\n"
 			"	" __SC	"\t%2, %1		\n"
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 4d2cdea5aa37..1f7ca8b00404 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -61,15 +61,21 @@
 /*
  * Valid machtype for Loongson family
  */
-#define MACH_LOONGSON_UNKNOWN	0
-#define MACH_LEMOTE_FL2E	1
-#define MACH_LEMOTE_FL2F	2
-#define MACH_LEMOTE_ML2F7	3
-#define MACH_LEMOTE_YL2F89	4
-#define MACH_DEXXON_GDIUM2F10	5
-#define MACH_LEMOTE_NAS		6
-#define MACH_LEMOTE_LL2F	7
-#define MACH_LOONGSON_END	8
+enum loongson_machine_type {
+	MACH_LOONGSON_UNKNOWN,
+	MACH_LEMOTE_FL2E,
+	MACH_LEMOTE_FL2F,
+	MACH_LEMOTE_ML2F7,
+	MACH_LEMOTE_YL2F89,
+	MACH_DEXXON_GDIUM2F10,
+	MACH_LEMOTE_NAS,
+	MACH_LEMOTE_LL2F,
+	MACH_LEMOTE_A1004,
+	MACH_LEMOTE_A1101,
+	MACH_LEMOTE_A1201,
+	MACH_LEMOTE_A1205,
+	MACH_LOONGSON_END
+};
 
 /*
  * Valid machtype for group INGENIC
@@ -112,6 +118,8 @@ extern void prom_free_prom_memory(void);
 extern void free_init_pages(const char *what,
 			    unsigned long begin, unsigned long end);
 
+extern void (*free_init_pages_eva)(void *begin, void *end);
+
 /*
  * Initial kernel command line, usually setup by prom_init()
  */
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8a20d4..3418c51e1151 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -7,6 +7,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2001 Thiemo Seufer.
  * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
@@ -29,9 +30,13 @@
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-__wsum __csum_partial_copy_user(const void *src, void *dst,
-				int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_kernel(const void *src, void *dst,
+				  int len, __wsum sum, int *err_ptr);
 
+__wsum __csum_partial_copy_from_user(const void *src, void *dst,
+				     int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_to_user(const void *src, void *dst,
+				   int len, __wsum sum, int *err_ptr);
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
@@ -41,8 +46,26 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 				   __wsum sum, int *err_ptr)
 {
 	might_fault();
-	return __csum_partial_copy_user((__force void *)src, dst,
-					len, sum, err_ptr);
+	if (segment_eq(get_fs(), get_ds()))
+		return __csum_partial_copy_kernel((__force void *)src, dst,
+						  len, sum, err_ptr);
+	else
+		return __csum_partial_copy_from_user((__force void *)src, dst,
+						     len, sum, err_ptr);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+			       int len, __wsum sum, int *err_ptr)
+{
+	if (access_ok(VERIFY_READ, src, len))
+		return csum_partial_copy_from_user(src, dst, len, sum,
+						   err_ptr);
+	if (len)
+		*err_ptr = -EFAULT;
+
+	return sum;
 }
 
 /*
@@ -54,9 +77,16 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 			     __wsum sum, int *err_ptr)
 {
 	might_fault();
-	if (access_ok(VERIFY_WRITE, dst, len))
-		return __csum_partial_copy_user(src, (__force void *)dst,
-						len, sum, err_ptr);
+	if (access_ok(VERIFY_WRITE, dst, len)) {
+		if (segment_eq(get_fs(), get_ds()))
+			return __csum_partial_copy_kernel(src,
+							  (__force void *)dst,
+							  len, sum, err_ptr);
+		else
+			return __csum_partial_copy_to_user(src,
+							   (__force void *)dst,
+							   len, sum, err_ptr);
+	}
 	if (len)
 		*err_ptr = -EFAULT;
 
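Note: flattened out, the dispatch being introduced here reads as below (a paraphrase for clarity, not extra kernel code). With EVA, kernel and user address windows need different load/store instructions, so the helper is chosen by checking whether the current address limit is the kernel segment:

	__wsum demo_dispatch(const void __user *src, void *dst, int len,
			     __wsum sum, int *err_ptr)
	{
		/* segment_eq(get_fs(), get_ds()) is true under set_fs(KERNEL_DS),
		 * i.e. the "user" pointer really targets kernel memory. */
		if (segment_eq(get_fs(), get_ds()))
			return __csum_partial_copy_kernel((__force void *)src, dst,
							  len, sum, err_ptr);
		return __csum_partial_copy_from_user((__force void *)src, dst,
						     len, sum, err_ptr);
	}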
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069bd8465..eefcaa363a87 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 	unsigned long dummy;
 
 	__asm__ __volatile__(
-	"	.set	mips3					\n"
+	"	.set	arch=r4000				\n"
 	"1:	ll	%0, %3			# xchg_u32	\n"
 	"	.set	mips0					\n"
 	"	move	%2, %z4					\n"
-	"	.set	mips3					\n"
+	"	.set	arch=r4000				\n"
 	"	sc	%2, %1					\n"
 	"	beqzl	%2, 1b					\n"
 	"	.set	mips0					\n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 
 	do {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	ll	%0, %3		# xchg_u32	\n"
 		"	.set	mips0				\n"
 		"	move	%2, %z4				\n"
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	sc	%2, %1				\n"
 		"	.set	mips0				\n"
 		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 	unsigned long dummy;
 
 	__asm__ __volatile__(
-	"	.set	mips3					\n"
+	"	.set	arch=r4000				\n"
 	"1:	lld	%0, %3			# xchg_u64	\n"
 	"	move	%2, %z4					\n"
 	"	scd	%2, %1					\n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 
 	do {
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"	lld	%0, %3		# xchg_u64	\n"
 		"	move	%2, %z4				\n"
 		"	scd	%2, %1				\n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		__asm__ __volatile__(				\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	mips3				\n"	\
+		"	.set	arch=r4000			\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n"	\
 		"	bne	%0, %z3, 2f			\n"	\
 		"	.set	mips0				\n"	\
 		"	move	$1, %z4				\n"	\
-		"	.set	mips3				\n"	\
+		"	.set	arch=r4000			\n"	\
 		"	" st "	$1, %1				\n"	\
 		"	beqzl	$1, 1b				\n"	\
 		"2:						\n"	\
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		__asm__ __volatile__(				\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	mips3				\n"	\
+		"	.set	arch=r4000			\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n"	\
 		"	bne	%0, %z3, 2f			\n"	\
 		"	.set	mips0				\n"	\
 		"	move	$1, %z4				\n"	\
-		"	.set	mips3				\n"	\
+		"	.set	arch=r4000			\n"	\
 		"	" st "	$1, %1				\n"	\
 		"	beqz	$1, 1b				\n"	\
 		"	.set	pop				\n"	\
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 6e70b03b6aab..f56cc975b92f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -26,7 +26,9 @@
 #ifndef cpu_has_segments
 #define cpu_has_segments	(cpu_data[0].options & MIPS_CPU_SEGMENTS)
 #endif
-
+#ifndef cpu_has_eva
+#define cpu_has_eva		(cpu_data[0].options & MIPS_CPU_EVA)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -299,4 +301,10 @@
 #define cpu_has_vz		(cpu_data[0].ases & MIPS_ASE_VZ)
 #endif
 
+#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
+# define cpu_has_msa	(cpu_data[0].ases & MIPS_ASE_MSA)
+#elif !defined(cpu_has_msa)
+# define cpu_has_msa	0
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
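Note: like the other cpu_has_* feature tests, cpu_has_msa collapses to the constant 0 when the kernel is configured without MSA, so guarded code is discarded at compile time. An invented caller, for illustration only:

	static void demo_report_simd(void)
	{
		if (cpu_has_msa)	/* dead code when CONFIG_CPU_HAS_MSA=n */
			pr_info("MSA id: 0x%x\n", cpu_data[0].msa_id);
	}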
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 8f7adf0ac1e3..dc2135be2a3a 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -49,6 +49,7 @@ struct cpuinfo_mips {
 	unsigned long		ases;
 	unsigned int		processor_id;
 	unsigned int		fpu_id;
+	unsigned int		msa_id;
 	unsigned int		cputype;
 	int			isa_level;
 	int			tlbsize;
@@ -95,4 +96,31 @@ extern void cpu_report(void);
 extern const char *__cpu_name[];
 #define cpu_name_string()	__cpu_name[smp_processor_id()]
 
+struct seq_file;
+struct notifier_block;
+
+extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
+extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
+
+#define proc_cpuinfo_notifier(fn, pri)					\
+({									\
+	static struct notifier_block fn##_nb = {			\
+		.notifier_call = fn,					\
+		.priority = pri						\
+	};								\
+									\
+	register_proc_cpuinfo_notifier(&fn##_nb);			\
+})
+
+struct proc_cpuinfo_notifier_args {
+	struct seq_file *m;
+	unsigned long n;
+};
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+# define cpu_vpe_id(cpuinfo)	((cpuinfo)->vpe_id)
+#else
+# define cpu_vpe_id(cpuinfo)	0
+#endif
+
 #endif /* __ASM_CPU_INFO_H */
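Note: a sketch of how the new notifier chain is consumed (handler name and output invented). A subsystem registers a callback; when /proc/cpuinfo is generated it receives the seq_file and CPU number via proc_cpuinfo_notifier_args and can append its own lines:

	static int demo_cpuinfo_notify(struct notifier_block *nb,
				       unsigned long action, void *data)
	{
		struct proc_cpuinfo_notifier_args *args = data;

		seq_printf(args->m, "demo feature\t\t: yes (cpu %lu)\n", args->n);
		return NOTIFY_OK;
	}

	static int __init demo_cpuinfo_init(void)
	{
		/* the macro expands to a static notifier_block plus registration */
		return proc_cpuinfo_notifier(demo_cpuinfo_notify, 0);
	}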
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 02f591bd95ca..721906130a57 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -20,6 +20,10 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_LOONGSON2:
 #endif
 
+#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
+	case CPU_LOONGSON3:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
 	case CPU_LOONGSON1:
 #endif
@@ -46,6 +50,8 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_M14KEC:
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
+	case CPU_P5600:
+	case CPU_M5150:
 #endif
 
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 76411df3d971..530eb8b3a68e 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -82,10 +82,10 @@
 #define PRID_IMP_RM7000		0x2700
 #define PRID_IMP_NEVADA		0x2800		/* RM5260 ??? */
 #define PRID_IMP_RM9000		0x3400
-#define PRID_IMP_LOONGSON1	0x4200
+#define PRID_IMP_LOONGSON_32	0x4200		/* Loongson-1 */
 #define PRID_IMP_R5432		0x5400
 #define PRID_IMP_R5500		0x5500
-#define PRID_IMP_LOONGSON2	0x6300
+#define PRID_IMP_LOONGSON_64	0x6300		/* Loongson-2/3 */
 
 #define PRID_IMP_UNKNOWN	0xff00
 
@@ -115,6 +115,8 @@
 #define PRID_IMP_INTERAPTIV_MP	0xa100
 #define PRID_IMP_PROAPTIV_UP	0xa200
 #define PRID_IMP_PROAPTIV_MP	0xa300
+#define PRID_IMP_M5150		0xa700
+#define PRID_IMP_P5600		0xa800
 
 /*
  * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -229,6 +231,7 @@
 #define PRID_REV_LOONGSON1B	0x0020
 #define PRID_REV_LOONGSON2E	0x0002
 #define PRID_REV_LOONGSON2F	0x0003
+#define PRID_REV_LOONGSON3A	0x0005
 
 /*
  * Older processors used to encode processor version and revision in two
@@ -296,14 +299,14 @@ enum cpu_type_enum {
 	CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
 	CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
 	CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
-	CPU_M14KEC, CPU_INTERAPTIV, CPU_PROAPTIV,
+	CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
 
 	/*
 	 * MIPS64 class processors
 	 */
 	CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
-	CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
-	CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
+	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
+	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
 	CPU_LAST
 };
@@ -358,6 +361,7 @@ enum cpu_type_enum {
358#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */ 361#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
359#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */ 362#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */
360#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */ 363#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */
364#define MIPS_CPU_EVA 0x80000000 /* CPU supports Enhanced Virtual Addressing */
361 365
362/* 366/*
363 * CPU ASE encodings 367 * CPU ASE encodings
@@ -370,5 +374,6 @@ enum cpu_type_enum {
370#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */ 374#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
371#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */ 375#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
372#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */ 376#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
377#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
373 378
374#endif /* _ASM_CPU_H */ 379#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 84238c574d5e..06412aa9e3fb 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -49,9 +49,14 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
49static inline int 49static inline int
50dma_set_mask(struct device *dev, u64 mask) 50dma_set_mask(struct device *dev, u64 mask)
51{ 51{
52 struct dma_map_ops *ops = get_dma_ops(dev);
53
52 if(!dev->dma_mask || !dma_supported(dev, mask)) 54 if(!dev->dma_mask || !dma_supported(dev, mask))
53 return -EIO; 55 return -EIO;
54 56
57 if (ops->set_dma_mask)
58 return ops->set_dma_mask(dev, mask);
59
55 *dev->dma_mask = mask; 60 *dev->dma_mask = mask;
56 61
57 return 0; 62 return 0;
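
The hunk above makes dma_set_mask() defer to the platform's dma_map_ops before the generic path stores the mask. A minimal sketch of what the new hook enables (plat_set_dma_mask, plat_dma_ops and the 32-bit limit are illustrative assumptions, not code from this commit):

	#include <linux/dma-mapping.h>

	/* Hypothetical platform hook: clamp the mask to what the bus can reach. */
	static int plat_set_dma_mask(struct device *dev, u64 mask)
	{
		if (mask > DMA_BIT_MASK(32))	/* assumed 32-bit platform limit */
			mask = DMA_BIT_MASK(32);
		*dev->dma_mask = mask;
		return 0;
	}

	static struct dma_map_ops plat_dma_ops = {
		/* ... map/unmap callbacks elided ... */
		.set_dma_mask	= plat_set_dma_mask,
	};
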
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 58e50cbdb1a6..4d86b72750c7 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -180,7 +180,7 @@ static inline void restore_fp(struct task_struct *tsk)
180 _restore_fp(tsk); 180 _restore_fp(tsk);
181} 181}
182 182
183static inline fpureg_t *get_fpu_regs(struct task_struct *tsk) 183static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
184{ 184{
185 if (tsk == current) { 185 if (tsk == current) {
186 preempt_disable(); 186 preempt_disable();
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ea15815d3ee..194cda0396a3 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/futex.h> 13#include <linux/futex.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/asm-eva.h>
15#include <asm/barrier.h> 16#include <asm/barrier.h>
16#include <asm/errno.h> 17#include <asm/errno.h>
17#include <asm/war.h> 18#include <asm/war.h>
@@ -22,11 +23,11 @@
22 __asm__ __volatile__( \ 23 __asm__ __volatile__( \
23 " .set push \n" \ 24 " .set push \n" \
24 " .set noat \n" \ 25 " .set noat \n" \
25 " .set mips3 \n" \ 26 " .set arch=r4000 \n" \
26 "1: ll %1, %4 # __futex_atomic_op \n" \ 27 "1: ll %1, %4 # __futex_atomic_op \n" \
27 " .set mips0 \n" \ 28 " .set mips0 \n" \
28 " " insn " \n" \ 29 " " insn " \n" \
29 " .set mips3 \n" \ 30 " .set arch=r4000 \n" \
30 "2: sc $1, %2 \n" \ 31 "2: sc $1, %2 \n" \
31 " beqzl $1, 1b \n" \ 32 " beqzl $1, 1b \n" \
32 __WEAK_LLSC_MB \ 33 __WEAK_LLSC_MB \
@@ -48,12 +49,12 @@
48 __asm__ __volatile__( \ 49 __asm__ __volatile__( \
49 " .set push \n" \ 50 " .set push \n" \
50 " .set noat \n" \ 51 " .set noat \n" \
51 " .set mips3 \n" \ 52 " .set arch=r4000 \n" \
52 "1: ll %1, %4 # __futex_atomic_op \n" \ 53 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
53 " .set mips0 \n" \ 54 " .set mips0 \n" \
54 " " insn " \n" \ 55 " " insn " \n" \
55 " .set mips3 \n" \ 56 " .set arch=r4000 \n" \
56 "2: sc $1, %2 \n" \ 57 "2: "user_sc("$1", "%2")" \n" \
57 " beqz $1, 1b \n" \ 58 " beqz $1, 1b \n" \
58 __WEAK_LLSC_MB \ 59 __WEAK_LLSC_MB \
59 "3: \n" \ 60 "3: \n" \
@@ -146,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
146 "# futex_atomic_cmpxchg_inatomic \n" 147 "# futex_atomic_cmpxchg_inatomic \n"
147 " .set push \n" 148 " .set push \n"
148 " .set noat \n" 149 " .set noat \n"
149 " .set mips3 \n" 150 " .set arch=r4000 \n"
150 "1: ll %1, %3 \n" 151 "1: ll %1, %3 \n"
151 " bne %1, %z4, 3f \n" 152 " bne %1, %z4, 3f \n"
152 " .set mips0 \n" 153 " .set mips0 \n"
153 " move $1, %z5 \n" 154 " move $1, %z5 \n"
154 " .set mips3 \n" 155 " .set arch=r4000 \n"
155 "2: sc $1, %2 \n" 156 "2: sc $1, %2 \n"
156 " beqzl $1, 1b \n" 157 " beqzl $1, 1b \n"
157 __WEAK_LLSC_MB 158 __WEAK_LLSC_MB
@@ -173,13 +174,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
173 "# futex_atomic_cmpxchg_inatomic \n" 174 "# futex_atomic_cmpxchg_inatomic \n"
174 " .set push \n" 175 " .set push \n"
175 " .set noat \n" 176 " .set noat \n"
176 " .set mips3 \n" 177 " .set arch=r4000 \n"
177 "1: ll %1, %3 \n" 178 "1: "user_ll("%1", "%3")" \n"
178 " bne %1, %z4, 3f \n" 179 " bne %1, %z4, 3f \n"
179 " .set mips0 \n" 180 " .set mips0 \n"
180 " move $1, %z5 \n" 181 " move $1, %z5 \n"
181 " .set mips3 \n" 182 " .set arch=r4000 \n"
182 "2: sc $1, %2 \n" 183 "2: "user_sc("$1", "%2")" \n"
183 " beqz $1, 1b \n" 184 " beqz $1, 1b \n"
184 __WEAK_LLSC_MB 185 __WEAK_LLSC_MB
185 "3: \n" 186 "3: \n"
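
The futex fast paths now emit user_ll()/user_sc() from the newly included <asm/asm-eva.h> instead of bare ll/sc, because user-space operands need the EVA load/store variants (lle/sce) when the kernel runs with EVA segmentation. A simplified sketch of the idea behind those wrappers (the real header constructs the instruction strings differently):

	#ifdef CONFIG_EVA
	/* EVA kernels must use the user-address variants for user operands. */
	# define user_ll(reg, addr)	"lle " reg ", " addr
	# define user_sc(reg, addr)	"sce " reg ", " addr
	#else
	/* Without EVA, user and kernel accesses share the same ll/sc. */
	# define user_ll(reg, addr)	"ll " reg ", " addr
	# define user_sc(reg, addr)	"sc " reg ", " addr
	#endif
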
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
index d6c50a7e9ede..f3e6978aad70 100644
--- a/arch/mips/include/asm/fw/fw.h
+++ b/arch/mips/include/asm/fw/fw.h
@@ -38,7 +38,7 @@ extern int *_fw_envp;
38 38
39extern void fw_init_cmdline(void); 39extern void fw_init_cmdline(void);
40extern char *fw_getcmdline(void); 40extern char *fw_getcmdline(void);
41extern fw_memblock_t *fw_getmdesc(void); 41extern fw_memblock_t *fw_getmdesc(int);
42extern void fw_meminit(void); 42extern void fw_meminit(void);
43extern char *fw_getenv(char *name); 43extern char *fw_getenv(char *name);
44extern unsigned long fw_getenvl(char *name); 44extern unsigned long fw_getenvl(char *name);
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
deleted file mode 100644
index a7359f77a48e..000000000000
--- a/arch/mips/include/asm/gcmpregs.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 07 MIPS Technologies, Inc.
7 *
8 * Multiprocessor Subsystem Register Definitions
9 *
10 */
11#ifndef _ASM_GCMPREGS_H
12#define _ASM_GCMPREGS_H
13
14
15/* Offsets to major blocks within GCMP from GCMP base */
16#define GCMP_GCB_OFS 0x0000 /* Global Control Block */
17#define GCMP_CLCB_OFS 0x2000 /* Core Local Control Block */
18#define GCMP_COCB_OFS 0x4000 /* Core Other Control Block */
19#define GCMP_GDB_OFS 0x8000 /* Global Debug Block */
20
21/* Offsets to individual GCMP registers from GCMP base */
22#define GCMPOFS(block, tag, reg) \
23 (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
24#define GCMPOFSn(block, tag, reg, n) \
25 (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS(n))
26
27#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg)
28#define GCMPGCBOFSn(reg, n) GCMPOFSn(GCB, GCB, reg, n)
29#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg)
30#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg)
31#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg)
32
33/* GCMP register access */
34#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
35#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
36#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
37#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
38#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
39
40/* Mask generation */
41#define GCMPMSK(block, reg, bits) (MSK(bits)<<GCMP_##block##_##reg##_SHF)
42#define GCMPGCBMSK(reg, bits) GCMPMSK(GCB, reg, bits)
43#define GCMPCCBMSK(reg, bits) GCMPMSK(CCB, reg, bits)
44#define GCMPGDBMSK(reg, bits) GCMPMSK(GDB, reg, bits)
45
46/* GCB registers */
47#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
48#define GCMP_GCB_GC_NUMIOCU_SHF 8
49#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
50#define GCMP_GCB_GC_NUMCORES_SHF 0
51#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
52#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
53#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
54#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
55#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
56#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
57#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
58#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
59#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
60#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
61#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
62#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
63#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
64#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
65#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
66#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
67#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
68#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
69#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
70#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
71#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
72#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
73#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
74#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
75#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
76#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
77#define GCMP_GCB_GICBA_BASE_SHF 17
78#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
79#define GCMP_GCB_GICBA_EN_SHF 0
80#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
81
82/* GCB Regions */
83#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
84#define GCMP_GCB_CMxBASE_BASE_SHF 16
85#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
86#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
87#define GCMP_GCB_CMxMASK_MASK_SHF 16
88#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
89#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
90#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
91#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
92#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
93#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
94#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
95
96
97/* Core local/Core other control block registers */
98#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
99#define GCMP_CCB_RESETR_INRESET_SHF 0
100#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
101#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
102#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
103#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
104#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
105#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
106#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
107#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
108#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
109#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
110#define GCMP_CCB_CFG_NUMVPE_SHF 0
111#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
112#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
113#define GCMP_CCB_OTHER_CORENUM_SHF 16
114#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
115#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
116#define GCMP_CCB_RESETBASE_BEV_SHF 12
117#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
118#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
119#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
120#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
121
122extern int __init gcmp_probe(unsigned long, unsigned long);
123extern int __init gcmp_niocu(void);
124extern void __init gcmp_setregion(int, unsigned long, unsigned long, int);
125#endif /* _ASM_GCMPREGS_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index b2e3e93dd7d8..082716690589 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -11,6 +11,9 @@
11#ifndef _ASM_GICREGS_H 11#ifndef _ASM_GICREGS_H
12#define _ASM_GICREGS_H 12#define _ASM_GICREGS_H
13 13
14#include <linux/bitmap.h>
15#include <linux/threads.h>
16
14#undef GICISBYTELITTLEENDIAN 17#undef GICISBYTELITTLEENDIAN
15 18
16/* Constants */ 19/* Constants */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 3321dd5a8872..933b50e125a0 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val, \
331 if (irq) \ 331 if (irq) \
332 local_irq_save(__flags); \ 332 local_irq_save(__flags); \
333 __asm__ __volatile__( \ 333 __asm__ __volatile__( \
334 ".set mips3" "\t\t# __writeq""\n\t" \ 334 ".set arch=r4000" "\t\t# __writeq""\n\t" \
335 "dsll32 %L0, %L0, 0" "\n\t" \ 335 "dsll32 %L0, %L0, 0" "\n\t" \
336 "dsrl32 %L0, %L0, 0" "\n\t" \ 336 "dsrl32 %L0, %L0, 0" "\n\t" \
337 "dsll32 %M0, %M0, 0" "\n\t" \ 337 "dsll32 %M0, %M0, 0" "\n\t" \
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
361 if (irq) \ 361 if (irq) \
362 local_irq_save(__flags); \ 362 local_irq_save(__flags); \
363 __asm__ __volatile__( \ 363 __asm__ __volatile__( \
364 ".set mips3" "\t\t# __readq" "\n\t" \ 364 ".set arch=r4000" "\t\t# __readq" "\n\t" \
365 "ld %L0, %1" "\n\t" \ 365 "ld %L0, %1" "\n\t" \
366 "dsra32 %M0, %L0, 0" "\n\t" \ 366 "dsra32 %M0, %L0, 0" "\n\t" \
367 "sll %L0, %L0, 0" "\n\t" \ 367 "sll %L0, %L0, 0" "\n\t" \
@@ -584,7 +584,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
584 * 584 *
585 * This API used to be exported; it now is for arch code internal use only. 585 * This API used to be exported; it now is for arch code internal use only.
586 */ 586 */
587#ifdef CONFIG_DMA_NONCOHERENT 587#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
588 588
589extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); 589extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
590extern void (*_dma_cache_wback)(unsigned long start, unsigned long size); 590extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -603,7 +603,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
603#define dma_cache_inv(start,size) \ 603#define dma_cache_inv(start,size) \
604 do { (void) (start); (void) (size); } while (0) 604 do { (void) (start); (void) (size); } while (0)
605 605
606#endif /* CONFIG_DMA_NONCOHERENT */ 606#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
607 607
608/* 608/*
609 * Read a 32-bit register that requires a 64-bit read cycle on the bus. 609 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index d44622cd74be..46dfc3c1fd49 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
33 unsigned long temp; 33 unsigned long temp;
34 34
35 __asm__ __volatile__( 35 __asm__ __volatile__(
36 " .set mips3 \n" 36 " .set arch=r4000 \n"
37 "1:" __LL "%1, %2 # local_add_return \n" 37 "1:" __LL "%1, %2 # local_add_return \n"
38 " addu %0, %1, %3 \n" 38 " addu %0, %1, %3 \n"
39 __SC "%0, %2 \n" 39 __SC "%0, %2 \n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
47 unsigned long temp; 47 unsigned long temp;
48 48
49 __asm__ __volatile__( 49 __asm__ __volatile__(
50 " .set mips3 \n" 50 " .set arch=r4000 \n"
51 "1:" __LL "%1, %2 # local_add_return \n" 51 "1:" __LL "%1, %2 # local_add_return \n"
52 " addu %0, %1, %3 \n" 52 " addu %0, %1, %3 \n"
53 __SC "%0, %2 \n" 53 __SC "%0, %2 \n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
78 unsigned long temp; 78 unsigned long temp;
79 79
80 __asm__ __volatile__( 80 __asm__ __volatile__(
81 " .set mips3 \n" 81 " .set arch=r4000 \n"
82 "1:" __LL "%1, %2 # local_sub_return \n" 82 "1:" __LL "%1, %2 # local_sub_return \n"
83 " subu %0, %1, %3 \n" 83 " subu %0, %1, %3 \n"
84 __SC "%0, %2 \n" 84 __SC "%0, %2 \n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
92 unsigned long temp; 92 unsigned long temp;
93 93
94 __asm__ __volatile__( 94 __asm__ __volatile__(
95 " .set mips3 \n" 95 " .set arch=r4000 \n"
96 "1:" __LL "%1, %2 # local_sub_return \n" 96 "1:" __LL "%1, %2 # local_sub_return \n"
97 " subu %0, %1, %3 \n" 97 " subu %0, %1, %3 \n"
98 __SC "%0, %2 \n" 98 __SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 54f9e84db8ac..b4c3ecb17d48 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -1161,18 +1161,6 @@ enum soc_au1200_ints {
1161#define MAC_RX_BUFF3_STATUS 0x30 1161#define MAC_RX_BUFF3_STATUS 0x30
1162#define MAC_RX_BUFF3_ADDR 0x34 1162#define MAC_RX_BUFF3_ADDR 0x34
1163 1163
1164#define UART_RX 0 /* Receive buffer */
1165#define UART_TX 4 /* Transmit buffer */
1166#define UART_IER 8 /* Interrupt Enable Register */
1167#define UART_IIR 0xC /* Interrupt ID Register */
1168#define UART_FCR 0x10 /* FIFO Control Register */
1169#define UART_LCR 0x14 /* Line Control Register */
1170#define UART_MCR 0x18 /* Modem Control Register */
1171#define UART_LSR 0x1C /* Line Status Register */
1172#define UART_MSR 0x20 /* Modem Status Register */
1173#define UART_CLK 0x28 /* Baud Rate Clock Divider */
1174#define UART_MOD_CNTRL 0x100 /* Module Control */
1175
1176/* SSIO */ 1164/* SSIO */
1177#define SSI0_STATUS 0xB1600000 1165#define SSI0_STATUS 0xB1600000
1178# define SSI_STATUS_BF (1 << 4) 1166# define SSI_STATUS_BF (1 << 4)
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 40005fb39618..bba7399a49a3 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -27,7 +27,11 @@ enum bcm47xx_board {
27 BCM47XX_BOARD_ASUS_WL700GE, 27 BCM47XX_BOARD_ASUS_WL700GE,
28 BCM47XX_BOARD_ASUS_WLHDD, 28 BCM47XX_BOARD_ASUS_WLHDD,
29 29
30 BCM47XX_BOARD_BELKIN_F7D3301,
31 BCM47XX_BOARD_BELKIN_F7D3302,
30 BCM47XX_BOARD_BELKIN_F7D4301, 32 BCM47XX_BOARD_BELKIN_F7D4301,
33 BCM47XX_BOARD_BELKIN_F7D4302,
34 BCM47XX_BOARD_BELKIN_F7D4401,
31 35
32 BCM47XX_BOARD_BUFFALO_WBR2_G54, 36 BCM47XX_BOARD_BUFFALO_WBR2_G54,
33 BCM47XX_BOARD_BUFFALO_WHR2_A54G54, 37 BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
@@ -66,7 +70,7 @@ enum bcm47xx_board {
66 BCM47XX_BOARD_LINKSYS_WRT310NV1, 70 BCM47XX_BOARD_LINKSYS_WRT310NV1,
67 BCM47XX_BOARD_LINKSYS_WRT310NV2, 71 BCM47XX_BOARD_LINKSYS_WRT310NV2,
68 BCM47XX_BOARD_LINKSYS_WRT54G3GV2, 72 BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
69 BCM47XX_BOARD_LINKSYS_WRT54GSV1, 73 BCM47XX_BOARD_LINKSYS_WRT54G,
70 BCM47XX_BOARD_LINKSYS_WRT610NV1, 74 BCM47XX_BOARD_LINKSYS_WRT610NV1,
71 BCM47XX_BOARD_LINKSYS_WRT610NV2, 75 BCM47XX_BOARD_LINKSYS_WRT610NV2,
72 BCM47XX_BOARD_LINKSYS_WRTSL54GS, 76 BCM47XX_BOARD_LINKSYS_WRTSL54GS,
@@ -94,6 +98,8 @@ enum bcm47xx_board {
94 98
95 BCM47XX_BOARD_PHICOMM_M1, 99 BCM47XX_BOARD_PHICOMM_M1,
96 100
101 BCM47XX_BOARD_SIEMENS_SE505V2,
102
97 BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE, 103 BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
98 104
99 BCM47XX_BOARD_ZTE_H218N, 105 BCM47XX_BOARD_ZTE_H218N,
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
deleted file mode 100644
index d3cce7326dd4..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * AMD Alchemy DBAu1200 Reference Board
3 * Board register defines.
4 *
5 * ########################################################################
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * ########################################################################
21 *
22 *
23 */
24#ifndef __ASM_DB1200_H
25#define __ASM_DB1200_H
26
27#include <linux/types.h>
28#include <asm/mach-au1x00/au1000.h>
29#include <asm/mach-au1x00/au1xxx_psc.h>
30
31/* Bit positions for the different interrupt sources */
32#define BCSR_INT_IDE 0x0001
33#define BCSR_INT_ETH 0x0002
34#define BCSR_INT_PC0 0x0004
35#define BCSR_INT_PC0STSCHG 0x0008
36#define BCSR_INT_PC1 0x0010
37#define BCSR_INT_PC1STSCHG 0x0020
38#define BCSR_INT_DC 0x0040
39#define BCSR_INT_FLASHBUSY 0x0080
40#define BCSR_INT_PC0INSERT 0x0100
41#define BCSR_INT_PC0EJECT 0x0200
42#define BCSR_INT_PC1INSERT 0x0400
43#define BCSR_INT_PC1EJECT 0x0800
44#define BCSR_INT_SD0INSERT 0x1000
45#define BCSR_INT_SD0EJECT 0x2000
46#define BCSR_INT_SD1INSERT 0x4000
47#define BCSR_INT_SD1EJECT 0x8000
48
49#define IDE_REG_SHIFT 5
50
51#define DB1200_IDE_PHYS_ADDR 0x18800000
52#define DB1200_IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
53#define DB1200_ETH_PHYS_ADDR 0x19000300
54#define DB1200_NAND_PHYS_ADDR 0x20000000
55
56#define PB1200_IDE_PHYS_ADDR 0x0C800000
57#define PB1200_ETH_PHYS_ADDR 0x0D000300
58#define PB1200_NAND_PHYS_ADDR 0x1C000000
59
60/*
61 * External Interrupts for DBAu1200 as of 8/6/2004.
62 * Bit positions in the CPLD registers can be calculated by taking
63 * the interrupt define and subtracting the DB1200_INT_BEGIN value.
64 *
 65 * Example: IDE bit pos is = 64 - 64
66 * ETH bit pos is = 65 - 64
67 */
68enum external_db1200_ints {
69 DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
70
71 DB1200_IDE_INT = DB1200_INT_BEGIN,
72 DB1200_ETH_INT,
73 DB1200_PC0_INT,
74 DB1200_PC0_STSCHG_INT,
75 DB1200_PC1_INT,
76 DB1200_PC1_STSCHG_INT,
77 DB1200_DC_INT,
78 DB1200_FLASHBUSY_INT,
79 DB1200_PC0_INSERT_INT,
80 DB1200_PC0_EJECT_INT,
81 DB1200_PC1_INSERT_INT,
82 DB1200_PC1_EJECT_INT,
83 DB1200_SD0_INSERT_INT,
84 DB1200_SD0_EJECT_INT,
85 PB1200_SD1_INSERT_INT,
86 PB1200_SD1_EJECT_INT,
87
88 DB1200_INT_END = DB1200_INT_BEGIN + 15,
89};
90
91#endif /* __ASM_DB1200_H */
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
deleted file mode 100644
index 3d1ede46f059..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1300.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * NetLogic DB1300 board constants
3 */
4
5#ifndef _DB1300_H_
6#define _DB1300_H_
7
8/* FPGA (external mux) interrupt sources */
9#define DB1300_FIRST_INT (ALCHEMY_GPIC_INT_LAST + 1)
10#define DB1300_IDE_INT (DB1300_FIRST_INT + 0)
11#define DB1300_ETH_INT (DB1300_FIRST_INT + 1)
12#define DB1300_CF_INT (DB1300_FIRST_INT + 2)
13#define DB1300_VIDEO_INT (DB1300_FIRST_INT + 4)
14#define DB1300_HDMI_INT (DB1300_FIRST_INT + 5)
15#define DB1300_DC_INT (DB1300_FIRST_INT + 6)
16#define DB1300_FLASH_INT (DB1300_FIRST_INT + 7)
17#define DB1300_CF_INSERT_INT (DB1300_FIRST_INT + 8)
18#define DB1300_CF_EJECT_INT (DB1300_FIRST_INT + 9)
19#define DB1300_AC97_INT (DB1300_FIRST_INT + 10)
20#define DB1300_AC97_PEN_INT (DB1300_FIRST_INT + 11)
21#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
22#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
23#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
24#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
25#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
26
27/* SMSC9210 CS */
28#define DB1300_ETH_PHYS_ADDR 0x19000000
29#define DB1300_ETH_PHYS_END 0x197fffff
30
31/* ATA CS */
32#define DB1300_IDE_PHYS_ADDR 0x18800000
33#define DB1300_IDE_REG_SHIFT 5
34#define DB1300_IDE_PHYS_LEN (16 << DB1300_IDE_REG_SHIFT)
35
36/* NAND CS */
37#define DB1300_NAND_PHYS_ADDR 0x20000000
38#define DB1300_NAND_PHYS_END 0x20000fff
39
40#endif /* _DB1300_H_ */
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
new file mode 100644
index 000000000000..829a7ec185fb
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/boot_param.h
@@ -0,0 +1,163 @@
1#ifndef __ASM_MACH_LOONGSON_BOOT_PARAM_H_
2#define __ASM_MACH_LOONGSON_BOOT_PARAM_H_
3
4#define SYSTEM_RAM_LOW 1
5#define SYSTEM_RAM_HIGH 2
6#define MEM_RESERVED 3
7#define PCI_IO 4
8#define PCI_MEM 5
9#define LOONGSON_CFG_REG 6
10#define VIDEO_ROM 7
11#define ADAPTER_ROM 8
12#define ACPI_TABLE 9
13#define MAX_MEMORY_TYPE 10
14
15#define LOONGSON3_BOOT_MEM_MAP_MAX 128
16struct efi_memory_map_loongson {
17 u16 vers; /* version of efi_memory_map */
18 u32 nr_map; /* number of memory_maps */
 19	u32 mem_freq;	/* memory frequency */
20 struct mem_map {
21 u32 node_id; /* node_id which memory attached to */
22 u32 mem_type; /* system memory, pci memory, pci io, etc. */
23 u64 mem_start; /* memory map start address */
24 u32 mem_size; /* each memory_map size, not the total size */
25 } map[LOONGSON3_BOOT_MEM_MAP_MAX];
26} __packed;
27
28enum loongson_cpu_type {
29 Loongson_2E = 0,
30 Loongson_2F = 1,
31 Loongson_3A = 2,
32 Loongson_3B = 3,
33 Loongson_1A = 4,
34 Loongson_1B = 5
35};
36
37/*
38 * Capability and feature descriptor structure for MIPS CPU
39 */
40struct efi_cpuinfo_loongson {
41 u16 vers; /* version of efi_cpuinfo_loongson */
42 u32 processor_id; /* PRID, e.g. 6305, 6306 */
43 u32 cputype; /* Loongson_3A/3B, etc. */
 44	u32 total_node;		/* number of NUMA nodes in total */
45 u32 cpu_startup_core_id; /* Core id */
46 u32 cpu_clock_freq; /* cpu_clock */
47 u32 nr_cpus;
48} __packed;
49
50struct system_loongson {
51 u16 vers; /* version of system_loongson */
52 u32 ccnuma_smp; /* 0: no numa; 1: has numa */
53 u32 sing_double_channel; /* 1:single; 2:double */
54} __packed;
55
56struct irq_source_routing_table {
57 u16 vers;
58 u16 size;
59 u16 rtr_bus;
60 u16 rtr_devfn;
61 u32 vendor;
62 u32 device;
 63	u32 PIC_type;		/* whether HT or PCI is used to route to the CPU PIC */
64 u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
65 u64 ht_enable; /* irqs used in this PIC */
66 u32 node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
67 u64 pci_mem_start_addr;
68 u64 pci_mem_end_addr;
69 u64 pci_io_start_addr;
70 u64 pci_io_end_addr;
71 u64 pci_config_addr;
72 u32 dma_mask_bits;
73} __packed;
74
75struct interface_info {
 76	u16 vers;	/* version of the specification */
77 u16 size;
78 u8 flag;
79 char description[64];
80} __packed;
81
82#define MAX_RESOURCE_NUMBER 128
83struct resource_loongson {
84 u64 start; /* resource start address */
85 u64 end; /* resource end address */
86 char name[64];
87 u32 flags;
88};
89
90struct archdev_data {}; /* arch specific additions */
91
92struct board_devices {
93 char name[64]; /* hold the device name */
94 u32 num_resources; /* number of device_resource */
95 /* for each device's resource */
96 struct resource_loongson resource[MAX_RESOURCE_NUMBER];
97 /* arch specific additions */
98 struct archdev_data archdata;
99};
100
101struct loongson_special_attribute {
 102	u16 vers;			/* version of this special attribute */
 103	char special_name[64];		/* special attribute name */
104 u32 loongson_special_type; /* type of special device */
105 /* for each device's resource */
106 struct resource_loongson resource[MAX_RESOURCE_NUMBER];
107};
108
109struct loongson_params {
110 u64 memory_offset; /* efi_memory_map_loongson struct offset */
111 u64 cpu_offset; /* efi_cpuinfo_loongson struct offset */
112 u64 system_offset; /* system_loongson struct offset */
113 u64 irq_offset; /* irq_source_routing_table struct offset */
114 u64 interface_offset; /* interface_info struct offset */
115 u64 special_offset; /* loongson_special_attribute struct offset */
116 u64 boarddev_table_offset; /* board_devices offset */
117};
118
119struct smbios_tables {
120 u16 vers; /* version of smbios */
121 u64 vga_bios; /* vga_bios address */
122 struct loongson_params lp;
123};
124
125struct efi_reset_system_t {
126 u64 ResetCold;
127 u64 ResetWarm;
128 u64 ResetType;
129 u64 Shutdown;
 130	u64 DoSuspend;	/* NULL if not supported */
131};
132
133struct efi_loongson {
134 u64 mps; /* MPS table */
135 u64 acpi; /* ACPI table (IA64 ext 0.71) */
136 u64 acpi20; /* ACPI table (ACPI 2.0) */
137 struct smbios_tables smbios; /* SM BIOS table */
138 u64 sal_systab; /* SAL system table */
139 u64 boot_info; /* boot info table */
140};
141
142struct boot_params {
143 struct efi_loongson efi;
144 struct efi_reset_system_t reset_system;
145};
146
147struct loongson_system_configuration {
148 u32 nr_cpus;
149 enum loongson_cpu_type cputype;
150 u64 ht_control_base;
151 u64 pci_mem_start_addr;
152 u64 pci_mem_end_addr;
153 u64 pci_io_base;
154 u64 restart_addr;
155 u64 poweroff_addr;
156 u64 suspend_addr;
157 u64 vgabios_addr;
158 u32 dma_mask_bits;
159};
160
161extern struct efi_memory_map_loongson *loongson_memmap;
162extern struct loongson_system_configuration loongson_sysconf;
163#endif
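
Every *_offset field in struct loongson_params is relative to the loongson_params structure itself, so consumers reach each table with plain pointer arithmetic. A sketch of that parsing, under the assumption that the firmware hands over the boot_params pointer in fw_arg2 (the helper name is illustrative):

	#include <asm/bootinfo.h>	/* fw_arg2 */

	static void __init parse_loongson_params(void)
	{
		struct boot_params *bp = (struct boot_params *)fw_arg2;
		struct loongson_params *lp = &bp->efi.smbios.lp;

		/* each offset is measured from lp itself */
		struct efi_memory_map_loongson *emap =
				(void *)lp + lp->memory_offset;
		struct efi_cpuinfo_loongson *ecpu =
				(void *)lp + lp->cpu_offset;

		loongson_memmap = emap;
		loongson_sysconf.nr_cpus = ecpu->nr_cpus;
		loongson_sysconf.cputype = ecpu->cputype;
	}
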
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index aeb2c05d6145..6a902751cc7f 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -11,24 +11,40 @@
11#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H 11#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
12#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H 12#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H
13 13
14#ifdef CONFIG_SWIOTLB
15#include <linux/swiotlb.h>
16#endif
17
14struct device; 18struct device;
15 19
20extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
21extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
16static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, 22static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
17 size_t size) 23 size_t size)
18{ 24{
25#ifdef CONFIG_CPU_LOONGSON3
26 return virt_to_phys(addr);
27#else
19 return virt_to_phys(addr) | 0x80000000; 28 return virt_to_phys(addr) | 0x80000000;
29#endif
20} 30}
21 31
22static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, 32static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
23 struct page *page) 33 struct page *page)
24{ 34{
35#ifdef CONFIG_CPU_LOONGSON3
36 return page_to_phys(page);
37#else
25 return page_to_phys(page) | 0x80000000; 38 return page_to_phys(page) | 0x80000000;
39#endif
26} 40}
27 41
28static inline unsigned long plat_dma_addr_to_phys(struct device *dev, 42static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
29 dma_addr_t dma_addr) 43 dma_addr_t dma_addr)
30{ 44{
31#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) 45#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
46 return dma_addr;
47#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
32 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff); 48 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
33#else 49#else
34 return dma_addr & 0x7fffffff; 50 return dma_addr & 0x7fffffff;
@@ -55,7 +71,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
55 71
56static inline int plat_device_is_coherent(struct device *dev) 72static inline int plat_device_is_coherent(struct device *dev)
57{ 73{
74#ifdef CONFIG_DMA_NONCOHERENT
58 return 0; 75 return 0;
76#else
77 return 1;
78#endif /* CONFIG_DMA_NONCOHERENT */
59} 79}
60 80
61#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */ 81#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
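
On Loongson-3 the DMA address space is treated as an identity map of physical memory, which is why plat_map_dma_mem() stops OR-ing in the 0x80000000 window there. Under that assumption the two new extern hooks could be implemented trivially; a sketch (the real definitions belong in the platform DMA code, not in this header):

	dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
	{
		return paddr;	/* assumed identity mapping on Loongson-3 */
	}

	phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
	{
		return daddr;	/* assumed identity mapping on Loongson-3 */
	}
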
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
new file mode 100644
index 000000000000..34560bda6626
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/irq.h
@@ -0,0 +1,44 @@
1#ifndef __ASM_MACH_LOONGSON_IRQ_H_
2#define __ASM_MACH_LOONGSON_IRQ_H_
3
4#include <boot_param.h>
5
6#ifdef CONFIG_CPU_LOONGSON3
7
8/* cpu core interrupt numbers */
9#define MIPS_CPU_IRQ_BASE 56
10
11#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
12#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
13#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
14
15#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
16#define LOONGSON_HT1_INT_VECTOR_BASE (LOONGSON_HT1_CFG_BASE + 0x80)
17#define LOONGSON_HT1_INT_EN_BASE (LOONGSON_HT1_CFG_BASE + 0xa0)
18#define LOONGSON_HT1_INT_VECTOR(n) \
19 LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n))
20#define LOONGSON_HT1_INTN_EN(n) \
21 LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n))
22
23#define LOONGSON_INT_ROUTER_OFFSET 0x1400
24#define LOONGSON_INT_ROUTER_INTEN \
25 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x24)
26#define LOONGSON_INT_ROUTER_INTENSET \
27 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x28)
28#define LOONGSON_INT_ROUTER_INTENCLR \
29 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x2c)
30#define LOONGSON_INT_ROUTER_ENTRY(n) \
31 LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + n)
32#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a)
33#define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18)
34
35#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */
36#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */
37
38#endif
39
40extern void fixup_irqs(void);
41extern void loongson3_ipi_interrupt(struct pt_regs *regs);
42
43#include_next <irq.h>
44#endif /* __ASM_MACH_LOONGSON_IRQ_H_ */
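
Judging by the comments on the LOONGSON_INT_CORE0_INT* values, each one-byte router entry packs a target core mask and an interrupt pin selector. A hedged sketch of steering HT1 vectors to INT1 of core 0 with the macros above (the loop bound, enable mask and function name are assumptions for illustration):

	static void __init route_ht1_irqs_example(void)
	{
		int i;

		/* point each HT1 router entry at INT1 of core 0 */
		for (i = 0; i < 4; i++)
			LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1;

		/* unmask all vectors in HT1 interrupt-enable group 0 */
		LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
	}
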
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index b286534fef08..f3fd1eb8e3dd 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/kconfig.h> 17#include <linux/kconfig.h>
18#include <boot_param.h>
18 19
19/* loongson internal northbridge initialization */ 20/* loongson internal northbridge initialization */
20extern void bonito_irq_init(void); 21extern void bonito_irq_init(void);
@@ -24,8 +25,9 @@ extern void mach_prepare_reboot(void);
24extern void mach_prepare_shutdown(void); 25extern void mach_prepare_shutdown(void);
25 26
26/* environment arguments from bootloader */ 27/* environment arguments from bootloader */
27extern unsigned long cpu_clock_freq; 28extern u32 cpu_clock_freq;
28extern unsigned long memsize, highmemsize; 29extern u32 memsize, highmemsize;
30extern struct plat_smp_ops loongson3_smp_ops;
29 31
30/* loongson-specific command line, env and memory initialization */ 32/* loongson-specific command line, env and memory initialization */
31extern void __init prom_init_memory(void); 33extern void __init prom_init_memory(void);
@@ -61,6 +63,12 @@ extern int mach_i8259_irq(void);
61#define LOONGSON_REG(x) \ 63#define LOONGSON_REG(x) \
62 (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x))) 64 (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
63 65
66#define LOONGSON3_REG8(base, x) \
67 (*(volatile u8 *)((char *)TO_UNCAC(base) + (x)))
68
69#define LOONGSON3_REG32(base, x) \
70 (*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
71
64#define LOONGSON_IRQ_BASE 32 72#define LOONGSON_IRQ_BASE 32
65#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */ 73#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
66 74
@@ -86,6 +94,10 @@ static inline void do_perfcnt_IRQ(void)
86#define LOONGSON_REG_BASE 0x1fe00000 94#define LOONGSON_REG_BASE 0x1fe00000
87#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */ 95#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
88#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1) 96#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
97/* Loongson-3 specific registers */
98#define LOONGSON3_REG_BASE 0x3ff00000
99#define LOONGSON3_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
100#define LOONGSON3_REG_TOP (LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)
89 101
90#define LOONGSON_LIO1_BASE 0x1ff00000 102#define LOONGSON_LIO1_BASE 0x1ff00000
91#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */ 103#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
@@ -101,7 +113,13 @@ static inline void do_perfcnt_IRQ(void)
101#define LOONGSON_PCICFG_BASE 0x1fe80000 113#define LOONGSON_PCICFG_BASE 0x1fe80000
102#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */ 114#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
103#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1) 115#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
116
117#if defined(CONFIG_HT_PCI)
118#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
119#else
104#define LOONGSON_PCIIO_BASE 0x1fd00000 120#define LOONGSON_PCIIO_BASE 0x1fd00000
121#endif
122
105#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */ 123#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
106#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1) 124#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
107 125
@@ -231,6 +249,9 @@ static inline void do_perfcnt_IRQ(void)
231#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68) 249#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
232#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c) 250#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
233 251
252/* Chip Config */
253#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
254
234/* pcimap */ 255/* pcimap */
235 256
236#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f 257#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
@@ -246,9 +267,6 @@ static inline void do_perfcnt_IRQ(void)
246#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ 267#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
247#include <linux/cpufreq.h> 268#include <linux/cpufreq.h>
248extern struct cpufreq_frequency_table loongson2_clockmod_table[]; 269extern struct cpufreq_frequency_table loongson2_clockmod_table[];
249
250/* Chip Config */
251#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
252#endif 270#endif
253 271
254/* 272/*
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 3810d5ca84ac..1b1f592fa2be 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -24,4 +24,10 @@
24 24
25#endif 25#endif
26 26
27#ifdef CONFIG_LEMOTE_MACH3A
28
29#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101
30
31#endif /* CONFIG_LEMOTE_MACH3A */
32
27#endif /* __ASM_MACH_LOONGSON_MACHINE_H */ 33#endif /* __ASM_MACH_LOONGSON_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson/pci.h b/arch/mips/include/asm/mach-loongson/pci.h
index bc99dab4ef63..1212774f66ef 100644
--- a/arch/mips/include/asm/mach-loongson/pci.h
+++ b/arch/mips/include/asm/mach-loongson/pci.h
@@ -40,8 +40,13 @@ extern struct pci_ops loongson_pci_ops;
40#else /* loongson2f/32bit & loongson2e */ 40#else /* loongson2f/32bit & loongson2e */
41 41
42/* this pci memory space is mapped by pcimap in pci.c */ 42/* this pci memory space is mapped by pcimap in pci.c */
43#ifdef CONFIG_CPU_LOONGSON3
44#define LOONGSON_PCI_MEM_START 0x40000000UL
45#define LOONGSON_PCI_MEM_END 0x7effffffUL
46#else
43#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE 47#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
44#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2) 48#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
49#endif
45/* this is an offset from mips_io_port_base */ 50/* this is an offset from mips_io_port_base */
46#define LOONGSON_PCI_IO_START 0x00004000UL 51#define LOONGSON_PCI_IO_START 0x00004000UL
47 52
diff --git a/arch/mips/include/asm/mach-loongson/spaces.h b/arch/mips/include/asm/mach-loongson/spaces.h
new file mode 100644
index 000000000000..e2506ee90044
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/spaces.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_MACH_LOONGSON_SPACES_H_
2#define __ASM_MACH_LOONGSON_SPACES_H_
3
4#if defined(CONFIG_64BIT)
5#define CAC_BASE _AC(0x9800000000000000, UL)
6#endif /* CONFIG_64BIT */
7
8#include <asm/mach-generic/spaces.h>
9#endif
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0b793e7bf67e..7c5e17a17849 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -5,10 +5,80 @@
5 * 5 *
6 * Chris Dearman (chris@mips.com) 6 * Chris Dearman (chris@mips.com)
7 * Copyright (C) 2007 Mips Technologies, Inc. 7 * Copyright (C) 2007 Mips Technologies, Inc.
8 * Copyright (C) 2014 Imagination Technologies Ltd.
8 */ 9 */
9#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 10#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
10#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 11#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
11 12
13 /*
14 * Prepare segments for EVA boot:
15 *
16 * This is in case the processor boots in legacy configuration
17 * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
18 *
19 * On entry, t1 is loaded with CP0_CONFIG
20 *
21 * ========================= Mappings =============================
22 * Virtual memory Physical memory Mapping
 23 * 0x00000000 - 0x7fffffff 0x80000000 - 0xffffffff MUSUK (kuseg)
24 * Flat 2GB physical memory
25 *
 26 * 0x80000000 - 0x9fffffff 0x00000000 - 0x1fffffff MUSUK (kseg0)
 27 * 0xa0000000 - 0xbfffffff 0x00000000 - 0x1fffffff MUSUK (kseg1)
28 * 0xc0000000 - 0xdfffffff - MK (kseg2)
29 * 0xe0000000 - 0xffffffff - MK (kseg3)
30 *
31 *
32 * Lowmem is expanded to 2GB
33 */
34 .macro eva_entry
35 /*
36 * Get Config.K0 value and use it to program
37 * the segmentation registers
38 */
39 andi t1, 0x7 /* CCA */
40 move t2, t1
41 ins t2, t1, 16, 3
42 /* SegCtl0 */
43 li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
44 (0 << MIPS_SEGCFG_PA_SHIFT) | \
45 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
46 (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
47 (0 << MIPS_SEGCFG_PA_SHIFT) | \
48 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
49 or t0, t2
50 mtc0 t0, $5, 2
51
52 /* SegCtl1 */
53 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
54 (0 << MIPS_SEGCFG_PA_SHIFT) | \
55 (2 << MIPS_SEGCFG_C_SHIFT) | \
56 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
57 (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
58 (0 << MIPS_SEGCFG_PA_SHIFT) | \
59 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
60 ins t0, t1, 16, 3
61 mtc0 t0, $5, 3
62
63 /* SegCtl2 */
64 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
65 (6 << MIPS_SEGCFG_PA_SHIFT) | \
66 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
67 (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
68 (4 << MIPS_SEGCFG_PA_SHIFT) | \
69 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
70 or t0, t2
71 mtc0 t0, $5, 4
72
73 jal mips_ihb
74 mfc0 t0, $16, 5
75 li t2, 0x40000000 /* K bit */
76 or t0, t0, t2
77 mtc0 t0, $16, 5
78 sync
79 jal mips_ihb
80 .endm
81
12 .macro kernel_entry_setup 82 .macro kernel_entry_setup
13#ifdef CONFIG_MIPS_MT_SMTC 83#ifdef CONFIG_MIPS_MT_SMTC
14 mfc0 t0, CP0_CONFIG 84 mfc0 t0, CP0_CONFIG
@@ -39,14 +109,57 @@
39nonmt_processor: 109nonmt_processor:
40 .asciz "SMTC kernel requires the MT ASE to run\n" 110 .asciz "SMTC kernel requires the MT ASE to run\n"
41 __FINIT 111 __FINIT
420:
43#endif 112#endif
113
114#ifdef CONFIG_EVA
115 sync
116 ehb
117
118 mfc0 t1, CP0_CONFIG
119 bgez t1, 9f
120 mfc0 t0, CP0_CONFIG, 1
121 bgez t0, 9f
122 mfc0 t0, CP0_CONFIG, 2
123 bgez t0, 9f
124 mfc0 t0, CP0_CONFIG, 3
125 sll t0, t0, 6 /* SC bit */
126 bgez t0, 9f
127
128 eva_entry
129 b 0f
1309:
131 /* Assume we came from YAMON... */
132 PTR_LA v0, 0x9fc00534 /* YAMON print */
133 lw v0, (v0)
134 move a0, zero
135 PTR_LA a1, nonsc_processor
136 jal v0
137
138 PTR_LA v0, 0x9fc00520 /* YAMON exit */
139 lw v0, (v0)
140 li a0, 1
141 jal v0
142
1431: b 1b
144 nop
145 __INITDATA
146nonsc_processor:
147 .asciz "EVA kernel requires a MIPS core with Segment Control implemented\n"
148 __FINIT
149#endif /* CONFIG_EVA */
1500:
44 .endm 151 .endm
45 152
46/* 153/*
47 * Do SMP slave processor setup necessary before we can safely execute C code. 154 * Do SMP slave processor setup necessary before we can safely execute C code.
48 */ 155 */
49 .macro smp_slave_setup 156 .macro smp_slave_setup
157#ifdef CONFIG_EVA
158 sync
159 ehb
160 mfc0 t1, CP0_CONFIG
161 eva_entry
162#endif
50 .endm 163 .endm
51 164
52#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */ 165#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
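
The CONFIG_EVA probe above walks the Config register chain: each bgez bails out when the sign (M) bit is clear, meaning the next Config register is not implemented, and the final "sll t0, t0, 6" moves Config3.SC (Segment Control implemented) into the sign position for the same test. The equivalent logic in C, as a sketch (the SC bit position is written out locally as an assumption rather than taken from a header):

	#include <linux/types.h>
	#include <asm/mipsregs.h>

	#define CONF3_SC	(1u << 25)	/* Config3.SC, assumed bit 25 */

	static bool eva_capable(void)
	{
		if (!(read_c0_config()  & MIPS_CONF_M))	/* Config1 present? */
			return false;
		if (!(read_c0_config1() & MIPS_CONF_M))	/* Config2 present? */
			return false;
		if (!(read_c0_config2() & MIPS_CONF_M))	/* Config3 present? */
			return false;
		return read_c0_config3() & CONF3_SC;
	}
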
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644
index 000000000000..d7e54971ec66
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/spaces.h
@@ -0,0 +1,46 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2014 Imagination Technologies Ltd.
7 */
8
9#ifndef _ASM_MALTA_SPACES_H
10#define _ASM_MALTA_SPACES_H
11
12#ifdef CONFIG_EVA
13
14/*
15 * Traditional Malta Board Memory Map for EVA
16 *
17 * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB
18 * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers
19 * 0x1c000000 - 0x1fffffff: I/O And Flash
20 * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB
21 * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB)
22 *
23 * The kernel is still located in 0x80000000(kseg0). However,
24 * the physical mask has been shifted to 0x80000000 which exploits the alias
 25 * on the Malta board. As a result, we override __pa_symbol
 26 * to perform direct mapping from virtual to physical addresses. In other
27 * words, the 0x80000000 virtual address maps to 0x80000000 physical address
28 * which in turn aliases to 0x0. We do this in order to be able to use a flat
29 * 2GB of memory (0x80000000 - 0xffffffff) so we can avoid the I/O hole in
30 * 0x10000000 - 0x1fffffff.
 31 * The last 64KB of physical memory is reserved to keep the HIGHMEM
 32 * macro arithmetic correct.
33 *
34 */
35
36#define PAGE_OFFSET _AC(0x0, UL)
37#define PHYS_OFFSET _AC(0x80000000, UL)
38#define HIGHMEM_START _AC(0xffff0000, UL)
39
40#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
41
42#endif /* CONFIG_EVA */
43
44#include <asm/mach-generic/spaces.h>
45
46#endif /* _ASM_MALTA_SPACES_H */
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2dbc7a8cec1a..fc946c835995 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
76 76
77 __asm__ __volatile__( 77 __asm__ __volatile__(
78 " .set push \n" 78 " .set push \n"
79 " .set mips3 \n" 79 " .set arch=r4000 \n"
80 "1: ll %0, %1 # set_value_reg32 \n" 80 "1: ll %0, %1 # set_value_reg32 \n"
81 " and %0, %2 \n" 81 " and %0, %2 \n"
82 " or %0, %3 \n" 82 " or %0, %3 \n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
98 98
99 __asm__ __volatile__( 99 __asm__ __volatile__(
100 " .set push \n" 100 " .set push \n"
101 " .set mips3 \n" 101 " .set arch=r4000 \n"
102 "1: ll %0, %1 # set_reg32 \n" 102 "1: ll %0, %1 # set_reg32 \n"
103 " or %0, %2 \n" 103 " or %0, %2 \n"
104 " sc %0, %1 \n" 104 " sc %0, %1 \n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
119 119
120 __asm__ __volatile__( 120 __asm__ __volatile__(
121 " .set push \n" 121 " .set push \n"
122 " .set mips3 \n" 122 " .set arch=r4000 \n"
123 "1: ll %0, %1 # clear_reg32 \n" 123 "1: ll %0, %1 # clear_reg32 \n"
124 " and %0, %2 \n" 124 " and %0, %2 \n"
125 " sc %0, %1 \n" 125 " sc %0, %1 \n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
140 140
141 __asm__ __volatile__( 141 __asm__ __volatile__(
142 " .set push \n" 142 " .set push \n"
143 " .set mips3 \n" 143 " .set arch=r4000 \n"
144 "1: ll %0, %1 # toggle_reg32 \n" 144 "1: ll %0, %1 # toggle_reg32 \n"
145 " xor %0, %2 \n" 145 " xor %0, %2 \n"
146 " sc %0, %1 \n" 146 " sc %0, %1 \n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
216#define custom_read_reg32(address, tmp) \ 216#define custom_read_reg32(address, tmp) \
217 __asm__ __volatile__( \ 217 __asm__ __volatile__( \
218 " .set push \n" \ 218 " .set push \n" \
219 " .set mips3 \n" \ 219 " .set arch=r4000 \n" \
220 "1: ll %0, %1 #custom_read_reg32 \n" \ 220 "1: ll %0, %1 #custom_read_reg32 \n" \
221 " .set pop \n" \ 221 " .set pop \n" \
222 : "=r" (tmp), "=m" (*address) \ 222 : "=r" (tmp), "=m" (*address) \
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
225#define custom_write_reg32(address, tmp) \ 225#define custom_write_reg32(address, tmp) \
226 __asm__ __volatile__( \ 226 __asm__ __volatile__( \
227 " .set push \n" \ 227 " .set push \n" \
228 " .set mips3 \n" \ 228 " .set arch=r4000 \n" \
229 " sc %0, %1 #custom_write_reg32 \n" \ 229 " sc %0, %1 #custom_write_reg32 \n" \
230 " "__beqz"%0, 1b \n" \ 230 " "__beqz"%0, 1b \n" \
231 " nop \n" \ 231 " nop \n" \
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index 722bc889eab5..fd9774269a5e 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -64,6 +64,11 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
64#define GIC_ADDRSPACE_SZ (128 * 1024) 64#define GIC_ADDRSPACE_SZ (128 * 1024)
65 65
66/* 66/*
67 * CPC Specific definitions
68 */
69#define CPC_BASE_ADDR 0x1bde0000
70
71/*
67 * MSC01 BIU Specific definitions 72 * MSC01 BIU Specific definitions
68 * FIXME : These should be elsewhere ? 73 * FIXME : These should be elsewhere ?
69 */ 74 */
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 836e2ede24de..9cf54041d416 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -50,4 +50,9 @@
50#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43 50#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43
51#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7) 51#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7)
52 52
53/* Power Management Configuration Space */
54#define PIIX4_FUNC3_PMBA 0x40
55#define PIIX4_FUNC3_PMREGMISC 0x80
56#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
57
53#endif /* __ASM_MIPS_BOARDS_PIIX4_H */ 58#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
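
The new function-3 definitions are enough to locate and enable the PIIX4 power management I/O window: PMBA holds the base address and PMREGMISC gates its decode. A sketch of how board setup code might use them (the helper name is illustrative, not from this commit):

	static void enable_piix4_pm_io(struct pci_dev *pdev)
	{
		u32 pmba;
		u8 misc;

		/* read the PM I/O base address register */
		pci_read_config_dword(pdev, PIIX4_FUNC3_PMBA, &pmba);

		/* turn on decode of the PM I/O space */
		pci_read_config_byte(pdev, PIIX4_FUNC3_PMREGMISC, &misc);
		pci_write_config_byte(pdev, PIIX4_FUNC3_PMREGMISC,
				      misc | PIIX4_FUNC3_PMREGMISC_EN);
	}
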
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
new file mode 100644
index 000000000000..6a9d2dd005ca
--- /dev/null
+++ b/arch/mips/include/asm/mips-cm.h
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_MIPS_CM_H__
12#define __MIPS_ASM_MIPS_CM_H__
13
14#include <linux/io.h>
15#include <linux/types.h>
16
17/* The base address of the CM GCR block */
18extern void __iomem *mips_cm_base;
19
20/* The base address of the CM L2-only sync region */
21extern void __iomem *mips_cm_l2sync_base;
22
23/**
24 * __mips_cm_phys_base - retrieve the physical base address of the CM
25 *
26 * This function returns the physical base address of the Coherence Manager
27 * global control block, or 0 if no Coherence Manager is present. It provides
28 * a default implementation which reads the CMGCRBase register where available,
 29 * and may be overridden by platforms which determine this address in a
30 * different way by defining a function with the same prototype except for the
31 * name mips_cm_phys_base (without underscores).
32 */
33extern phys_t __mips_cm_phys_base(void);
34
35/**
36 * mips_cm_probe - probe for a Coherence Manager
37 *
38 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
39 * is successfully detected, else -errno.
40 */
41#ifdef CONFIG_MIPS_CM
42extern int mips_cm_probe(void);
43#else
44static inline int mips_cm_probe(void)
45{
46 return -ENODEV;
47}
48#endif
49
50/**
51 * mips_cm_present - determine whether a Coherence Manager is present
52 *
53 * Returns true if a CM is present in the system, else false.
54 */
55static inline bool mips_cm_present(void)
56{
57#ifdef CONFIG_MIPS_CM
58 return mips_cm_base != NULL;
59#else
60 return false;
61#endif
62}
63
64/**
65 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
66 *
67 * Returns true if the system implements an L2-only sync region, else false.
68 */
69static inline bool mips_cm_has_l2sync(void)
70{
71#ifdef CONFIG_MIPS_CM
72 return mips_cm_l2sync_base != NULL;
73#else
74 return false;
75#endif
76}
77
78/* Offsets to register blocks from the CM base address */
79#define MIPS_CM_GCB_OFS 0x0000 /* Global Control Block */
80#define MIPS_CM_CLCB_OFS 0x2000 /* Core Local Control Block */
81#define MIPS_CM_COCB_OFS 0x4000 /* Core Other Control Block */
82#define MIPS_CM_GDB_OFS 0x6000 /* Global Debug Block */
83
84/* Total size of the CM memory mapped registers */
85#define MIPS_CM_GCR_SIZE 0x8000
86
87/* Size of the L2-only sync region */
88#define MIPS_CM_L2SYNC_SIZE 0x1000
89
90/* Macros to ease the creation of register access functions */
91#define BUILD_CM_R_(name, off) \
92static inline u32 *addr_gcr_##name(void) \
93{ \
94 return (u32 *)(mips_cm_base + (off)); \
95} \
96 \
97static inline u32 read_gcr_##name(void) \
98{ \
99 return __raw_readl(addr_gcr_##name()); \
100}
101
102#define BUILD_CM__W(name, off) \
103static inline void write_gcr_##name(u32 value) \
104{ \
105 __raw_writel(value, addr_gcr_##name()); \
106}
107
108#define BUILD_CM_RW(name, off) \
109 BUILD_CM_R_(name, off) \
110 BUILD_CM__W(name, off)
111
112#define BUILD_CM_Cx_R_(name, off) \
113 BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
114 BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off))
115
116#define BUILD_CM_Cx__W(name, off) \
117 BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
118 BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off))
119
120#define BUILD_CM_Cx_RW(name, off) \
121 BUILD_CM_Cx_R_(name, off) \
122 BUILD_CM_Cx__W(name, off)
123
124/* GCB register accessor functions */
125BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00)
126BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08)
127BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20)
128BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30)
129BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40)
130BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48)
131BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50)
132BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58)
133BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70)
134BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80)
135BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88)
136BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90)
137BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98)
138BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0)
139BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8)
140BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0)
141BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8)
142BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0)
143BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8)
144BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0)
145BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0)
146
147/* Core Local & Core Other register accessor functions */
148BUILD_CM_Cx_RW(reset_release, 0x00)
149BUILD_CM_Cx_RW(coherence, 0x08)
150BUILD_CM_Cx_R_(config, 0x10)
151BUILD_CM_Cx_RW(other, 0x18)
152BUILD_CM_Cx_RW(reset_base, 0x20)
153BUILD_CM_Cx_R_(id, 0x28)
154BUILD_CM_Cx_RW(reset_ext_base, 0x30)
155BUILD_CM_Cx_R_(tcid_0_priority, 0x40)
156BUILD_CM_Cx_R_(tcid_1_priority, 0x48)
157BUILD_CM_Cx_R_(tcid_2_priority, 0x50)
158BUILD_CM_Cx_R_(tcid_3_priority, 0x58)
159BUILD_CM_Cx_R_(tcid_4_priority, 0x60)
160BUILD_CM_Cx_R_(tcid_5_priority, 0x68)
161BUILD_CM_Cx_R_(tcid_6_priority, 0x70)
162BUILD_CM_Cx_R_(tcid_7_priority, 0x78)
163BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
164
165/* GCR_CONFIG register fields */
166#define CM_GCR_CONFIG_NUMIOCU_SHF 8
167#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8)
168#define CM_GCR_CONFIG_PCORES_SHF 0
169#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0)
170
171/* GCR_BASE register fields */
172#define CM_GCR_BASE_GCRBASE_SHF 15
173#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
174#define CM_GCR_BASE_CMDEFTGT_SHF 0
175#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
176#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
177#define CM_GCR_BASE_CMDEFTGT_MEM 1
178#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
179#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
180
181/* GCR_ACCESS register fields */
182#define CM_GCR_ACCESS_ACCESSEN_SHF 0
183#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0)
184
185/* GCR_REV register fields */
186#define CM_GCR_REV_MAJOR_SHF 8
187#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8)
188#define CM_GCR_REV_MINOR_SHF 0
189#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0)
190
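The _SHF/_MSK pairs are intended to be used together to extract fields; a minimal sketch of decoding the CM revision via the accessor generated above (the pr_info() message is illustrative):

	u32 rev = read_gcr_rev();
	unsigned major = (rev & CM_GCR_REV_MAJOR_MSK) >> CM_GCR_REV_MAJOR_SHF;
	unsigned minor = (rev & CM_GCR_REV_MINOR_MSK) >> CM_GCR_REV_MINOR_SHF;

	pr_info("CM revision %u.%u\n", major, minor);
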
191/* GCR_ERROR_CAUSE register fields */
192#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27
193#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27)
194#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0
195#define CM_GCR_ERROR_CAUSE_ERRINFO_MSK (_ULCAST_(0x7ffffff) << 0)
196
197/* GCR_ERROR_MULT register fields */
198#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0
199#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0)
200
201/* GCR_L2_ONLY_SYNC_BASE register fields */
202#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12
203#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12)
204#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0
205#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0)
206
207/* GCR_GIC_BASE register fields */
208#define CM_GCR_GIC_BASE_GICBASE_SHF 17
209#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17)
210#define CM_GCR_GIC_BASE_GICEN_SHF 0
211#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0)
212
213/* GCR_CPC_BASE register fields */
214#define CM_GCR_CPC_BASE_CPCBASE_SHF 17
215#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x7fff) << 17)
216#define CM_GCR_CPC_BASE_CPCEN_SHF 0
217#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0)
218
219/* GCR_REGn_BASE register fields */
220#define CM_GCR_REGn_BASE_BASEADDR_SHF 16
221#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16)
222
223/* GCR_REGn_MASK register fields */
224#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16
225#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16)
226#define CM_GCR_REGn_MASK_CCAOVR_SHF 5
227#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5)
228#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4
229#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4)
230#define CM_GCR_REGn_MASK_DROPL2_SHF 2
231#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2)
232#define CM_GCR_REGn_MASK_CMTGT_SHF 0
233#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0)
234#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0)
235#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0)
236#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0)
237#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0)
238
239/* GCR_GIC_STATUS register fields */
240#define CM_GCR_GIC_STATUS_EX_SHF 0
241#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
242
243/* GCR_CPC_STATUS register fields */
244#define CM_GCR_CPC_STATUS_EX_SHF 0
245#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
246
247/* GCR_Cx_COHERENCE register fields */
248#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0
249#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0)
250
251/* GCR_Cx_CONFIG register fields */
252#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10
253#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10)
254#define CM_GCR_Cx_CONFIG_PVPE_SHF 0
255#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x1ff) << 0)
256
257/* GCR_Cx_OTHER register fields */
258#define CM_GCR_Cx_OTHER_CORENUM_SHF 16
259#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16)
260
261/* GCR_Cx_RESET_BASE register fields */
262#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12
263#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12)
264
265/* GCR_Cx_RESET_EXT_BASE register fields */
266#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31
267#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31)
268#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30
269#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30)
270#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20
271#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20)
272#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1
273#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1)
274#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0
275#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0)
276
277/**
278 * mips_cm_numcores - return the number of cores present in the system
279 *
280 * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or
281 * zero if no Coherence Manager is present.
282 */
283static inline unsigned mips_cm_numcores(void)
284{
285 if (!mips_cm_present())
286 return 0;
287
288 return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK)
289 >> CM_GCR_CONFIG_PCORES_SHF) + 1;
290}
291
292/**
293 * mips_cm_numiocu - return the number of IOCUs present in the system
294 *
295 * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero
296 * if no Coherence Manager is present.
297 */
298static inline unsigned mips_cm_numiocu(void)
299{
300 if (!mips_cm_present())
301 return 0;
302
303 return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK)
304 >> CM_GCR_CONFIG_NUMIOCU_SHF;
305}
306
307/**
308 * mips_cm_l2sync - perform an L2-only sync operation
309 *
310 * If an L2-only sync region is present in the system then this function
311 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
312 */
313static inline int mips_cm_l2sync(void)
314{
315 if (!mips_cm_has_l2sync())
316 return -ENODEV;
317
318 writel(0, mips_cm_l2sync_base);
319 return 0;
320}
321
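Putting the pieces together, callers are expected to check for CM presence before touching any GCR. A hedged sketch of the intended sequence (plat_cm_init() is a hypothetical platform hook, not part of this header):

static void __init plat_cm_init(void)	/* hypothetical */
{
	if (!mips_cm_present())
		return;

	pr_info("CM: %u core(s), %u IOCU(s)\n",
		mips_cm_numcores(), mips_cm_numiocu());

	/* Drain any outstanding L2 operations if the sync region exists */
	mips_cm_l2sync();
}
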
322#endif /* __MIPS_ASM_MIPS_CM_H__ */
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
new file mode 100644
index 000000000000..988507e46d42
--- /dev/null
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_MIPS_CPC_H__
12#define __MIPS_ASM_MIPS_CPC_H__
13
14#include <linux/io.h>
15#include <linux/types.h>
16
17/* The base address of the CPC registers */
18extern void __iomem *mips_cpc_base;
19
20/**
21 * mips_cpc_default_phys_base - retrieve the default physical base address of
22 * the CPC
23 *
24 * Returns the default physical base address of the Cluster Power Controller
25 * memory mapped registers. This is platform dependent & must therefore be
26 * implemented per-platform.
27 */
28extern phys_t mips_cpc_default_phys_base(void);
29
30/**
31 * mips_cpc_phys_base - retrieve the physical base address of the CPC
32 *
33 * This function returns the physical base address of the Cluster Power
34 * Controller memory mapped registers, or 0 if no Cluster Power Controller
35 * is present. It may be overridden by individual platforms which determine
36 * this address in a different way.
37 */
38extern phys_t __weak mips_cpc_phys_base(void);
39
40/**
41 * mips_cpc_probe - probe for a Cluster Power Controller
42 *
43 * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
44 * a CPC is successfully detected, else -errno.
45 */
46#ifdef CONFIG_MIPS_CPC
47extern int mips_cpc_probe(void);
48#else
49static inline int mips_cpc_probe(void)
50{
51 return -ENODEV;
52}
53#endif
54
55/**
56 * mips_cpc_present - determine whether a Cluster Power Controller is present
57 *
58 * Returns true if a CPC is present in the system, else false.
59 */
60static inline bool mips_cpc_present(void)
61{
62#ifdef CONFIG_MIPS_CPC
63 return mips_cpc_base != NULL;
64#else
65 return false;
66#endif
67}
68
69/* Offsets from the CPC base address to various control blocks */
70#define MIPS_CPC_GCB_OFS 0x0000
71#define MIPS_CPC_CLCB_OFS 0x2000
72#define MIPS_CPC_COCB_OFS 0x4000
73
74/* Macros to ease the creation of register access functions */
75#define BUILD_CPC_R_(name, off) \
76static inline u32 read_cpc_##name(void) \
77{ \
78 return __raw_readl(mips_cpc_base + (off)); \
79}
80
81#define BUILD_CPC__W(name, off) \
82static inline void write_cpc_##name(u32 value) \
83{ \
84 __raw_writel(value, mips_cpc_base + (off)); \
85}
86
87#define BUILD_CPC_RW(name, off) \
88 BUILD_CPC_R_(name, off) \
89 BUILD_CPC__W(name, off)
90
91#define BUILD_CPC_Cx_R_(name, off) \
92 BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
93 BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off))
94
95#define BUILD_CPC_Cx__W(name, off) \
96 BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
97 BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off))
98
99#define BUILD_CPC_Cx_RW(name, off) \
100 BUILD_CPC_Cx_R_(name, off) \
101 BUILD_CPC_Cx__W(name, off)
102
103/* GCB register accessor functions */
104BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00)
105BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08)
106BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10)
107BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18)
108BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20)
109
110/* Core Local & Core Other accessor functions */
111BUILD_CPC_Cx_RW(cmd, 0x00)
112BUILD_CPC_Cx_RW(stat_conf, 0x08)
113BUILD_CPC_Cx_RW(other, 0x10)
114
115/* CPC_Cx_CMD register fields */
116#define CPC_Cx_CMD_SHF 0
117#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 0)
118#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0)
119#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0)
120#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0)
121#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0)
122
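These commands are issued through the Core Other block after pointing it at the target core. A hedged sketch of powering up a secondary core (cpc_power_up() is a hypothetical helper; CPC_Cx_OTHER_CORENUM_SHF is defined further down, and real callers also need locking around the cl_other window):

static void cpc_power_up(unsigned int core)	/* hypothetical */
{
	/* Redirect the Core Other region at the target core... */
	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);

	/* ...then issue the power-up command on its behalf */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
}
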
123/* CPC_Cx_STAT_CONF register fields */
124#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23
125#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23)
126#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19
127#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19)
128#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19)
129#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19)
130#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19)
131#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19)
132#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19)
133#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19)
134#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19)
135#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 19)
136#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19)
137#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19)
138#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19)
139#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17
140#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17)
141#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16
142#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16)
143#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15
144#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15)
145
146/* CPC_Cx_OTHER register fields */
147#define CPC_Cx_OTHER_CORENUM_SHF 16
148#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
149
150#endif /* __MIPS_ASM_MIPS_CPC_H__ */
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index ac7935203f89..a3df0c3faa0e 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -18,7 +18,12 @@ extern cpumask_t mt_fpu_cpumask;
18extern unsigned long mt_fpemul_threshold; 18extern unsigned long mt_fpemul_threshold;
19 19
20extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); 20extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
21
22#ifdef CONFIG_MIPS_MT
21extern void mips_mt_set_cpuoptions(void); 23extern void mips_mt_set_cpuoptions(void);
24#else
25static inline void mips_mt_set_cpuoptions(void) { }
26#endif
22 27
23struct class; 28struct class;
24extern struct class *mt_class; 29extern struct class *mt_class;
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 38b7704ee376..6efa79a27b6a 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -176,6 +176,17 @@
176 176
177#ifndef __ASSEMBLY__ 177#ifndef __ASSEMBLY__
178 178
179static inline unsigned core_nvpes(void)
180{
181 unsigned conf0;
182
183 if (!cpu_has_mipsmt)
184 return 1;
185
186 conf0 = read_c0_mvpconf0();
187 return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
188}
189
179static inline unsigned int dvpe(void) 190static inline unsigned int dvpe(void)
180{ 191{
181 int res = 0; 192 int res = 0;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index bbc3dd4294bc..3e025b5311db 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -568,11 +568,23 @@
568#define MIPS_CONF1_PC (_ULCAST_(1) << 4) 568#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
569#define MIPS_CONF1_MD (_ULCAST_(1) << 5) 569#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
570#define MIPS_CONF1_C2 (_ULCAST_(1) << 6) 570#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
571#define MIPS_CONF1_DA_SHF 7
572#define MIPS_CONF1_DA_SZ 3
571#define MIPS_CONF1_DA (_ULCAST_(7) << 7) 573#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
574#define MIPS_CONF1_DL_SHF 10
575#define MIPS_CONF1_DL_SZ 3
572#define MIPS_CONF1_DL (_ULCAST_(7) << 10) 576#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
577#define MIPS_CONF1_DS_SHF 13
578#define MIPS_CONF1_DS_SZ 3
573#define MIPS_CONF1_DS (_ULCAST_(7) << 13) 579#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
580#define MIPS_CONF1_IA_SHF 16
581#define MIPS_CONF1_IA_SZ 3
574#define MIPS_CONF1_IA (_ULCAST_(7) << 16) 582#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
583#define MIPS_CONF1_IL_SHF 19
584#define MIPS_CONF1_IL_SZ 3
575#define MIPS_CONF1_IL (_ULCAST_(7) << 19) 585#define MIPS_CONF1_IL (_ULCAST_(7) << 19)
586#define MIPS_CONF1_IS_SHF 22
587#define MIPS_CONF1_IS_SZ 3
576#define MIPS_CONF1_IS (_ULCAST_(7) << 22) 588#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
577#define MIPS_CONF1_TLBS_SHIFT (25) 589#define MIPS_CONF1_TLBS_SHIFT (25)
578#define MIPS_CONF1_TLBS_SIZE (6) 590#define MIPS_CONF1_TLBS_SIZE (6)
@@ -653,9 +665,16 @@
653 665
654#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 666#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
655 667
668#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
669#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
670
656/* EntryHI bit definition */ 671/* EntryHI bit definition */
657#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10) 672#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
658 673
674/* CMGCRBase bit definitions */
675#define MIPS_CMGCRB_BASE 11
676#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
677
659/* 678/*
660 * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register. 679 * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
661 */ 680 */
@@ -1010,6 +1029,8 @@ do { \
1010 1029
1011#define read_c0_prid() __read_32bit_c0_register($15, 0) 1030#define read_c0_prid() __read_32bit_c0_register($15, 0)
1012 1031
1032#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
1033
1013#define read_c0_config() __read_32bit_c0_register($16, 0) 1034#define read_c0_config() __read_32bit_c0_register($16, 0)
1014#define read_c0_config1() __read_32bit_c0_register($16, 1) 1035#define read_c0_config1() __read_32bit_c0_register($16, 1)
1015#define read_c0_config2() __read_32bit_c0_register($16, 2) 1036#define read_c0_config2() __read_32bit_c0_register($16, 2)
@@ -1883,6 +1904,7 @@ change_c0_##name(unsigned int change, unsigned int newbits) \
1883__BUILD_SET_C0(status) 1904__BUILD_SET_C0(status)
1884__BUILD_SET_C0(cause) 1905__BUILD_SET_C0(cause)
1885__BUILD_SET_C0(config) 1906__BUILD_SET_C0(config)
1907__BUILD_SET_C0(config5)
1886__BUILD_SET_C0(intcontrol) 1908__BUILD_SET_C0(intcontrol)
1887__BUILD_SET_C0(intctl) 1909__BUILD_SET_C0(intctl)
1888__BUILD_SET_C0(srsmap) 1910__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 44b705d08262..c2edae382d5d 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -126,6 +126,8 @@ search_module_dbetables(unsigned long addr)
126#define MODULE_PROC_FAMILY "LOONGSON1 " 126#define MODULE_PROC_FAMILY "LOONGSON1 "
127#elif defined CONFIG_CPU_LOONGSON2 127#elif defined CONFIG_CPU_LOONGSON2
128#define MODULE_PROC_FAMILY "LOONGSON2 " 128#define MODULE_PROC_FAMILY "LOONGSON2 "
129#elif defined CONFIG_CPU_LOONGSON3
130#define MODULE_PROC_FAMILY "LOONGSON3 "
129#elif defined CONFIG_CPU_CAVIUM_OCTEON 131#elif defined CONFIG_CPU_CAVIUM_OCTEON
130#define MODULE_PROC_FAMILY "OCTEON " 132#define MODULE_PROC_FAMILY "OCTEON "
131#elif defined CONFIG_CPU_XLR 133#elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644
index 000000000000..a2aba6c3ec05
--- /dev/null
+++ b/arch/mips/include/asm/msa.h
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10#ifndef _ASM_MSA_H
11#define _ASM_MSA_H
12
13#include <asm/mipsregs.h>
14
15extern void _save_msa(struct task_struct *);
16extern void _restore_msa(struct task_struct *);
17
18static inline void enable_msa(void)
19{
20 if (cpu_has_msa) {
21 set_c0_config5(MIPS_CONF5_MSAEN);
22 enable_fpu_hazard();
23 }
24}
25
26static inline void disable_msa(void)
27{
28 if (cpu_has_msa) {
29 clear_c0_config5(MIPS_CONF5_MSAEN);
30 disable_fpu_hazard();
31 }
32}
33
34static inline int is_msa_enabled(void)
35{
36 if (!cpu_has_msa)
37 return 0;
38
39 return read_c0_config5() & MIPS_CONF5_MSAEN;
40}
41
42static inline int thread_msa_context_live(void)
43{
44 /*
45 * Check cpu_has_msa only if it's a constant. This will allow the
46 * compiler to optimise out code for CPUs without MSA without adding
47 * an extra redundant check for CPUs with MSA.
48 */
49 if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
50 return 0;
51
52 return test_thread_flag(TIF_MSA_CTX_LIVE);
53}
54
55static inline void save_msa(struct task_struct *t)
56{
57 if (cpu_has_msa)
58 _save_msa(t);
59}
60
61static inline void restore_msa(struct task_struct *t)
62{
63 if (cpu_has_msa)
64 _restore_msa(t);
65}
66
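A scheduler-style caller would pair these wrappers with the liveness test above; a minimal sketch (prev/next are the outgoing and incoming tasks, purely illustrative):

	/* On switch-out: preserve vector state only if it was ever live */
	if (thread_msa_context_live())
		save_msa(prev);

	/* On switch-in: bring the saved vector state back */
	restore_msa(next);
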
67#ifdef TOOLCHAIN_SUPPORTS_MSA
68
69#define __BUILD_MSA_CTL_REG(name, cs) \
70static inline unsigned int read_msa_##name(void) \
71{ \
72 unsigned int reg; \
73 __asm__ __volatile__( \
74 " .set push\n" \
75 " .set msa\n" \
76 " cfcmsa %0, $" #cs "\n" \
77 " .set pop\n" \
78 : "=r"(reg)); \
79 return reg; \
80} \
81 \
82static inline void write_msa_##name(unsigned int val) \
83{ \
84 __asm__ __volatile__( \
85 " .set push\n" \
86 " .set msa\n" \
87 " cfcmsa $" #cs ", %0\n" \
88 " .set pop\n" \
89 : : "r"(val)); \
90}
91
92#else /* !TOOLCHAIN_SUPPORTS_MSA */
93
94/*
95 * Define functions using .word for the c[ft]cmsa instructions in order to
96 * allow compilation with toolchains that do not support MSA. Once all
97 * toolchains in use support MSA these can be removed.
98 */
99
100#define __BUILD_MSA_CTL_REG(name, cs) \
101static inline unsigned int read_msa_##name(void) \
102{ \
103 unsigned int reg; \
104 __asm__ __volatile__( \
105 " .set push\n" \
106 " .set noat\n" \
107 " .word 0x787e0059 | (" #cs " << 11)\n" \
108 " move %0, $1\n" \
109 " .set pop\n" \
110 : "=r"(reg)); \
111 return reg; \
112} \
113 \
114static inline void write_msa_##name(unsigned int val) \
115{ \
116 __asm__ __volatile__( \
117 " .set push\n" \
118 " .set noat\n" \
119 " move $1, %0\n" \
120 " .word 0x783e0819 | (" #cs " << 6)\n" \
121 " .set pop\n" \
122 : : "r"(val)); \
123}
124
125#endif /* !TOOLCHAIN_SUPPORTS_MSA */
126
127#define MSA_IR 0
128#define MSA_CSR 1
129#define MSA_ACCESS 2
130#define MSA_SAVE 3
131#define MSA_MODIFY 4
132#define MSA_REQUEST 5
133#define MSA_MAP 6
134#define MSA_UNMAP 7
135
136__BUILD_MSA_CTL_REG(ir, 0)
137__BUILD_MSA_CTL_REG(csr, 1)
138__BUILD_MSA_CTL_REG(access, 2)
139__BUILD_MSA_CTL_REG(save, 3)
140__BUILD_MSA_CTL_REG(modify, 4)
141__BUILD_MSA_CTL_REG(request, 5)
142__BUILD_MSA_CTL_REG(map, 6)
143__BUILD_MSA_CTL_REG(unmap, 7)
144
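Each invocation above emits a read/write pair named after the register, so for example the MSACSR accessors come out as follows (a sketch of the generated API, not new code):

	unsigned int csr;

	csr = read_msa_csr();	/* cfcmsa from MSA control register 1 */
	write_msa_csr(csr);	/* ctcmsa to MSA control register 1 */
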
145/* MSA Implementation Register (MSAIR) */
146#define MSA_IR_REVB 0
147#define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB)
148#define MSA_IR_PROCB 8
149#define MSA_IR_PROCF (_ULCAST_(0xff) << MSA_IR_PROCB)
150#define MSA_IR_WRPB 16
151#define MSA_IR_WRPF (_ULCAST_(0x1) << MSA_IR_WRPB)
152
153/* MSA Control & Status Register (MSACSR) */
154#define MSA_CSR_RMB 0
155#define MSA_CSR_RMF (_ULCAST_(0x3) << MSA_CSR_RMB)
156#define MSA_CSR_RM_NEAREST 0
157#define MSA_CSR_RM_TO_ZERO 1
158#define MSA_CSR_RM_TO_POS 2
159#define MSA_CSR_RM_TO_NEG 3
160#define MSA_CSR_FLAGSB 2
161#define MSA_CSR_FLAGSF (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
162#define MSA_CSR_FLAGS_IB 2
163#define MSA_CSR_FLAGS_IF (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
164#define MSA_CSR_FLAGS_UB 3
165#define MSA_CSR_FLAGS_UF (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
166#define MSA_CSR_FLAGS_OB 4
167#define MSA_CSR_FLAGS_OF (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
168#define MSA_CSR_FLAGS_ZB 5
169#define MSA_CSR_FLAGS_ZF (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
170#define MSA_CSR_FLAGS_VB 6
171#define MSA_CSR_FLAGS_VF (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
172#define MSA_CSR_ENABLESB 7
173#define MSA_CSR_ENABLESF (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
174#define MSA_CSR_ENABLES_IB 7
175#define MSA_CSR_ENABLES_IF (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
176#define MSA_CSR_ENABLES_UB 8
177#define MSA_CSR_ENABLES_UF (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
178#define MSA_CSR_ENABLES_OB 9
179#define MSA_CSR_ENABLES_OF (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
180#define MSA_CSR_ENABLES_ZB 10
181#define MSA_CSR_ENABLES_ZF (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
182#define MSA_CSR_ENABLES_VB 11
183#define MSA_CSR_ENABLES_VF (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
184#define MSA_CSR_CAUSEB 12
185#define MSA_CSR_CAUSEF (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
186#define MSA_CSR_CAUSE_IB 12
187#define MSA_CSR_CAUSE_IF (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
188#define MSA_CSR_CAUSE_UB 13
189#define MSA_CSR_CAUSE_UF (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
190#define MSA_CSR_CAUSE_OB 14
191#define MSA_CSR_CAUSE_OF (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
192#define MSA_CSR_CAUSE_ZB 15
193#define MSA_CSR_CAUSE_ZF (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
194#define MSA_CSR_CAUSE_VB 16
195#define MSA_CSR_CAUSE_VF (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
196#define MSA_CSR_CAUSE_EB 17
197#define MSA_CSR_CAUSE_EF (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
198#define MSA_CSR_NXB 18
199#define MSA_CSR_NXF (_ULCAST_(0x1) << MSA_CSR_NXB)
200#define MSA_CSR_FSB 24
201#define MSA_CSR_FSF (_ULCAST_(0x1) << MSA_CSR_FSB)
202
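A read-modify-write of MSACSR with these fields might look like the following hedged sketch (selecting round-towards-zero; illustrative only):

	u32 csr = read_msa_csr();

	csr &= ~MSA_CSR_RMF;				/* clear rounding mode */
	csr |= MSA_CSR_RM_TO_ZERO << MSA_CSR_RMB;	/* round towards zero */
	write_msa_csr(csr);
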
203#endif /* _ASM_MSA_H */
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5e08bcc74897..5699ec3a71af 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -190,7 +190,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
190 * https://patchwork.linux-mips.org/patch/1541/ 190 * https://patchwork.linux-mips.org/patch/1541/
191 */ 191 */
192 192
193#ifndef __pa_symbol
193#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 194#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
195#endif
194 196
195#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 197#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
196 198
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 32aea4852fb0..e592f3687d6f 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -235,6 +235,15 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
235#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) 235#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
236#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) 236#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
237 237
238#elif defined(CONFIG_CPU_LOONGSON3)
239
240/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
241
242#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* LOONGSON */
243#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
244#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
245#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* LOONGSON */
246
238#else 247#else
239 248
240#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */ 249#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3605b844ad87..ad70cba8daff 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -97,18 +97,48 @@ extern unsigned int vced_count, vcei_count;
97 97
98#define NUM_FPU_REGS 32 98#define NUM_FPU_REGS 32
99 99
100typedef __u64 fpureg_t; 100#ifdef CONFIG_CPU_HAS_MSA
101# define FPU_REG_WIDTH 128
102#else
103# define FPU_REG_WIDTH 64
104#endif
105
106union fpureg {
107 __u32 val32[FPU_REG_WIDTH / 32];
108 __u64 val64[FPU_REG_WIDTH / 64];
109};
110
111#ifdef CONFIG_CPU_LITTLE_ENDIAN
112# define FPR_IDX(width, idx) (idx)
113#else
114# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
115#endif
116
117#define BUILD_FPR_ACCESS(width) \
118static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
119{ \
120 return fpr->val##width[FPR_IDX(width, idx)]; \
121} \
122 \
123static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
124 u##width val) \
125{ \
126 fpr->val##width[FPR_IDX(width, idx)] = val; \
127}
128
129BUILD_FPR_ACCESS(32)
130BUILD_FPR_ACCESS(64)
101 131
102/* 132/*
103 * It would be nice to add some more fields for emulator statistics, but there 133 * It would be nice to add some more fields for emulator statistics,
104 * are a number of fixed offsets in offset.h and elsewhere that would have to 134 * the additional information is private to the FPU emulator for now.
105 * be recalculated by hand. So the additional information will be private to 135 * See arch/mips/include/asm/fpu_emulator.h.
106 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
107 */ 136 */
108 137
109struct mips_fpu_struct { 138struct mips_fpu_struct {
110 fpureg_t fpr[NUM_FPU_REGS]; 139 union fpureg fpr[NUM_FPU_REGS];
111 unsigned int fcr31; 140 unsigned int fcr31;
141 unsigned int msacsr;
112}; 142};
113 143
114#define NUM_DSP_REGS 6 144#define NUM_DSP_REGS 6
@@ -284,8 +314,9 @@ struct thread_struct {
284 * Saved FPU/FPU emulator stuff \ 314 * Saved FPU/FPU emulator stuff \
285 */ \ 315 */ \
286 .fpu = { \ 316 .fpu = { \
287 .fpr = {0,}, \ 317 .fpr = {{{0,},},}, \
288 .fcr31 = 0, \ 318 .fcr31 = 0, \
319 .msacsr = 0, \
289 }, \ 320 }, \
290 /* \ 321 /* \
291 * FPU affinity state (null if not FPAFF) \ 322 * FPU affinity state (null if not FPAFF) \
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 7bba9da110af..bf1ac8d35783 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -82,7 +82,7 @@ static inline long regs_return_value(struct pt_regs *regs)
82#define instruction_pointer(regs) ((regs)->cp0_epc) 82#define instruction_pointer(regs) ((regs)->cp0_epc)
83#define profile_pc(regs) instruction_pointer(regs) 83#define profile_pc(regs) instruction_pointer(regs)
84 84
85extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); 85extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
86extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); 86extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
87 87
88extern void die(const char *, struct pt_regs *) __noreturn; 88extern void die(const char *, struct pt_regs *) __noreturn;
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index c84caddb8bde..ca64cbe44493 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,7 @@
17#include <asm/cpu-features.h> 17#include <asm/cpu-features.h>
18#include <asm/cpu-type.h> 18#include <asm/cpu-type.h>
19#include <asm/mipsmtregs.h> 19#include <asm/mipsmtregs.h>
20#include <asm/uaccess.h> /* for segment_eq() */
20 21
21/* 22/*
 22 * This macro returns a properly sign-extended address suitable as base address 23
@@ -35,7 +36,7 @@
35 __asm__ __volatile__( \ 36 __asm__ __volatile__( \
36 " .set push \n" \ 37 " .set push \n" \
37 " .set noreorder \n" \ 38 " .set noreorder \n" \
38 " .set mips3\n\t \n" \ 39 " .set arch=r4000 \n" \
39 " cache %0, %1 \n" \ 40 " cache %0, %1 \n" \
40 " .set pop \n" \ 41 " .set pop \n" \
41 : \ 42 : \
@@ -203,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
203 __asm__ __volatile__( \ 204 __asm__ __volatile__( \
204 " .set push \n" \ 205 " .set push \n" \
205 " .set noreorder \n" \ 206 " .set noreorder \n" \
206 " .set mips3 \n" \ 207 " .set arch=r4000 \n" \
207 "1: cache %0, (%1) \n" \ 208 "1: cache %0, (%1) \n" \
208 "2: .set pop \n" \ 209 "2: .set pop \n" \
209 " .section __ex_table,\"a\" \n" \ 210 " .section __ex_table,\"a\" \n" \
@@ -212,6 +213,20 @@ static inline void flush_scache_line(unsigned long addr)
212 : \ 213 : \
213 : "i" (op), "r" (addr)) 214 : "i" (op), "r" (addr))
214 215
216#define protected_cachee_op(op,addr) \
217 __asm__ __volatile__( \
218 " .set push \n" \
219 " .set noreorder \n" \
220 " .set mips0 \n" \
221 " .set eva \n" \
222 "1: cachee %0, (%1) \n" \
223 "2: .set pop \n" \
224 " .section __ex_table,\"a\" \n" \
225 " "STR(PTR)" 1b, 2b \n" \
226 " .previous" \
227 : \
228 : "i" (op), "r" (addr))
229
215/* 230/*
216 * The next two are for badland addresses like signal trampolines. 231 * The next two are for badland addresses like signal trampolines.
217 */ 232 */
@@ -223,7 +238,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
223 break; 238 break;
224 239
225 default: 240 default:
241#ifdef CONFIG_EVA
242 protected_cachee_op(Hit_Invalidate_I, addr);
243#else
226 protected_cache_op(Hit_Invalidate_I, addr); 244 protected_cache_op(Hit_Invalidate_I, addr);
245#endif
227 break; 246 break;
228 } 247 }
229} 248}
@@ -356,6 +375,91 @@ static inline void invalidate_tcache_page(unsigned long addr)
356 : "r" (base), \ 375 : "r" (base), \
357 "i" (op)); 376 "i" (op));
358 377
378/*
379 * Perform the cache operation specified by op using a user mode virtual
380 * address while in kernel mode.
381 */
382#define cache16_unroll32_user(base,op) \
383 __asm__ __volatile__( \
384 " .set push \n" \
385 " .set noreorder \n" \
386 " .set mips0 \n" \
387 " .set eva \n" \
388 " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
389 " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
390 " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
391 " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
392 " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
393 " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
394 " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
395 " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
396 " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
397 " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
398 " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
399 " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
400 " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
401 " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
402 " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
403 " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
404 " .set pop \n" \
405 : \
406 : "r" (base), \
407 "i" (op));
408
409#define cache32_unroll32_user(base, op) \
410 __asm__ __volatile__( \
411 " .set push \n" \
412 " .set noreorder \n" \
413 " .set mips0 \n" \
414 " .set eva \n" \
415 " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
416 " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
417 " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
418 " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
419 " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
420 " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
421 " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
422 " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
423 " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
424 " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
425 " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
426 " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
427 " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
428 " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
429 " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
430 " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
431 " .set pop \n" \
432 : \
433 : "r" (base), \
434 "i" (op));
435
436#define cache64_unroll32_user(base, op) \
437 __asm__ __volatile__( \
438 " .set push \n" \
439 " .set noreorder \n" \
440 " .set mips0 \n" \
441 " .set eva \n" \
442 " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
443 " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
444 " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
445 " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
446 " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
447 " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
448 " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
449 " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
450 " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
451 " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
452 " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
453 " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
454 " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
455 " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
456 " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
457 " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
458 " .set pop \n" \
459 : \
460 : "r" (base), \
461 "i" (op));
462
359/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ 463/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
360#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ 464#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
361static inline void extra##blast_##pfx##cache##lsize(void) \ 465static inline void extra##blast_##pfx##cache##lsize(void) \
@@ -429,6 +533,32 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32
429__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) 533__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
430__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) 534__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
431 535
536#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
537static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
538{ \
539 unsigned long start = page; \
540 unsigned long end = page + PAGE_SIZE; \
541 \
542 __##pfx##flush_prologue \
543 \
544 do { \
545 cache##lsize##_unroll32_user(start, hitop); \
546 start += lsize * 32; \
547 } while (start < end); \
548 \
549 __##pfx##flush_epilogue \
550}
551
552__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
553 16)
554__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
555__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
556 32)
557__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
558__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
559 64)
560__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
561
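Each __BUILD_BLAST_USER_CACHE() invocation emits a page-sized flush routine named after the cache and line size; for instance the (d, dcache, ..., 32) case generates a function with this shape (a sketch of the generated signature, body elided):

static inline void blast_dcache32_user_page(unsigned long page)
{
	/* cachee-based writeback+invalidate across one page of user dcache */
}
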
432/* build blast_xxx_range, protected_blast_xxx_range */ 562/* build blast_xxx_range, protected_blast_xxx_range */
433#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ 563#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
434static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ 564static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
@@ -450,12 +580,51 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
450 __##pfx##flush_epilogue \ 580 __##pfx##flush_epilogue \
451} 581}
452 582
583#ifndef CONFIG_EVA
584
453__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) 585__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
454__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
455__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) 586__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
587
588#else
589
590#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
591static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
592 unsigned long end) \
593{ \
594 unsigned long lsize = cpu_##desc##_line_size(); \
595 unsigned long addr = start & ~(lsize - 1); \
596 unsigned long aend = (end - 1) & ~(lsize - 1); \
597 \
598 __##pfx##flush_prologue \
599 \
600 if (segment_eq(get_fs(), USER_DS)) { \
601 while (1) { \
602 protected_cachee_op(hitop, addr); \
603 if (addr == aend) \
604 break; \
605 addr += lsize; \
606 } \
607 } else { \
608 while (1) { \
609 protected_cache_op(hitop, addr); \
610 if (addr == aend) \
611 break; \
612 addr += lsize; \
613 } \
614 \
615 } \
616 __##pfx##flush_epilogue \
617}
618
619__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
620__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
621
622#endif
623__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
456__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ 624__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
457 protected_, loongson2_) 625 protected_, loongson2_)
458__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) 626__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
627__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
459__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) 628__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
460/* blast_inv_dcache_range */ 629/* blast_inv_dcache_range */
461__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) 630__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index eeeb0f48c767..f54bdbe85c0d 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,6 +32,8 @@ struct sigcontext32 {
32 __u32 sc_lo2; 32 __u32 sc_lo2;
33 __u32 sc_hi3; 33 __u32 sc_hi3;
34 __u32 sc_lo3; 34 __u32 sc_lo3;
35 __u64 sc_msaregs[32]; /* Most significant 64 bits */
36 __u32 sc_msa_csr;
35}; 37};
36#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 38#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
37#endif /* _ASM_SIGCONTEXT_H */ 39#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
new file mode 100644
index 000000000000..d60d1a2180d1
--- /dev/null
+++ b/arch/mips/include/asm/smp-cps.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_SMP_CPS_H__
12#define __MIPS_ASM_SMP_CPS_H__
13
14#ifndef __ASSEMBLY__
15
16struct boot_config {
17 unsigned int core;
18 unsigned int vpe;
19 unsigned long pc;
20 unsigned long sp;
21 unsigned long gp;
22};
23
24extern struct boot_config mips_cps_bootcfg;
25
26extern void mips_cps_core_entry(void);
27
28#else /* __ASSEMBLY__ */
29
30.extern mips_cps_bootcfg;
31
32#endif /* __ASSEMBLY__ */
33#endif /* __MIPS_ASM_SMP_CPS_H__ */
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index ef2a8041e78b..73d35b18fb64 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -13,6 +13,8 @@
13 13
14#include <linux/errno.h> 14#include <linux/errno.h>
15 15
16#include <asm/mips-cm.h>
17
16#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
17 19
18#include <linux/cpumask.h> 20#include <linux/cpumask.h>
@@ -43,6 +45,9 @@ static inline void plat_smp_setup(void)
43 mp_ops->smp_setup(); 45 mp_ops->smp_setup();
44} 46}
45 47
48extern void gic_send_ipi_single(int cpu, unsigned int action);
49extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action);
50
46#else /* !CONFIG_SMP */ 51#else /* !CONFIG_SMP */
47 52
48struct plat_smp_ops; 53struct plat_smp_ops;
@@ -76,6 +81,9 @@ static inline int register_cmp_smp_ops(void)
76#ifdef CONFIG_MIPS_CMP 81#ifdef CONFIG_MIPS_CMP
77 extern struct plat_smp_ops cmp_smp_ops; 82 extern struct plat_smp_ops cmp_smp_ops;
78 83
84 if (!mips_cm_present())
85 return -ENODEV;
86
79 register_smp_ops(&cmp_smp_ops); 87 register_smp_ops(&cmp_smp_ops);
80 88
81 return 0; 89 return 0;
@@ -97,4 +105,13 @@ static inline int register_vsmp_smp_ops(void)
97#endif 105#endif
98} 106}
99 107
108#ifdef CONFIG_MIPS_CPS
109extern int register_cps_smp_ops(void);
110#else
111static inline int register_cps_smp_ops(void)
112{
113 return -ENODEV;
114}
115#endif
116
100#endif /* __ASM_SMP_OPS_H */ 117#endif /* __ASM_SMP_OPS_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eb6008758484..efa02acd3dd5 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -42,6 +42,7 @@ extern int __cpu_logical_map[NR_CPUS];
42#define SMP_ICACHE_FLUSH 0x4 42#define SMP_ICACHE_FLUSH 0x4
43/* Used by kexec crashdump to save all cpu's state */ 43/* Used by kexec crashdump to save all cpu's state */
44#define SMP_DUMP 0x8 44#define SMP_DUMP 0x8
45#define SMP_ASK_C0COUNT 0x10
45 46
46extern volatile cpumask_t cpu_callin_map; 47extern volatile cpumask_t cpu_callin_map;
47 48
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4857e2c8df5a..d301e108d5b8 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -435,7 +435,7 @@
435 435
436 .macro RESTORE_SP_AND_RET 436 .macro RESTORE_SP_AND_RET
437 LONG_L sp, PT_R29(sp) 437 LONG_L sp, PT_R29(sp)
438 .set mips3 438 .set arch=r4000
439 eret 439 eret
440 .set mips0 440 .set mips0
441 .endm 441 .endm
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 278d45a09728..495c1041a2cc 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,22 +16,29 @@
16#include <asm/watch.h> 16#include <asm/watch.h>
17#include <asm/dsp.h> 17#include <asm/dsp.h>
18#include <asm/cop2.h> 18#include <asm/cop2.h>
19#include <asm/msa.h>
19 20
20struct task_struct; 21struct task_struct;
21 22
23enum {
24 FP_SAVE_NONE = 0,
25 FP_SAVE_VECTOR = -1,
26 FP_SAVE_SCALAR = 1,
27};
28
22/** 29/**
23 * resume - resume execution of a task 30 * resume - resume execution of a task
24 * @prev: The task previously executed. 31 * @prev: The task previously executed.
25 * @next: The task to begin executing. 32 * @next: The task to begin executing.
26 * @next_ti: task_thread_info(next). 33 * @next_ti: task_thread_info(next).
27 * @usedfpu: Non-zero if prev's FP context should be saved. 34 * @fp_save: Which, if any, FP context to save for prev.
28 * 35 *
29 * This function is used whilst scheduling to save the context of prev & load 36 * This function is used whilst scheduling to save the context of prev & load
30 * the context of next. Returns prev. 37 * the context of next. Returns prev.
31 */ 38 */
32extern asmlinkage struct task_struct *resume(struct task_struct *prev, 39extern asmlinkage struct task_struct *resume(struct task_struct *prev,
33 struct task_struct *next, struct thread_info *next_ti, 40 struct task_struct *next, struct thread_info *next_ti,
34 u32 usedfpu); 41 s32 fp_save);
35 42
36extern unsigned int ll_bit; 43extern unsigned int ll_bit;
37extern struct task_struct *ll_task; 44extern struct task_struct *ll_task;
@@ -75,7 +82,8 @@ do { \
75 82
76#define switch_to(prev, next, last) \ 83#define switch_to(prev, next, last) \
77do { \ 84do { \
78 u32 __usedfpu, __c0_stat; \ 85 u32 __c0_stat; \
86 s32 __fpsave = FP_SAVE_NONE; \
79 __mips_mt_fpaff_switch_to(prev); \ 87 __mips_mt_fpaff_switch_to(prev); \
80 if (cpu_has_dsp) \ 88 if (cpu_has_dsp) \
81 __save_dsp(prev); \ 89 __save_dsp(prev); \
@@ -88,8 +96,12 @@ do { \
88 write_c0_status(__c0_stat & ~ST0_CU2); \ 96 write_c0_status(__c0_stat & ~ST0_CU2); \
89 } \ 97 } \
90 __clear_software_ll_bit(); \ 98 __clear_software_ll_bit(); \
91 __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \ 99 if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
92 (last) = resume(prev, next, task_thread_info(next), __usedfpu); \ 100 __fpsave = FP_SAVE_SCALAR; \
101 if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
102 __fpsave = FP_SAVE_VECTOR; \
103 (last) = resume(prev, next, task_thread_info(next), __fpsave); \
104 disable_msa(); \
93} while (0) 105} while (0)
94 106
95#define finish_arch_switch(prev) \ 107#define finish_arch_switch(prev) \
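The net effect of this hunk is that resume() now receives a tri-state instead of a boolean. Conceptually the callee dispatches as below (a hedged sketch; the real implementation of resume() is assembly in arch/mips/kernel/r4k_switch.S):

	switch (fp_save) {
	case FP_SAVE_SCALAR:
		/* save prev's 32 scalar FP registers */
		break;
	case FP_SAVE_VECTOR:
		/* save prev's full 128-bit MSA vector context */
		break;
	case FP_SAVE_NONE:
	default:
		/* nothing to save */
		break;
	}
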
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index f35b131977e6..6c488c85d791 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -20,11 +20,22 @@
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <asm/ptrace.h> 22#include <asm/ptrace.h>
23#include <asm/unistd.h>
24
25#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */
26#define __NR_syscall 4000
27#endif
23 28
24static inline long syscall_get_nr(struct task_struct *task, 29static inline long syscall_get_nr(struct task_struct *task,
25 struct pt_regs *regs) 30 struct pt_regs *regs)
26{ 31{
 27	return regs->regs[2]; 32	/* O32 ABI indirect syscall() - 64-bit kernel running an O32 task, or 32-bit */
33 if ((config_enabled(CONFIG_32BIT) ||
34 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
35 (regs->regs[2] == __NR_syscall))
36 return regs->regs[4];
37 else
38 return regs->regs[2];
28} 39}
29 40
30static inline unsigned long mips_get_syscall_arg(unsigned long *arg, 41static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
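The special case above covers the O32 indirect syscall(2): the task traps with v0 == __NR_syscall (4000) and the real syscall number in a0 (regs[4]), with every argument shifted along by one register, which is why syscall_get_arguments() below bumps i and n under the same condition. A hedged userspace illustration:

	/* O32 userspace: enters the kernel with v0 == __NR_syscall;
	 * the real number (__NR_close here) is the first argument. */
	syscall(__NR_close, fd);
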
@@ -68,6 +79,12 @@ static inline long syscall_get_return_value(struct task_struct *task,
68 return regs->regs[2]; 79 return regs->regs[2];
69} 80}
70 81
82static inline void syscall_rollback(struct task_struct *task,
83 struct pt_regs *regs)
84{
85 /* Do nothing */
86}
87
71static inline void syscall_set_return_value(struct task_struct *task, 88static inline void syscall_set_return_value(struct task_struct *task,
72 struct pt_regs *regs, 89 struct pt_regs *regs,
73 int error, long val) 90 int error, long val)
@@ -87,6 +104,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
87 unsigned long *args) 104 unsigned long *args)
88{ 105{
89 int ret; 106 int ret;
 107	/* O32 ABI indirect syscall() - 64-bit kernel running an O32 task, or 32-bit */
108 if ((config_enabled(CONFIG_32BIT) ||
109 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
110 (regs->regs[2] == __NR_syscall)) {
111 i++;
112 n++;
113 }
90 114
91 while (n--) 115 while (n--)
92 ret |= mips_get_syscall_arg(args++, task, regs, i++); 116 ret |= mips_get_syscall_arg(args++, task, regs, i++);
@@ -103,11 +127,13 @@ extern const unsigned long sys_call_table[];
103extern const unsigned long sys32_call_table[]; 127extern const unsigned long sys32_call_table[];
104extern const unsigned long sysn32_call_table[]; 128extern const unsigned long sysn32_call_table[];
105 129
106static inline int __syscall_get_arch(void) 130static inline int syscall_get_arch(struct task_struct *task,
131 struct pt_regs *regs)
107{ 132{
108 int arch = EM_MIPS; 133 int arch = EM_MIPS;
109#ifdef CONFIG_64BIT 134#ifdef CONFIG_64BIT
110 arch |= __AUDIT_ARCH_64BIT; 135 if (!test_tsk_thread_flag(task, TIF_32BIT_REGS))
136 arch |= __AUDIT_ARCH_64BIT;
111#endif 137#endif
112#if defined(__LITTLE_ENDIAN) 138#if defined(__LITTLE_ENDIAN)
113 arch |= __AUDIT_ARCH_LE; 139 arch |= __AUDIT_ARCH_LE;
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 24846f9053fe..d2d961d6cb86 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
116#define TIF_LOAD_WATCH 25 /* If set, load watch registers */ 116#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
117#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */ 117#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
118#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */ 118#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
119#define TIF_USEDMSA 29 /* MSA has been used this quantum */
120#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
119#define TIF_SYSCALL_TRACE 31 /* syscall trace active */ 121#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
120 122
121#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 123#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -133,10 +135,13 @@ static inline struct thread_info *current_thread_info(void)
133#define _TIF_FPUBOUND (1<<TIF_FPUBOUND) 135#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
134#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 136#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
135#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS) 137#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
138#define _TIF_USEDMSA (1<<TIF_USEDMSA)
139#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
136#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 140#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
137 141
138#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ 142#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
139 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) 143 _TIF_SYSCALL_AUDIT | \
144 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
140 145
141/* work to do in syscall_trace_leave() */ 146/* work to do in syscall_trace_leave() */
142#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ 147#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3fa3750f577..a10951090234 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -6,6 +6,7 @@
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle 6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2007 Maciej W. Rozycki 8 * Copyright (C) 2007 Maciej W. Rozycki
9 * Copyright (C) 2014, Imagination Technologies Ltd.
9 */ 10 */
10#ifndef _ASM_UACCESS_H 11#ifndef _ASM_UACCESS_H
11#define _ASM_UACCESS_H 12#define _ASM_UACCESS_H
@@ -13,6 +14,7 @@
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/errno.h> 15#include <linux/errno.h>
15#include <linux/thread_info.h> 16#include <linux/thread_info.h>
17#include <asm/asm-eva.h>
16 18
17/* 19/*
18 * The fs value determines whether argument validity checking should be 20 * The fs value determines whether argument validity checking should be
@@ -222,11 +224,44 @@ struct __large_struct { unsigned long buf[100]; };
222 * Yuck. We need two variants, one for 64bit operation and one 224 * Yuck. We need two variants, one for 64bit operation and one
223 * for 32 bit mode and old iron. 225 * for 32 bit mode and old iron.
224 */ 226 */
227#ifndef CONFIG_EVA
228#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
229#else
230/*
231 * Kernel specific functions for EVA. We need to use normal load instructions
232 * to read data from kernel when operating in EVA mode. We use these macros to
233 * avoid redefining __get_user_asm for EVA.
234 */
235#undef _loadd
236#undef _loadw
237#undef _loadh
238#undef _loadb
225#ifdef CONFIG_32BIT 239#ifdef CONFIG_32BIT
226#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) 240#define _loadd _loadw
241#else
242#define _loadd(reg, addr) "ld " reg ", " addr
243#endif
244#define _loadw(reg, addr) "lw " reg ", " addr
245#define _loadh(reg, addr) "lh " reg ", " addr
246#define _loadb(reg, addr) "lb " reg ", " addr
247
248#define __get_kernel_common(val, size, ptr) \
249do { \
250 switch (size) { \
251 case 1: __get_data_asm(val, _loadb, ptr); break; \
252 case 2: __get_data_asm(val, _loadh, ptr); break; \
253 case 4: __get_data_asm(val, _loadw, ptr); break; \
254 case 8: __GET_DW(val, _loadd, ptr); break; \
255 default: __get_user_unknown(); break; \
256 } \
257} while (0)
258#endif
259
260#ifdef CONFIG_32BIT
261#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
227#endif 262#endif
228#ifdef CONFIG_64BIT 263#ifdef CONFIG_64BIT
229#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr) 264#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
230#endif 265#endif
231 266
232extern void __get_user_unknown(void); 267extern void __get_user_unknown(void);
@@ -234,10 +269,10 @@ extern void __get_user_unknown(void);
234#define __get_user_common(val, size, ptr) \ 269#define __get_user_common(val, size, ptr) \
235do { \ 270do { \
236 switch (size) { \ 271 switch (size) { \
237 case 1: __get_user_asm(val, "lb", ptr); break; \ 272 case 1: __get_data_asm(val, user_lb, ptr); break; \
238 case 2: __get_user_asm(val, "lh", ptr); break; \ 273 case 2: __get_data_asm(val, user_lh, ptr); break; \
239 case 4: __get_user_asm(val, "lw", ptr); break; \ 274 case 4: __get_data_asm(val, user_lw, ptr); break; \
240 case 8: __GET_USER_DW(val, ptr); break; \ 275 case 8: __GET_DW(val, user_ld, ptr); break; \
241 default: __get_user_unknown(); break; \ 276 default: __get_user_unknown(); break; \
242 } \ 277 } \
243} while (0) 278} while (0)
@@ -246,8 +281,12 @@ do { \
246({ \ 281({ \
247 int __gu_err; \ 282 int __gu_err; \
248 \ 283 \
249 __chk_user_ptr(ptr); \ 284 if (segment_eq(get_fs(), get_ds())) { \
250 __get_user_common((x), size, ptr); \ 285 __get_kernel_common((x), size, ptr); \
286 } else { \
287 __chk_user_ptr(ptr); \
288 __get_user_common((x), size, ptr); \
289 } \
251 __gu_err; \ 290 __gu_err; \
252}) 291})
253 292
@@ -257,18 +296,22 @@ do { \
257 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 296 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
258 \ 297 \
259 might_fault(); \ 298 might_fault(); \
260 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ 299 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
261 __get_user_common((x), size, __gu_ptr); \ 300 if (segment_eq(get_fs(), get_ds())) \
301 __get_kernel_common((x), size, __gu_ptr); \
302 else \
303 __get_user_common((x), size, __gu_ptr); \
304 } \
262 \ 305 \
263 __gu_err; \ 306 __gu_err; \
264}) 307})
265 308
266#define __get_user_asm(val, insn, addr) \ 309#define __get_data_asm(val, insn, addr) \
267{ \ 310{ \
268 long __gu_tmp; \ 311 long __gu_tmp; \
269 \ 312 \
270 __asm__ __volatile__( \ 313 __asm__ __volatile__( \
271 "1: " insn " %1, %3 \n" \ 314 "1: "insn("%1", "%3")" \n" \
272 "2: \n" \ 315 "2: \n" \
273 " .insn \n" \ 316 " .insn \n" \
274 " .section .fixup,\"ax\" \n" \ 317 " .section .fixup,\"ax\" \n" \
@@ -287,7 +330,7 @@ do { \
287/* 330/*
288 * Get a long long 64 using 32 bit registers. 331 * Get a long long 64 using 32 bit registers.
289 */ 332 */
290#define __get_user_asm_ll32(val, addr) \ 333#define __get_data_asm_ll32(val, insn, addr) \
291{ \ 334{ \
292 union { \ 335 union { \
293 unsigned long long l; \ 336 unsigned long long l; \
@@ -295,8 +338,8 @@ do { \
295 } __gu_tmp; \ 338 } __gu_tmp; \
296 \ 339 \
297 __asm__ __volatile__( \ 340 __asm__ __volatile__( \
298 "1: lw %1, (%3) \n" \ 341 "1: " insn("%1", "(%3)")" \n" \
299 "2: lw %D1, 4(%3) \n" \ 342 "2: " insn("%D1", "4(%3)")" \n" \
300 "3: \n" \ 343 "3: \n" \
301 " .insn \n" \ 344 " .insn \n" \
302 " .section .fixup,\"ax\" \n" \ 345 " .section .fixup,\"ax\" \n" \
@@ -315,30 +358,73 @@ do { \
315 (val) = __gu_tmp.t; \ 358 (val) = __gu_tmp.t; \
316} 359}
317 360
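On the user side, the function-like insn argument is what selects the EVA encoding. A sketch of the effect, assuming the user_lw definition this series adds in asm-eva.h (simplified here, without its .set directives):

/*
 * insn = user_lw, roughly:
 *   CONFIG_EVA:    #define user_lw(reg, addr)  "lwe " reg ", " addr
 *   !CONFIG_EVA:   #define user_lw(reg, addr)  "lw "  reg ", " addr
 *
 * so "1: "insn("%1", "%3")" \n" assembles to "1: lwe %1, %3" on an
 * EVA kernel and to the plain lw form everywhere else.
 */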
361#ifndef CONFIG_EVA
362#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
363#else
364/*
365 * Kernel specific functions for EVA. We need to use normal store instructions
366 * to write data to the kernel when operating in EVA mode. We use these macros
367 * to avoid redefining __put_data_asm for EVA.
368 */
369#undef _stored
370#undef _storew
371#undef _storeh
372#undef _storeb
373#ifdef CONFIG_32BIT
374#define _stored _storew
375#else
376#define _stored(reg, addr) "ld " reg ", " addr
377#endif
378
379#define _storew(reg, addr) "sw " reg ", " addr
380#define _storeh(reg, addr) "sh " reg ", " addr
381#define _storeb(reg, addr) "sb " reg ", " addr
382
383#define __put_kernel_common(ptr, size) \
384do { \
385 switch (size) { \
386 case 1: __put_data_asm(_storeb, ptr); break; \
387 case 2: __put_data_asm(_storeh, ptr); break; \
388 case 4: __put_data_asm(_storew, ptr); break; \
389 case 8: __PUT_DW(_stored, ptr); break; \
390 default: __put_user_unknown(); break; \
391 } \
392} while (0)
393#endif
394
318/* 395/*
319 * Yuck. We need two variants, one for 64bit operation and one 396 * Yuck. We need two variants, one for 64bit operation and one
320 * for 32 bit mode and old iron. 397 * for 32 bit mode and old iron.
321 */ 398 */
322#ifdef CONFIG_32BIT 399#ifdef CONFIG_32BIT
323#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) 400#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
324#endif 401#endif
325#ifdef CONFIG_64BIT 402#ifdef CONFIG_64BIT
326#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr) 403#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
327#endif 404#endif
328 405
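The 32-bit variant has to split the double word across a register pair. A reconstructed sketch of the 8-byte kernel store on a CONFIG_32BIT EVA kernel, following the _stored alias above:

/*
 * __put_kernel_common(p, 8)
 *   -> __PUT_DW(_stored, p)             (_stored == _storew on 32-bit)
 *   -> __put_data_asm_ll32(_storew, p)
 * which emits one sw per half of the long long:
 *   "1: sw %2, (%3)"
 *   "2: sw %D2, 4(%3)"    (%D2 names the second register of the pair)
 */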
406#define __put_user_common(ptr, size) \
407do { \
408 switch (size) { \
409 case 1: __put_data_asm(user_sb, ptr); break; \
410 case 2: __put_data_asm(user_sh, ptr); break; \
411 case 4: __put_data_asm(user_sw, ptr); break; \
412 case 8: __PUT_DW(user_sd, ptr); break; \
413 default: __put_user_unknown(); break; \
414 } \
415} while (0)
416
329#define __put_user_nocheck(x, ptr, size) \ 417#define __put_user_nocheck(x, ptr, size) \
330({ \ 418({ \
331 __typeof__(*(ptr)) __pu_val; \ 419 __typeof__(*(ptr)) __pu_val; \
332 int __pu_err = 0; \ 420 int __pu_err = 0; \
333 \ 421 \
334 __chk_user_ptr(ptr); \
335 __pu_val = (x); \ 422 __pu_val = (x); \
336 switch (size) { \ 423 if (segment_eq(get_fs(), get_ds())) { \
337 case 1: __put_user_asm("sb", ptr); break; \ 424 __put_kernel_common(ptr, size); \
338 case 2: __put_user_asm("sh", ptr); break; \ 425 } else { \
339 case 4: __put_user_asm("sw", ptr); break; \ 426 __chk_user_ptr(ptr); \
340 case 8: __PUT_USER_DW(ptr); break; \ 427 __put_user_common(ptr, size); \
341 default: __put_user_unknown(); break; \
342 } \ 428 } \
343 __pu_err; \ 429 __pu_err; \
344}) 430})
@@ -351,21 +437,19 @@ do { \
351 \ 437 \
352 might_fault(); \ 438 might_fault(); \
353 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 439 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
354 switch (size) { \ 440 if (segment_eq(get_fs(), get_ds())) \
355 case 1: __put_user_asm("sb", __pu_addr); break; \ 441 __put_kernel_common(__pu_addr, size); \
356 case 2: __put_user_asm("sh", __pu_addr); break; \ 442 else \
357 case 4: __put_user_asm("sw", __pu_addr); break; \ 443 __put_user_common(__pu_addr, size); \
358 case 8: __PUT_USER_DW(__pu_addr); break; \
359 default: __put_user_unknown(); break; \
360 } \
361 } \ 444 } \
445 \
362 __pu_err; \ 446 __pu_err; \
363}) 447})
364 448
365#define __put_user_asm(insn, ptr) \ 449#define __put_data_asm(insn, ptr) \
366{ \ 450{ \
367 __asm__ __volatile__( \ 451 __asm__ __volatile__( \
368 "1: " insn " %z2, %3 # __put_user_asm\n" \ 452 "1: "insn("%z2", "%3")" # __put_data_asm \n" \
369 "2: \n" \ 453 "2: \n" \
370 " .insn \n" \ 454 " .insn \n" \
371 " .section .fixup,\"ax\" \n" \ 455 " .section .fixup,\"ax\" \n" \
@@ -380,11 +464,11 @@ do { \
380 "i" (-EFAULT)); \ 464 "i" (-EFAULT)); \
381} 465}
382 466
383#define __put_user_asm_ll32(ptr) \ 467#define __put_data_asm_ll32(insn, ptr) \
384{ \ 468{ \
385 __asm__ __volatile__( \ 469 __asm__ __volatile__( \
386 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 470 "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
387 "2: sw %D2, 4(%3) \n" \ 471 "2: "insn("%D2", "4(%3)")" \n" \
388 "3: \n" \ 472 "3: \n" \
389 " .insn \n" \ 473 " .insn \n" \
390 " .section .fixup,\"ax\" \n" \ 474 " .section .fixup,\"ax\" \n" \
@@ -403,6 +487,11 @@ do { \
403extern void __put_user_unknown(void); 487extern void __put_user_unknown(void);
404 488
405/* 489/*
490 * ul{h,w} are assembler macro-instructions and have no EVA equivalents,
491 * so EVA unaligned accesses are handled by the ADE exception handler instead.
492 */
493#ifndef CONFIG_EVA
494/*
406 * put_user_unaligned: - Write a simple value into user space. 495 * put_user_unaligned: - Write a simple value into user space.
407 * @x: Value to copy to user space. 496 * @x: Value to copy to user space.
408 * @ptr: Destination address, in user space. 497 * @ptr: Destination address, in user space.
@@ -504,7 +593,7 @@ extern void __get_user_unaligned_unknown(void);
504#define __get_user_unaligned_common(val, size, ptr) \ 593#define __get_user_unaligned_common(val, size, ptr) \
505do { \ 594do { \
506 switch (size) { \ 595 switch (size) { \
507 case 1: __get_user_asm(val, "lb", ptr); break; 596 case 1: __get_data_asm(val, user_lb, ptr); break; \
508 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; 597 case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
509 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; 598 case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
510 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ 599 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
@@ -531,7 +620,7 @@ do { \
531 __gu_err; \ 620 __gu_err; \
532}) 621})
533 622
534#define __get_user_unaligned_asm(val, insn, addr) \ 623#define __get_data_unaligned_asm(val, insn, addr) \
535{ \ 624{ \
536 long __gu_tmp; \ 625 long __gu_tmp; \
537 \ 626 \
@@ -594,19 +683,23 @@ do { \
594#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr) 683#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
595#endif 684#endif
596 685
686#define __put_user_unaligned_common(ptr, size) \
687do { \
688 switch (size) { \
689 case 1: __put_data_asm("sb", ptr); break; \
690 case 2: __put_user_unaligned_asm("ush", ptr); break; \
691 case 4: __put_user_unaligned_asm("usw", ptr); break; \
692 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
693 default: __put_user_unaligned_unknown(); break; \
694 } \
695} while (0)
597#define __put_user_unaligned_nocheck(x,ptr,size) \ 696#define __put_user_unaligned_nocheck(x,ptr,size) \
598({ \ 697({ \
599 __typeof__(*(ptr)) __pu_val; \ 698 __typeof__(*(ptr)) __pu_val; \
600 int __pu_err = 0; \ 699 int __pu_err = 0; \
601 \ 700 \
602 __pu_val = (x); \ 701 __pu_val = (x); \
603 switch (size) { \ 702 __put_user_unaligned_common(ptr, size); \
604 case 1: __put_user_asm("sb", ptr); break; \
605 case 2: __put_user_unaligned_asm("ush", ptr); break; \
606 case 4: __put_user_unaligned_asm("usw", ptr); break; \
607 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
608 default: __put_user_unaligned_unknown(); break; \
609 } \
610 __pu_err; \ 703 __pu_err; \
611}) 704})
612 705
@@ -616,15 +709,9 @@ do { \
616 __typeof__(*(ptr)) __pu_val = (x); \ 709 __typeof__(*(ptr)) __pu_val = (x); \
617 int __pu_err = -EFAULT; \ 710 int __pu_err = -EFAULT; \
618 \ 711 \
619 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 712 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
620 switch (size) { \ 713 __put_user_unaligned_common(__pu_addr, size); \
621 case 1: __put_user_asm("sb", __pu_addr); break; \ 714 \
622 case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
623 case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
624 case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
625 default: __put_user_unaligned_unknown(); break; \
626 } \
627 } \
628 __pu_err; \ 715 __pu_err; \
629}) 716})
630 717
@@ -669,6 +756,7 @@ do { \
669} 756}
670 757
671extern void __put_user_unaligned_unknown(void); 758extern void __put_user_unaligned_unknown(void);
759#endif
672 760
673/* 761/*
674 * We're generating jump to subroutines which will be outside the range of 762 * We're generating jump to subroutines which will be outside the range of
@@ -693,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
693 781
694extern size_t __copy_user(void *__to, const void *__from, size_t __n); 782extern size_t __copy_user(void *__to, const void *__from, size_t __n);
695 783
784#ifndef CONFIG_EVA
696#define __invoke_copy_to_user(to, from, n) \ 785#define __invoke_copy_to_user(to, from, n) \
697({ \ 786({ \
698 register void __user *__cu_to_r __asm__("$4"); \ 787 register void __user *__cu_to_r __asm__("$4"); \
@@ -711,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
711 __cu_len_r; \ 800 __cu_len_r; \
712}) 801})
713 802
803#define __invoke_copy_to_kernel(to, from, n) \
804 __invoke_copy_to_user(to, from, n)
805
806#endif
807
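These invokers pin their operands to $4-$6 because the handwritten __copy_user routine has a calling convention of its own. The contract, as the macros in this file rely on it (a reconstruction, not text from the patch):

/*
 * __copy_user calling convention:
 *   $4 = destination, $5 = source, $6 = byte count
 * On return, the count register holds the number of bytes that could
 * NOT be copied (0 on full success), which is why every invoker hands
 * __cu_len_r back as its result.
 */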
714/* 808/*
715 * __copy_to_user: - Copy a block of data into user space, with less checking. 809 * __copy_to_user: - Copy a block of data into user space, with less checking.
716 * @to: Destination address, in user space. 810 * @to: Destination address, in user space.
@@ -735,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
735 __cu_from = (from); \ 829 __cu_from = (from); \
736 __cu_len = (n); \ 830 __cu_len = (n); \
737 might_fault(); \ 831 might_fault(); \
738 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ 832 if (segment_eq(get_fs(), get_ds())) \
833 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
834 __cu_len); \
835 else \
836 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
837 __cu_len); \
739 __cu_len; \ 838 __cu_len; \
740}) 839})
741 840
@@ -750,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
750 __cu_to = (to); \ 849 __cu_to = (to); \
751 __cu_from = (from); \ 850 __cu_from = (from); \
752 __cu_len = (n); \ 851 __cu_len = (n); \
753 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ 852 if (segment_eq(get_fs(), get_ds())) \
853 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
854 __cu_len); \
855 else \
856 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
857 __cu_len); \
754 __cu_len; \ 858 __cu_len; \
755}) 859})
756 860
@@ -763,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
763 __cu_to = (to); \ 867 __cu_to = (to); \
764 __cu_from = (from); \ 868 __cu_from = (from); \
765 __cu_len = (n); \ 869 __cu_len = (n); \
766 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ 870 if (segment_eq(get_fs(), get_ds())) \
767 __cu_len); \ 871 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
872 __cu_from,\
873 __cu_len);\
874 else \
875 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
876 __cu_from, \
877 __cu_len); \
768 __cu_len; \ 878 __cu_len; \
769}) 879})
770 880
@@ -790,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
790 __cu_to = (to); \ 900 __cu_to = (to); \
791 __cu_from = (from); \ 901 __cu_from = (from); \
792 __cu_len = (n); \ 902 __cu_len = (n); \
793 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ 903 if (segment_eq(get_fs(), get_ds())) { \
794 might_fault(); \ 904 __cu_len = __invoke_copy_to_kernel(__cu_to, \
795 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 905 __cu_from, \
796 __cu_len); \ 906 __cu_len); \
907 } else { \
908 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
909 might_fault(); \
910 __cu_len = __invoke_copy_to_user(__cu_to, \
911 __cu_from, \
912 __cu_len); \
913 } \
797 } \ 914 } \
798 __cu_len; \ 915 __cu_len; \
799}) 916})
800 917
918#ifndef CONFIG_EVA
919
801#define __invoke_copy_from_user(to, from, n) \ 920#define __invoke_copy_from_user(to, from, n) \
802({ \ 921({ \
803 register void *__cu_to_r __asm__("$4"); \ 922 register void *__cu_to_r __asm__("$4"); \
@@ -821,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
821 __cu_len_r; \ 940 __cu_len_r; \
822}) 941})
823 942
943#define __invoke_copy_from_kernel(to, from, n) \
944 __invoke_copy_from_user(to, from, n)
945
946/* For userland <-> userland operations */
947#define ___invoke_copy_in_user(to, from, n) \
948 __invoke_copy_from_user(to, from, n)
949
950/* For kernel <-> kernel operations */
951#define ___invoke_copy_in_kernel(to, from, n) \
952 __invoke_copy_from_user(to, from, n)
953
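Without EVA, user and kernel pointers live in a single flat address map, so one routine can serve every direction. A sketch of how the aliases above collapse:

/*
 * !CONFIG_EVA: one address map, one copy routine.
 *   ___invoke_copy_in_user(...)    \
 *   ___invoke_copy_in_kernel(...)   >  all jal __copy_user
 *   __invoke_copy_from_user(...)   /
 */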
824#define __invoke_copy_from_user_inatomic(to, from, n) \ 954#define __invoke_copy_from_user_inatomic(to, from, n) \
825({ \ 955({ \
826 register void *__cu_to_r __asm__("$4"); \ 956 register void *__cu_to_r __asm__("$4"); \
@@ -844,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
844 __cu_len_r; \ 974 __cu_len_r; \
845}) 975})
846 976
977#define __invoke_copy_from_kernel_inatomic(to, from, n) \
978 __invoke_copy_from_user_inatomic(to, from, n)
979
980#else
981
982/* EVA specific functions */
983
984extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
985 size_t __n);
986extern size_t __copy_from_user_eva(void *__to, const void *__from,
987 size_t __n);
988extern size_t __copy_to_user_eva(void *__to, const void *__from,
989 size_t __n);
990extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
991
992#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
993({ \
994 register void *__cu_to_r __asm__("$4"); \
995 register const void __user *__cu_from_r __asm__("$5"); \
996 register long __cu_len_r __asm__("$6"); \
997 \
998 __cu_to_r = (to); \
999 __cu_from_r = (from); \
1000 __cu_len_r = (n); \
1001 __asm__ __volatile__( \
1002 ".set\tnoreorder\n\t" \
1003 __MODULE_JAL(func_ptr) \
1004 ".set\tnoat\n\t" \
1005 __UA_ADDU "\t$1, %1, %2\n\t" \
1006 ".set\tat\n\t" \
1007 ".set\treorder" \
1008 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
1009 : \
1010 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
1011 DADDI_SCRATCH, "memory"); \
1012 __cu_len_r; \
1013})
1014
1015#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
1016({ \
1017 register void *__cu_to_r __asm__("$4"); \
1018 register const void __user *__cu_from_r __asm__("$5"); \
1019 register long __cu_len_r __asm__("$6"); \
1020 \
1021 __cu_to_r = (to); \
1022 __cu_from_r = (from); \
1023 __cu_len_r = (n); \
1024 __asm__ __volatile__( \
1025 __MODULE_JAL(func_ptr) \
1026 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
1027 : \
1028 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
1029 DADDI_SCRATCH, "memory"); \
1030 __cu_len_r; \
1031})
1032
1033/*
1034 * Source or destination address is in userland, so the access has to
1035 * go through the TLB via the user address-space mapping.
1036 */
1037#define __invoke_copy_from_user(to, from, n) \
1038 __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1039
1040#define __invoke_copy_from_user_inatomic(to, from, n) \
1041 __invoke_copy_from_user_eva_generic(to, from, n, \
1042 __copy_user_inatomic_eva)
1043
1044#define __invoke_copy_to_user(to, from, n) \
1045 __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1046
1047#define ___invoke_copy_in_user(to, from, n) \
1048 __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1049
1050/*
1051 * Source or destination address is in the kernel. We do not go through
1052 * the TLB, so the standard __copy_user routine can be used.
1053 */
1054#define __invoke_copy_from_kernel(to, from, n) \
1055 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1056
1057#define __invoke_copy_from_kernel_inatomic(to, from, n) \
1058 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1059
1060#define __invoke_copy_to_kernel(to, from, n) \
1061 __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1062
1063#define ___invoke_copy_in_kernel(to, from, n) \
1064 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1065
1066#endif /* CONFIG_EVA */
1067
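Summarising the EVA half, a reconstructed map of which helper each copy entry point reaches depending on the active segment:

/*
 * CONFIG_EVA dispatch by segment:
 *   get_fs() == USER_DS:    copy_from_user -> __copy_from_user_eva
 *                           copy_to_user   -> __copy_to_user_eva
 *                           copy_in_user   -> __copy_in_user_eva
 *   get_fs() == KERNEL_DS:  all of the above -> __copy_user
 * The *_eva helpers use the EVA load/store forms so that the access
 * is translated through the user address space.
 */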
847/* 1068/*
848 * __copy_from_user: - Copy a block of data from user space, with less checking. 1069 * __copy_from_user: - Copy a block of data from user space, with less checking.
849 * @to: Destination address, in kernel space. 1070 * @to: Destination address, in kernel space.
@@ -901,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
901 __cu_to = (to); \ 1122 __cu_to = (to); \
902 __cu_from = (from); \ 1123 __cu_from = (from); \
903 __cu_len = (n); \ 1124 __cu_len = (n); \
904 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ 1125 if (segment_eq(get_fs(), get_ds())) { \
905 might_fault(); \ 1126 __cu_len = __invoke_copy_from_kernel(__cu_to, \
906 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1127 __cu_from, \
907 __cu_len); \ 1128 __cu_len); \
1129 } else { \
1130 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
1131 might_fault(); \
1132 __cu_len = __invoke_copy_from_user(__cu_to, \
1133 __cu_from, \
1134 __cu_len); \
1135 } \
908 } \ 1136 } \
909 __cu_len; \ 1137 __cu_len; \
910}) 1138})
@@ -918,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
918 __cu_to = (to); \ 1146 __cu_to = (to); \
919 __cu_from = (from); \ 1147 __cu_from = (from); \
920 __cu_len = (n); \ 1148 __cu_len = (n); \
921 might_fault(); \ 1149 if (segment_eq(get_fs(), get_ds())) { \
922 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1150 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
923 __cu_len); \ 1151 __cu_len); \
1152 } else { \
1153 might_fault(); \
1154 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
1155 __cu_len); \
1156 } \
924 __cu_len; \ 1157 __cu_len; \
925}) 1158})
926 1159
@@ -933,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
933 __cu_to = (to); \ 1166 __cu_to = (to); \
934 __cu_from = (from); \ 1167 __cu_from = (from); \
935 __cu_len = (n); \ 1168 __cu_len = (n); \
936 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ 1169 if (segment_eq(get_fs(), get_ds())) { \
937 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ 1170 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
938 might_fault(); \ 1171 __cu_len); \
939 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1172 } else { \
940 __cu_len); \ 1173 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1174 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1175 might_fault(); \
1176 __cu_len = ___invoke_copy_in_user(__cu_to, \
1177 __cu_from, \
1178 __cu_len); \
1179 } \
941 } \ 1180 } \
942 __cu_len; \ 1181 __cu_len; \
943}) 1182})
@@ -1007,16 +1246,28 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
1007{ 1246{
1008 long res; 1247 long res;
1009 1248
1010 might_fault(); 1249 if (segment_eq(get_fs(), get_ds())) {
1011 __asm__ __volatile__( 1250 __asm__ __volatile__(
1012 "move\t$4, %1\n\t" 1251 "move\t$4, %1\n\t"
1013 "move\t$5, %2\n\t" 1252 "move\t$5, %2\n\t"
1014 "move\t$6, %3\n\t" 1253 "move\t$6, %3\n\t"
1015 __MODULE_JAL(__strncpy_from_user_nocheck_asm) 1254 __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1016 "move\t%0, $2" 1255 "move\t%0, $2"
1017 : "=r" (res) 1256 : "=r" (res)
1018 : "r" (__to), "r" (__from), "r" (__len) 1257 : "r" (__to), "r" (__from), "r" (__len)
1019 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1258 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1259 } else {
1260 might_fault();
1261 __asm__ __volatile__(
1262 "move\t$4, %1\n\t"
1263 "move\t$5, %2\n\t"
1264 "move\t$6, %3\n\t"
1265 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1266 "move\t%0, $2"
1267 : "=r" (res)
1268 : "r" (__to), "r" (__from), "r" (__len)
1269 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1270 }
1020 1271
1021 return res; 1272 return res;
1022} 1273}
@@ -1044,16 +1295,28 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
1044{ 1295{
1045 long res; 1296 long res;
1046 1297
1047 might_fault(); 1298 if (segment_eq(get_fs(), get_ds())) {
1048 __asm__ __volatile__( 1299 __asm__ __volatile__(
1049 "move\t$4, %1\n\t" 1300 "move\t$4, %1\n\t"
1050 "move\t$5, %2\n\t" 1301 "move\t$5, %2\n\t"
1051 "move\t$6, %3\n\t" 1302 "move\t$6, %3\n\t"
1052 __MODULE_JAL(__strncpy_from_user_asm) 1303 __MODULE_JAL(__strncpy_from_kernel_asm)
1053 "move\t%0, $2" 1304 "move\t%0, $2"
1054 : "=r" (res) 1305 : "=r" (res)
1055 : "r" (__to), "r" (__from), "r" (__len) 1306 : "r" (__to), "r" (__from), "r" (__len)
1056 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1307 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1308 } else {
1309 might_fault();
1310 __asm__ __volatile__(
1311 "move\t$4, %1\n\t"
1312 "move\t$5, %2\n\t"
1313 "move\t$6, %3\n\t"
1314 __MODULE_JAL(__strncpy_from_user_asm)
1315 "move\t%0, $2"
1316 : "=r" (res)
1317 : "r" (__to), "r" (__from), "r" (__len)
1318 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1319 }
1057 1320
1058 return res; 1321 return res;
1059} 1322}
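An illustrative-only caller of the checked variant; the return contract (string length on success, -EFAULT on an inaccessible source, the buffer size when no NUL fits) is untouched by the segment split. uname is a hypothetical const char __user * argument:

	char name[32];
	long len = strncpy_from_user(name, uname, sizeof(name));

	if (len < 0)
		return len;		/* -EFAULT			*/
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* no NUL within the buffer	*/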
@@ -1063,14 +1326,24 @@ static inline long __strlen_user(const char __user *s)
1063{ 1326{
1064 long res; 1327 long res;
1065 1328
1066 might_fault(); 1329 if (segment_eq(get_fs(), get_ds())) {
1067 __asm__ __volatile__( 1330 __asm__ __volatile__(
1068 "move\t$4, %1\n\t" 1331 "move\t$4, %1\n\t"
1069 __MODULE_JAL(__strlen_user_nocheck_asm) 1332 __MODULE_JAL(__strlen_kernel_nocheck_asm)
1070 "move\t%0, $2" 1333 "move\t%0, $2"
1071 : "=r" (res) 1334 : "=r" (res)
1072 : "r" (s) 1335 : "r" (s)
1073 : "$2", "$4", __UA_t0, "$31"); 1336 : "$2", "$4", __UA_t0, "$31");
1337 } else {
1338 might_fault();
1339 __asm__ __volatile__(
1340 "move\t$4, %1\n\t"
1341 __MODULE_JAL(__strlen_user_nocheck_asm)
1342 "move\t%0, $2"
1343 : "=r" (res)
1344 : "r" (s)
1345 : "$2", "$4", __UA_t0, "$31");
1346 }
1074 1347
1075 return res; 1348 return res;
1076} 1349}
@@ -1093,14 +1366,24 @@ static inline long strlen_user(const char __user *s)
1093{ 1366{
1094 long res; 1367 long res;
1095 1368
1096 might_fault(); 1369 if (segment_eq(get_fs(), get_ds())) {
1097 __asm__ __volatile__( 1370 __asm__ __volatile__(
1098 "move\t$4, %1\n\t" 1371 "move\t$4, %1\n\t"
1099 __MODULE_JAL(__strlen_user_asm) 1372 __MODULE_JAL(__strlen_kernel_asm)
1100 "move\t%0, $2" 1373 "move\t%0, $2"
1101 : "=r" (res) 1374 : "=r" (res)
1102 : "r" (s) 1375 : "r" (s)
1103 : "$2", "$4", __UA_t0, "$31"); 1376 : "$2", "$4", __UA_t0, "$31");
1377 } else {
1378 might_fault();
1379 __asm__ __volatile__(
1380 "move\t$4, %1\n\t"
1381 __MODULE_JAL(__strlen_user_asm)
1382 "move\t%0, $2"
1383 : "=r" (res)
1384 : "r" (s)
1385 : "$2", "$4", __UA_t0, "$31");
1386 }
1104 1387
1105 return res; 1388 return res;
1106} 1389}
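Only the jal target differs between the two branches above; the long-standing contract, a NUL-inclusive length with 0 signalling a fault, holds on both paths. Illustrative only, with ustr a hypothetical user pointer:

	long sz = strlen_user(ustr);	/* counts the terminating NUL */

	if (sz == 0)
		return -EFAULT;		/* fault while scanning       */
	/* sz - 1 characters precede the NUL. */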
@@ -1110,15 +1393,26 @@ static inline long __strnlen_user(const char __user *s, long n)
1110{ 1393{
1111 long res; 1394 long res;
1112 1395
1113 might_fault(); 1396 if (segment_eq(get_fs(), get_ds())) {
1114 __asm__ __volatile__( 1397 __asm__ __volatile__(
1115 "move\t$4, %1\n\t" 1398 "move\t$4, %1\n\t"
1116 "move\t$5, %2\n\t" 1399 "move\t$5, %2\n\t"
1117 __MODULE_JAL(__strnlen_user_nocheck_asm) 1400 __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1118 "move\t%0, $2" 1401 "move\t%0, $2"
1119 : "=r" (res) 1402 : "=r" (res)
1120 : "r" (s), "r" (n) 1403 : "r" (s), "r" (n)
1121 : "$2", "$4", "$5", __UA_t0, "$31"); 1404 : "$2", "$4", "$5", __UA_t0, "$31");
1405 } else {
1406 might_fault();
1407 __asm__ __volatile__(
1408 "move\t$4, %1\n\t"
1409 "move\t$5, %2\n\t"
1410 __MODULE_JAL(__strnlen_user_nocheck_asm)
1411 "move\t%0, $2"
1412 : "=r" (res)
1413 : "r" (s), "r" (n)
1414 : "$2", "$4", "$5", __UA_t0, "$31");
1415 }
1122 1416
1123 return res; 1417 return res;
1124} 1418}
@@ -1142,14 +1436,25 @@ static inline long strnlen_user(const char __user *s, long n)
1142 long res; 1436 long res;
1143 1437
1144 might_fault(); 1438 might_fault();
1145 __asm__ __volatile__( 1439 if (segment_eq(get_fs(), get_ds())) {
1146 "move\t$4, %1\n\t" 1440 __asm__ __volatile__(
1147 "move\t$5, %2\n\t" 1441 "move\t$4, %1\n\t"
1148 __MODULE_JAL(__strnlen_user_asm) 1442 "move\t$5, %2\n\t"
1149 "move\t%0, $2" 1443 __MODULE_JAL(__strnlen_kernel_asm)
1150 : "=r" (res) 1444 "move\t%0, $2"
1151 : "r" (s), "r" (n) 1445 : "=r" (res)
1152 : "$2", "$4", "$5", __UA_t0, "$31"); 1446 : "r" (s), "r" (n)
1447 : "$2", "$4", "$5", __UA_t0, "$31");
1448 } else {
1449 __asm__ __volatile__(
1450 "move\t$4, %1\n\t"
1451 "move\t$5, %2\n\t"
1452 __MODULE_JAL(__strnlen_user_asm)
1453 "move\t%0, $2"
1454 : "=r" (res)
1455 : "r" (s), "r" (n)
1456 : "$2", "$4", "$5", __UA_t0, "$31");
1457 }
1153 1458
1154 return res; 1459 return res;
1155} 1460}
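And a bounded counterpart for the function above, per the usual strnlen_user contract (0 on a fault, a value greater than the limit when no NUL is found in range); ustr and the PATH_MAX bound are illustrative assumptions:

	long n = strnlen_user(ustr, PATH_MAX);

	if (n == 0)
		return -EFAULT;		/* fault while scanning		 */
	if (n > PATH_MAX)
		return -ENAMETOOLONG;	/* unterminated within the bound */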