author	Ralf Baechle <ralf@linux-mips.org>	2015-02-19 10:00:34 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2015-02-19 10:00:34 -0500
commit	661af35e5fd878f915ed05dbbfe383f64133f98c (patch)
tree	956b7efd662b682224e61060552fdcf4201101bf /arch
parent	ca5d25642e212f73492d332d95dc90ef46a0e8dc (diff)
parent	f296e7c48d3155991b99f41372e1786c5be03457 (diff)
Merge branch 'mipsr6-for-3.20' of git://git.linux-mips.org/pub/scm/mchandras/linux into mips-for-linux-next
Diffstat (limited to 'arch')
-rw-r--r--  arch/mips/Kconfig                               |   72
-rw-r--r--  arch/mips/Kconfig.debug                         |   13
-rw-r--r--  arch/mips/Makefile                              |    2
-rw-r--r--  arch/mips/configs/malta_qemu_32r6_defconfig     |  193
-rw-r--r--  arch/mips/include/asm/Kbuild                    |    1
-rw-r--r--  arch/mips/include/asm/asmmacro.h                |   18
-rw-r--r--  arch/mips/include/asm/atomic.h                  |   42
-rw-r--r--  arch/mips/include/asm/bitops.h                  |   64
-rw-r--r--  arch/mips/include/asm/checksum.h                |    5
-rw-r--r--  arch/mips/include/asm/cmpxchg.h                 |   34
-rw-r--r--  arch/mips/include/asm/compiler.h                |   24
-rw-r--r--  arch/mips/include/asm/cpu-features.h            |   28
-rw-r--r--  arch/mips/include/asm/cpu-type.h                |    7
-rw-r--r--  arch/mips/include/asm/cpu.h                     |   11
-rw-r--r--  arch/mips/include/asm/edac.h                    |    4
-rw-r--r--  arch/mips/include/asm/elf.h                     |   10
-rw-r--r--  arch/mips/include/asm/fpu.h                     |    3
-rw-r--r--  arch/mips/include/asm/futex.h                   |   24
-rw-r--r--  arch/mips/include/asm/hazards.h                 |    9
-rw-r--r--  arch/mips/include/asm/irqflags.h                |    7
-rw-r--r--  arch/mips/include/asm/local.h                   |    5
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h |   24
-rw-r--r--  arch/mips/include/asm/mips-r2-to-r6-emul.h      |   96
-rw-r--r--  arch/mips/include/asm/mipsregs.h                |    3
-rw-r--r--  arch/mips/include/asm/module.h                  |    4
-rw-r--r--  arch/mips/include/asm/octeon/cvmx-cmd-queue.h   |    2
-rw-r--r--  arch/mips/include/asm/r4kcache.h                |  150
-rw-r--r--  arch/mips/include/asm/spinlock.h                |   55
-rw-r--r--  arch/mips/include/asm/spram.h                   |    4
-rw-r--r--  arch/mips/include/asm/stackframe.h              |    8
-rw-r--r--  arch/mips/include/asm/switch_to.h               |    9
-rw-r--r--  arch/mips/include/asm/thread_info.h             |    2
-rw-r--r--  arch/mips/include/uapi/asm/inst.h               |   24
-rw-r--r--  arch/mips/kernel/Makefile                       |    3
-rw-r--r--  arch/mips/kernel/asm-offsets.c                  |    1
-rw-r--r--  arch/mips/kernel/branch.c                       |  288
-rw-r--r--  arch/mips/kernel/cevt-r4k.c                     |    2
-rw-r--r--  arch/mips/kernel/cps-vec.S                      |   16
-rw-r--r--  arch/mips/kernel/cpu-bugs64.c                   |   11
-rw-r--r--  arch/mips/kernel/cpu-probe.c                    |   27
-rw-r--r--  arch/mips/kernel/elf.c                          |  303
-rw-r--r--  arch/mips/kernel/entry.S                        |   23
-rw-r--r--  arch/mips/kernel/genex.S                        |    2
-rw-r--r--  arch/mips/kernel/idle.c                         |    1
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c           | 2378
-rw-r--r--  arch/mips/kernel/mips_ksyms.c                   |    2
-rw-r--r--  arch/mips/kernel/proc.c                         |    8
-rw-r--r--  arch/mips/kernel/process.c                      |    4
-rw-r--r--  arch/mips/kernel/r4k_fpu.S                      |   12
-rw-r--r--  arch/mips/kernel/r4k_switch.S                   |   14
-rw-r--r--  arch/mips/kernel/spram.c                        |    1
-rw-r--r--  arch/mips/kernel/syscall.c                      |    2
-rw-r--r--  arch/mips/kernel/traps.c                        |   41
-rw-r--r--  arch/mips/kernel/unaligned.c                    |  390
-rw-r--r--  arch/mips/lib/Makefile                          |    1
-rw-r--r--  arch/mips/lib/memcpy.S                          |   23
-rw-r--r--  arch/mips/lib/memset.S                          |   47
-rw-r--r--  arch/mips/lib/mips-atomic.c                     |    2
-rw-r--r--  arch/mips/math-emu/cp1emu.c                     |  169
-rw-r--r--  arch/mips/mm/c-r4k.c                            |    6
-rw-r--r--  arch/mips/mm/page.c                             |   30
-rw-r--r--  arch/mips/mm/sc-mips.c                          |    4
-rw-r--r--  arch/mips/mm/tlbex.c                            |    7
-rw-r--r--  arch/mips/mm/uasm-mips.c                        |   32
-rw-r--r--  arch/mips/mm/uasm.c                             |   13
65 files changed, 4405 insertions, 415 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index b1b2de537860..8523db534919 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -377,8 +377,10 @@ config MIPS_MALTA
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
 	select SYS_SUPPORTS_32BIT_KERNEL
@@ -1034,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
 	def_bool n
 
+config GENERIC_CSUM
+	bool
+
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1147,6 +1152,9 @@ config SOC_PNX8335
 	bool
 	select SOC_PNX833X
 
+config MIPS_SPRAM
+	bool
+
 config SWAP_IO_SPACE
 	bool
 
@@ -1305,6 +1313,22 @@ config CPU_MIPS32_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS32_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	select HAVE_KVM
+	select MIPS_O32_FP64_SUPPORT
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS32 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS32r6 processor. If you own an older
+	  processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
@@ -1340,6 +1364,21 @@ config CPU_MIPS64_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS64_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS64 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS64r6 processor. If you own an older
+	  processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
 	bool "R3000"
 	depends on SYS_HAS_CPU_R3000
@@ -1540,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
 	bool "MIPS32 Release 3.5 Features"
 	depends on SYS_HAS_CPU_MIPS32_R3_5
-	depends on CPU_MIPS32_R2
+	depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture including features from the 3.5 release such as
@@ -1660,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool
 
+config SYS_HAS_CPU_MIPS32_R6
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool
 
 config SYS_HAS_CPU_MIPS64_R2
 	bool
 
+config SYS_HAS_CPU_MIPS64_R6
+	bool
+
 config SYS_HAS_CPU_R3000
 	bool
 
@@ -1765,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
 	bool
-	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
 	bool
-	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1781,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select MIPS_SPRAM
+
+config CPU_MIPSR6
+	bool
+	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select MIPS_SPRAM
 
 config EVA
 	bool
@@ -2014,6 +2065,19 @@ config MIPS_MT_FPAFF
 	default y
 	depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+	bool "MIPS R2-to-R6 emulator"
+	depends on CPU_MIPSR6 && !SMP
+	default y
+	help
+	  Choose this option if you want to run non-R6 MIPS userland code.
+	  Even if you say 'Y' here, the emulator will still be disabled by
+	  default. You can enable it using the 'mipsr2emul' kernel option.
+	  The only reason this is a build-time option is to save ~14K from the
+	  final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+	depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
 	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2149,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
 	  here.
 
 config CPU_MICROMIPS
-	depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+	depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
 	bool "microMIPS"
 	help
 	  When this option is enabled the kernel will be built using the
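
Note on MIPSR2_TO_R6_EMULATOR above: building the emulator in does not activate it; per the help text it stays disabled until the 'mipsr2emul' kernel option is passed at boot. A minimal sketch of a kernel command line (the console= argument is illustrative; only 'mipsr2emul' comes from this patch):

    # R6 Malta board, R2 emulation enabled at boot
    console=ttyS0 mipsr2emul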
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 88a9f433f6fc..3a2b775e8458 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-	bool "Run FP32 & FPXX code with hybrid FPRs"
-	depends on MIPS_O32_FP64_SUPPORT
-	help
-	  The hybrid FPR scheme is normally used only when a program needs to
-	  execute a mix of FP32 & FP64A code, since the trapping & emulation
-	  that it entails is expensive. When enabled, this option will lead
-	  to the kernel running programs which use the FP32 & FPXX FP ABIs
-	  using the hybrid FPR scheme, which can be useful for debugging
-	  purposes.
-
-	  If unsure, say N.
-
 endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 67109e53ebc0..b3de8ec06e88 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -138,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1)	+= $(call cc-option,-march=mips32,-mips32 -U_MIPS
 			-Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2)	+= $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6)	+= -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)	+= -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)	+= $(call cc-option,-march=r5400,-march=r5000) \
 			-Wa,--trap
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644
index 000000000000..4bce1f8ebe98
--- /dev/null
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
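
For reference, the malta_qemu_32r6_defconfig added above is consumed like any other MIPS defconfig. A hedged build sketch (the mips-linux-gnu- cross prefix is an assumption; substitute an R6-capable toolchain):

    make ARCH=mips malta_qemu_32r6_defconfig
    make ARCH=mips CROSS_COMPILE=mips-linux-gnu- vmlinux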
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 200efeac4181..526539cbc99f 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM)	+= checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6caf8766b80f..0cae4595e985 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	local_irq_enable reg=t0
 	ei
 	irq_enable_hazard
@@ -104,7 +104,8 @@
 	.endm
 
 	.macro	fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f
 	fpu_save_16odd \thread
@@ -160,7 +161,8 @@
 	.endm
 
 	.macro	fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+		defined(CONFIG_CPU_MIPS32_R6)
 	sll	\tmp, \status, 5
 	bgez	\tmp, 10f			# 16 register mode?
 
@@ -170,16 +172,16 @@
 	fpu_restore_16even \thread \tmp
 	.endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	.macro	_EXT	rd, rs, p, s
 	ext	\rd, \rs, \p, \s
 	.endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
 	.macro	_EXT	rd, rs, p, s
 	srl	\rd, \rs, \p
 	andi	\rd, \rd, (1 << \s) - 1
 	.endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
@@ -304,7 +306,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	LDD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
@@ -313,7 +315,7 @@
 	.set	push
 	.set	noat
 	SET_HARDFLOAT
-	add	$1, \base, \off
+	addu	$1, \base, \off
 	.word	STD_MSA_INSN | (\wd << 6)
 	.set	pop
 	.endm
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 857da84cfc92..26d436336f2e 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v)	\
 		"	sc	%0, %1				\n"	\
 		"	beqzl	%0, 1b				\n"	\
 		"	.set	mips0				\n"	\
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	\
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
 		: "Ir" (i));						\
 	} else if (kernel_uses_llsc) {					\
 		int temp;						\
									\
 		do {							\
 			__asm__ __volatile__(				\
-			"	.set	arch=r4000		\n"	\
+			"	.set	"MIPS_ISA_LEVEL"	\n"	\
 			"	ll	%0, %1	# atomic_" #op "\n"	\
 			"	" #asm_op " %0, %2		\n"	\
 			"	sc	%0, %1			\n"	\
 			"	.set	mips0			\n"	\
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
 			: "Ir" (i));					\
 		} while (unlikely(!temp));				\
 	} else {							\
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
 		"	" #asm_op " %0, %1, %3			\n"	\
 		"	.set	mips0				\n"	\
 		: "=&r" (result), "=&r" (temp),				\
-		  "+" GCC_OFF12_ASM() (v->counter)			\
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
 		: "Ir" (i));						\
 	} else if (kernel_uses_llsc) {					\
 		int temp;						\
									\
 		do {							\
 			__asm__ __volatile__(				\
-			"	.set	arch=r4000		\n"	\
+			"	.set	"MIPS_ISA_LEVEL"	\n"	\
 			"	ll	%1, %2	# atomic_" #op "_return	\n" \
 			"	" #asm_op " %0, %1, %3		\n"	\
 			"	sc	%0, %2			\n"	\
 			"	.set	mips0			\n"	\
 			: "=&r" (result), "=&r" (temp),			\
-			  "+" GCC_OFF12_ASM() (v->counter)		\
+			  "+" GCC_OFF_SMALL_ASM() (v->counter)		\
 			: "Ir" (i));					\
 		} while (unlikely(!result));				\
									\
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000			\n"
+		"	.set	"MIPS_ISA_LEVEL"		\n"
 		"1:	ll	%1, %2	# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
 		"	scd	%0, %1				\n"	\
 		"	beqzl	%0, 1b				\n"	\
 		"	.set	mips0				\n"	\
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)	\
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	\
 		: "Ir" (i));						\
 	} else if (kernel_uses_llsc) {					\
 		long temp;						\
									\
 		do {							\
 			__asm__ __volatile__(				\
-			"	.set	arch=r4000		\n"	\
+			"	.set	"MIPS_ISA_LEVEL"	\n"	\
 			"	lld	%0, %1	# atomic64_" #op "\n"	\
 			"	" #asm_op " %0, %2		\n"	\
 			"	scd	%0, %1			\n"	\
 			"	.set	mips0			\n"	\
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
 			: "Ir" (i));					\
 		} while (unlikely(!temp));				\
 	} else {							\
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
 		"	" #asm_op " %0, %1, %3			\n"	\
 		"	.set	mips0				\n"	\
 		: "=&r" (result), "=&r" (temp),				\
-		  "+" GCC_OFF12_ASM() (v->counter)			\
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)			\
 		: "Ir" (i));						\
 	} else if (kernel_uses_llsc) {					\
 		long temp;						\
									\
 		do {							\
 			__asm__ __volatile__(				\
-			"	.set	arch=r4000		\n"	\
+			"	.set	"MIPS_ISA_LEVEL"	\n"	\
 			"	lld	%1, %2	# atomic64_" #op "_return\n" \
 			"	" #asm_op " %0, %1, %3		\n"	\
 			"	scd	%0, %2			\n"	\
 			"	.set	mips0			\n"	\
 			: "=&r" (result), "=&r" (temp),			\
-			  "=" GCC_OFF12_ASM() (v->counter)		\
-			: "Ir" (i), GCC_OFF12_ASM() (v->counter)	\
+			  "=" GCC_OFF_SMALL_ASM() (v->counter)		\
+			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	\
 			: "memory");					\
 		} while (unlikely(!result));				\
									\
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp),
-		  "=" GCC_OFF12_ASM() (v->counter)
-		: "Ir" (i), GCC_OFF12_ASM() (v->counter)
+		  "=" GCC_OFF_SMALL_ASM() (v->counter)
+		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	arch=r4000			\n"
+		"	.set	"MIPS_ISA_LEVEL"		\n"
 		"1:	lld	%1, %2	# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3			\n"
 		"	bltz	%0, 1f				\n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		"1:						\n"
 		"	.set	mips0				\n"
 		: "=&r" (result), "=&r" (temp),
-		  "+" GCC_OFF12_ASM() (v->counter)
+		  "+" GCC_OFF_SMALL_ASM() (v->counter)
 		: "Ir" (i));
 	} else {
 		unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 6663bcca9d0c..9f935f6aa996 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC	"%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-		: "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+		: "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	" __INS "%0, %3, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit), "r" (~0));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	or	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC "%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
 		do {
 			__asm__ __volatile__(
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	" __INS "%0, $0, %2, 1			\n"
 			"	" __SC "%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (bit));
 		} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	and	%0, %2				\n"
 			"	" __SC "%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (~(1UL << bit)));
 		} while (unlikely(!temp));
 	} else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		"	" __SC	"%0, %1				\n"
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 		: "ir" (1UL << bit));
 	} else if (kernel_uses_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1		# change_bit	\n"
 			"	xor	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
 			: "ir" (1UL << bit));
 		} while (unlikely(!temp));
 	} else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	beqzl	%2, 1b				\n"
 		"	and	%2, %0, %3			\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	beqzl	%2, 1b				\n"
 		"	and	%2, %0, %3			\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 	} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 			"	" __EXT "%2, %0, %3, 1			\n"
 			"	" __INS "%0, $0, %3, 1			\n"
 			"	" __SC	"%0, %1				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "ir" (bit)
 			: "memory");
 		} while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_clear_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	xor	%2, %3				\n"
 			"	" __SC	"%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	beqzl	%2, 1b				\n"
 		"	and	%2, %0, %3			\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 		: "r" (1UL << bit)
 		: "memory");
 	} else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	" __LL	"%0, %1 # test_and_change_bit	\n"
 			"	xor	%2, %0, %3			\n"
 			"	" __SC	"\t%2, %1			\n"
 			"	.set	mips0				\n"
-			: "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
 			: "r" (1UL << bit)
 			: "memory");
 		} while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
 	    __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
 	    __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips64					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	dclz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
 	if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
 		__asm__(
 		"	.set	push					\n"
-		"	.set	mips32					\n"
+		"	.set	"MIPS_ISA_LEVEL"			\n"
 		"	clz	%0, %1					\n"
 		"	.set	pop					\n"
 		: "=r" (x)
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index 5996252680c6..5c585c5c1c3e 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -12,6 +12,10 @@
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -274,5 +278,6 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 }
 
 #include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
 
 #endif /* _ASM_CHECKSUM_H */
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 28b1edf19501..d0a2a68ca600 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 		"	sc	%2, %1				\n"
 		"	beqzl	%2, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	ll	%0, %3		# xchg_u32	\n"
 			"	.set	mips0				\n"
 			"	move	%2, %z4				\n"
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	sc	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 		"	scd	%2, %1				\n"
 		"	beqzl	%2, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-		: GCC_OFF12_ASM() (*m), "Jr" (val)
+		: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 		: "memory");
 	} else if (kernel_uses_llsc) {
 		unsigned long dummy;
 
 		do {
 			__asm__ __volatile__(
-			"	.set	arch=r4000			\n"
+			"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
 			"	lld	%0, %3		# xchg_u64	\n"
 			"	move	%2, %z4				\n"
 			"	scd	%2, %1				\n"
 			"	.set	mips0				\n"
-			: "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+			: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
 			  "=&r" (dummy)
-			: GCC_OFF12_ASM() (*m), "Jr" (val)
+			: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
 			: "memory");
 		} while (unlikely(!dummy));
 	} else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 		"	beqzl	$1, 1b				\n"	\
 		"2:						\n"	\
 		"	.set	pop				\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
 		: "memory");						\
 	} else if (kernel_uses_llsc) {					\
 		__asm__ __volatile__(					\
 		"	.set	push				\n"	\
 		"	.set	noat				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n"	\
 		"	bne	%0, %z3, 2f			\n"	\
 		"	.set	mips0				\n"	\
 		"	move	$1, %z4				\n"	\
-		"	.set	arch=r4000			\n"	\
+		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
 		"	" st "	$1, %1				\n"	\
 		"	beqz	$1, 1b				\n"	\
 		"	.set	pop				\n"	\
 		"2:						\n"	\
-		: "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)		\
-		: GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)		\
+		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
+		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
 		: "memory");						\
 	} else {							\
 		unsigned long __flags;					\
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index c73815e0123a..e081a265f422 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,12 +16,30 @@
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constrain */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
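
The macros above are what the GCC_OFF12_ASM and ".set arch=r4000" replacements throughout this merge expand to. A condensed, self-contained C sketch of the resulting LL/SC idiom (the function is hypothetical, shown only to illustrate the pattern; it is not a hunk of this patch):

    #include <linux/compiler.h>	/* unlikely() */
    #include <asm/compiler.h>	/* MIPS_ISA_LEVEL, GCC_OFF_SMALL_ASM() */

    /* Atomically increment *counter with an LL/SC loop.  The .set
     * directive now names an ISA string ("mips64r2" or "mips64r6")
     * instead of hard-coding arch=r4000, and GCC_OFF_SMALL_ASM()
     * yields the "ZC" constraint on R6, whose ll/sc only take a
     * 9-bit offset.
     */
    static inline void atomic_inc_sketch(int *counter)
    {
    	int temp;

    	do {
    		__asm__ __volatile__(
    		"	.set	"MIPS_ISA_LEVEL"	\n"
    		"	ll	%0, %1			\n"
    		"	addiu	%0, 1			\n"
    		"	sc	%0, %1			\n"
    		"	.set	mips0			\n"
    		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*counter));
    	} while (unlikely(!temp));
    }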
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 2897cfafcaf0..0d8208de9a3f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar	(cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb	(cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -171,6 +174,9 @@
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1		(!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2		(cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
@@ -189,12 +195,18 @@
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6	(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
@@ -208,17 +220,23 @@
 #define cpu_has_mips_4_5_r	(cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r	(cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2	(cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6	(cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+				 cpu_has_mips_r6)
 
-#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32	(cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64	(cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6	(cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r	(cpu_has_mips32r1 | cpu_has_mips32r2 | \
-			 cpu_has_mips64r1 | cpu_has_mips64r2)
+			 cpu_has_mips32r6 | cpu_has_mips64r1 | \
+			 cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6	(cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
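
Taken together, the new cpu_has_mips32r6/cpu_has_mips64r6 tests and the cpu_has_mips_r2_r6 shortcut let common code branch once for everything R2 and R6 share. A hypothetical consumer, purely to illustrate how the macros compose (not from the patch):

    #include <linux/types.h>
    #include <asm/cpu-features.h>

    /* True on a pure R6 core, where pre-R6 user binaries may need the
     * R2-to-R6 emulator introduced elsewhere in this merge.
     */
    static inline bool may_need_r2_emulation(void)
    {
    	return cpu_has_mips_r6 && !cpu_has_mips_r2;
    }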
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index b4e2bd87df50..8245875f8b33 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
 	case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+	case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
 	case CPU_5KC:
 	case CPU_5KE:
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 33866fce4d63..15687234d70a 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC	0x0000
 #define PRID_IMP_4KC		0x8000
 #define PRID_IMP_5KC		0x8100
 #define PRID_IMP_20KC		0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
 	CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
 	CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+	CPU_QEMU_GENERIC,
+
 	CPU_LAST
 };
 
@@ -329,11 +332,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2	0x00000020
 #define MIPS_CPU_ISA_M64R1	0x00000040
 #define MIPS_CPU_ISA_M64R2	0x00000080
+#define MIPS_CPU_ISA_M32R6	0x00000100
+#define MIPS_CPU_ISA_M64R6	0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-	MIPS_CPU_ISA_M32R2)
+	MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+	MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+	MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_RIXIEX		0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR		0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE		0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB		0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index ae6fedcb0060..94105d3f58f4 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
 		"	sc	%0, %1				\n"
 		"	beqz	%0, 1b				\n"
 		"	.set	mips0				\n"
-		: "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-		: GCC_OFF12_ASM() (*virt_addr));
+		: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+		: GCC_OFF_SMALL_ASM() (*virt_addr));
 
 		virt_addr++;
 	}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index eb4d95de619c..535f196ffe02 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 struct arch_elf_state {
 	int fp_abi;
 	int interp_fp_abi;
-	int overall_abi;
+	int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN	(-1)	/* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {			\
-	.fp_abi = -1,				\
-	.interp_fp_abi = -1,			\
-	.overall_abi = -1,			\
+	.fp_abi = MIPS_ABI_FP_UNKNOWN,		\
+	.interp_fp_abi = MIPS_ABI_FP_UNKNOWN,	\
+	.overall_fp_mode = -1,			\
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 994d21939676..b96d9d327626 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
 		goto fr_common;
 
 	case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
 		/* we only have a 32-bit FPU */
 		return SIGFPE;
 #endif
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index ef9987a61d88..1de190bdfb9c 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -45,19 +45,19 @@
45 " "__UA_ADDR "\t2b, 4b \n" \ 45 " "__UA_ADDR "\t2b, 4b \n" \
46 " .previous \n" \ 46 " .previous \n" \
47 : "=r" (ret), "=&r" (oldval), \ 47 : "=r" (ret), "=&r" (oldval), \
48 "=" GCC_OFF12_ASM() (*uaddr) \ 48 "=" GCC_OFF_SMALL_ASM() (*uaddr) \
49 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 49 : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
50 "i" (-EFAULT) \ 50 "i" (-EFAULT) \
51 : "memory"); \ 51 : "memory"); \
52 } else if (cpu_has_llsc) { \ 52 } else if (cpu_has_llsc) { \
53 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
54 " .set push \n" \ 54 " .set push \n" \
55 " .set noat \n" \ 55 " .set noat \n" \
56 " .set arch=r4000 \n" \ 56 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
58 " .set mips0 \n" \ 58 " .set mips0 \n" \
59 " " insn " \n" \ 59 " " insn " \n" \
60 " .set arch=r4000 \n" \ 60 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
61 "2: "user_sc("$1", "%2")" \n" \ 61 "2: "user_sc("$1", "%2")" \n" \
62 " beqz $1, 1b \n" \ 62 " beqz $1, 1b \n" \
63 __WEAK_LLSC_MB \ 63 __WEAK_LLSC_MB \
@@ -74,8 +74,8 @@
74 " "__UA_ADDR "\t2b, 4b \n" \ 74 " "__UA_ADDR "\t2b, 4b \n" \
75 " .previous \n" \ 75 " .previous \n" \
76 : "=r" (ret), "=&r" (oldval), \ 76 : "=r" (ret), "=&r" (oldval), \
77 "=" GCC_OFF12_ASM() (*uaddr) \ 77 "=" GCC_OFF_SMALL_ASM() (*uaddr) \
78 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 78 : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \
79 "i" (-EFAULT) \ 79 "i" (-EFAULT) \
80 : "memory"); \ 80 : "memory"); \
81 } else \ 81 } else \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
174 " "__UA_ADDR "\t1b, 4b \n" 174 " "__UA_ADDR "\t1b, 4b \n"
175 " "__UA_ADDR "\t2b, 4b \n" 175 " "__UA_ADDR "\t2b, 4b \n"
176 " .previous \n" 176 " .previous \n"
177 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 177 : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
178 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 178 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
179 "i" (-EFAULT) 179 "i" (-EFAULT)
180 : "memory"); 180 : "memory");
181 } else if (cpu_has_llsc) { 181 } else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
183 "# futex_atomic_cmpxchg_inatomic \n" 183 "# futex_atomic_cmpxchg_inatomic \n"
184 " .set push \n" 184 " .set push \n"
185 " .set noat \n" 185 " .set noat \n"
186 " .set arch=r4000 \n" 186 " .set "MIPS_ISA_ARCH_LEVEL" \n"
187 "1: "user_ll("%1", "%3")" \n" 187 "1: "user_ll("%1", "%3")" \n"
188 " bne %1, %z4, 3f \n" 188 " bne %1, %z4, 3f \n"
189 " .set mips0 \n" 189 " .set mips0 \n"
190 " move $1, %z5 \n" 190 " move $1, %z5 \n"
191 " .set arch=r4000 \n" 191 " .set "MIPS_ISA_ARCH_LEVEL" \n"
192 "2: "user_sc("$1", "%2")" \n" 192 "2: "user_sc("$1", "%2")" \n"
193 " beqz $1, 1b \n" 193 " beqz $1, 1b \n"
194 __WEAK_LLSC_MB 194 __WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
203 " "__UA_ADDR "\t1b, 4b \n" 203 " "__UA_ADDR "\t1b, 4b \n"
204 " "__UA_ADDR "\t2b, 4b \n" 204 " "__UA_ADDR "\t2b, 4b \n"
205 " .previous \n" 205 " .previous \n"
206 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 206 : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
207 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 207 : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
208 "i" (-EFAULT) 208 "i" (-EFAULT)
209 : "memory"); 209 : "memory");
210 } else 210 } else
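
The ".set arch=r4000" -> ".set "MIPS_ISA_ARCH_LEVEL"" substitutions above work because asm/compiler.h now centralises the ISA string: R6 assemblers reject the r4000 dialect (branch-likely and other removed encodings), so the macro names the highest ISA the kernel is built for. A hedged sketch of the expected definitions; the exact strings live in asm/compiler.h and may differ:

    /* Sketch, not the verbatim header. */
    #ifdef CONFIG_CPU_MIPSR6
    #define MIPS_ISA_LEVEL		"mips64r6"
    #define MIPS_ISA_ARCH_LEVEL	MIPS_ISA_LEVEL
    #else
    /* Pre-R6 keeps the historical r4000 dialect for ll/sc sequences. */
    #define MIPS_ISA_LEVEL		"mips64r2"
    #define MIPS_ISA_ARCH_LEVEL	"arch=r4000"
    #endif
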
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e3ee92d4dbe7..4087b47ad1cb 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -11,6 +11,7 @@
11#define _ASM_HAZARDS_H 11#define _ASM_HAZARDS_H
12 12
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <asm/compiler.h>
14 15
15#define ___ssnop \ 16#define ___ssnop \
16 sll $0, $0, 1 17 sll $0, $0, 1
@@ -21,7 +22,7 @@
21/* 22/*
22 * TLB hazards 23 * TLB hazards
23 */ 24 */
 24#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) 25#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
25 26
26/* 27/*
27 * MIPSR2 defines ehb for hazard avoidance 28 * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do { \
58 unsigned long tmp; \ 59 unsigned long tmp; \
59 \ 60 \
60 __asm__ __volatile__( \ 61 __asm__ __volatile__( \
61 " .set mips64r2 \n" \ 62 " .set "MIPS_ISA_LEVEL" \n" \
62 " dla %0, 1f \n" \ 63 " dla %0, 1f \n" \
63 " jr.hb %0 \n" \ 64 " jr.hb %0 \n" \
64 " .set mips0 \n" \ 65 " .set mips0 \n" \
@@ -132,7 +133,7 @@ do { \
132 133
133#define instruction_hazard() \ 134#define instruction_hazard() \
134do { \ 135do { \
135 if (cpu_has_mips_r2) \ 136 if (cpu_has_mips_r2_r6) \
136 __instruction_hazard(); \ 137 __instruction_hazard(); \
137} while (0) 138} while (0)
138 139
@@ -240,7 +241,7 @@ do { \
240 241
241#define __disable_fpu_hazard 242#define __disable_fpu_hazard
242 243
243#elif defined(CONFIG_CPU_MIPSR2) 244#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
244 245
245#define __enable_fpu_hazard \ 246#define __enable_fpu_hazard \
246 ___ehb 247 ___ehb
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 0fa5fdcd1f01..d60cc68fa31e 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -15,9 +15,10 @@
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/stringify.h> 17#include <linux/stringify.h>
18#include <asm/compiler.h>
18#include <asm/hazards.h> 19#include <asm/hazards.h>
19 20
 20#ifdef CONFIG_CPU_MIPSR2 21#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
21 22
22static inline void arch_local_irq_disable(void) 23static inline void arch_local_irq_disable(void)
23{ 24{
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
118unsigned long arch_local_irq_save(void); 119unsigned long arch_local_irq_save(void);
119void arch_local_irq_restore(unsigned long flags); 120void arch_local_irq_restore(unsigned long flags);
120void __arch_local_irq_restore(unsigned long flags); 121void __arch_local_irq_restore(unsigned long flags);
121#endif /* CONFIG_CPU_MIPSR2 */ 122#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
122 123
123static inline void arch_local_irq_enable(void) 124static inline void arch_local_irq_enable(void)
124{ 125{
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
126 " .set push \n" 127 " .set push \n"
127 " .set reorder \n" 128 " .set reorder \n"
128 " .set noat \n" 129 " .set noat \n"
129#if defined(CONFIG_CPU_MIPSR2) 130#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
130 " ei \n" 131 " ei \n"
131#else 132#else
132 " mfc0 $1,$12 \n" 133 " mfc0 $1,$12 \n"
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 46dfc3c1fd49..8feaed62a2ab 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -5,6 +5,7 @@
5#include <linux/bitops.h> 5#include <linux/bitops.h>
6#include <linux/atomic.h> 6#include <linux/atomic.h>
7#include <asm/cmpxchg.h> 7#include <asm/cmpxchg.h>
8#include <asm/compiler.h>
8#include <asm/war.h> 9#include <asm/war.h>
9 10
10typedef struct 11typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
47 unsigned long temp; 48 unsigned long temp;
48 49
49 __asm__ __volatile__( 50 __asm__ __volatile__(
50 " .set arch=r4000 \n" 51 " .set "MIPS_ISA_ARCH_LEVEL" \n"
51 "1:" __LL "%1, %2 # local_add_return \n" 52 "1:" __LL "%1, %2 # local_add_return \n"
52 " addu %0, %1, %3 \n" 53 " addu %0, %1, %3 \n"
53 __SC "%0, %2 \n" 54 __SC "%0, %2 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
92 unsigned long temp; 93 unsigned long temp;
93 94
94 __asm__ __volatile__( 95 __asm__ __volatile__(
95 " .set arch=r4000 \n" 96 " .set "MIPS_ISA_ARCH_LEVEL" \n"
96 "1:" __LL "%1, %2 # local_sub_return \n" 97 "1:" __LL "%1, %2 # local_sub_return \n"
97 " subu %0, %1, %3 \n" 98 " subu %0, %1, %3 \n"
98 __SC "%0, %2 \n" 99 __SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2e54b4bff5cf..90dbe43c8d27 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
85 " "__beqz"%0, 1b \n" 85 " "__beqz"%0, 1b \n"
86 " nop \n" 86 " nop \n"
87 " .set pop \n" 87 " .set pop \n"
88 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 88 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
89 : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); 89 : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
90} 90}
91 91
92/* 92/*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
106 " "__beqz"%0, 1b \n" 106 " "__beqz"%0, 1b \n"
107 " nop \n" 107 " nop \n"
108 " .set pop \n" 108 " .set pop \n"
109 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 109 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
110 : "ir" (mask), GCC_OFF12_ASM() (*addr)); 110 : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
111} 111}
112 112
113/* 113/*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
127 " "__beqz"%0, 1b \n" 127 " "__beqz"%0, 1b \n"
128 " nop \n" 128 " nop \n"
129 " .set pop \n" 129 " .set pop \n"
130 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 130 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
131 : "ir" (~mask), GCC_OFF12_ASM() (*addr)); 131 : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
132} 132}
133 133
134/* 134/*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
148 " "__beqz"%0, 1b \n" 148 " "__beqz"%0, 1b \n"
149 " nop \n" 149 " nop \n"
150 " .set pop \n" 150 " .set pop \n"
151 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 151 : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
152 : "ir" (mask), GCC_OFF12_ASM() (*addr)); 152 : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
153} 153}
154 154
155/* 155/*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
220 " .set arch=r4000 \n" \ 220 " .set arch=r4000 \n" \
221 "1: ll %0, %1 #custom_read_reg32 \n" \ 221 "1: ll %0, %1 #custom_read_reg32 \n" \
222 " .set pop \n" \ 222 " .set pop \n" \
223 : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 223 : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
224 : GCC_OFF12_ASM() (*address)) 224 : GCC_OFF_SMALL_ASM() (*address))
225 225
226#define custom_write_reg32(address, tmp) \ 226#define custom_write_reg32(address, tmp) \
227 __asm__ __volatile__( \ 227 __asm__ __volatile__( \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
231 " "__beqz"%0, 1b \n" \ 231 " "__beqz"%0, 1b \n" \
232 " nop \n" \ 232 " nop \n" \
233 " .set pop \n" \ 233 " .set pop \n" \
234 : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 234 : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \
235 : "0" (tmp), GCC_OFF12_ASM() (*address)) 235 : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
236 236
237#endif /* __ASM_REGOPS_H__ */ 237#endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644
index 000000000000..60570f2c3ba2
--- /dev/null
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -0,0 +1,96 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Markos Chandras <markos.chandras@imgtec.com>
8 */
9
10#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
11#define __ASM_MIPS_R2_TO_R6_EMUL_H
12
13struct mips_r2_emulator_stats {
14 u64 movs;
15 u64 hilo;
16 u64 muls;
17 u64 divs;
18 u64 dsps;
19 u64 bops;
20 u64 traps;
21 u64 fpus;
22 u64 loads;
23 u64 stores;
24 u64 llsc;
25 u64 dsemul;
26};
27
28struct mips_r2br_emulator_stats {
29 u64 jrs;
30 u64 bltzl;
31 u64 bgezl;
32 u64 bltzll;
33 u64 bgezll;
34 u64 bltzall;
35 u64 bgezall;
36 u64 bltzal;
37 u64 bgezal;
38 u64 beql;
39 u64 bnel;
40 u64 blezl;
41 u64 bgtzl;
42};
43
44#ifdef CONFIG_DEBUG_FS
45
46#define MIPS_R2_STATS(M) \
47do { \
48 u32 nir; \
49 int err; \
50 \
51 preempt_disable(); \
52 __this_cpu_inc(mipsr2emustats.M); \
53 err = __get_user(nir, (u32 __user *)regs->cp0_epc); \
54 if (!err) { \
55 if (nir == BREAK_MATH) \
56 __this_cpu_inc(mipsr2bdemustats.M); \
57 } \
58 preempt_enable(); \
59} while (0)
60
61#define MIPS_R2BR_STATS(M) \
62do { \
63 preempt_disable(); \
64 __this_cpu_inc(mipsr2bremustats.M); \
65 preempt_enable(); \
66} while (0)
67
68#else
69
70#define MIPS_R2_STATS(M) do { } while (0)
71#define MIPS_R2BR_STATS(M) do { } while (0)
72
73#endif /* CONFIG_DEBUG_FS */
74
75struct r2_decoder_table {
76 u32 mask;
77 u32 code;
78 int (*func)(struct pt_regs *regs, u32 inst);
79};
80
81
82extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
83 const char *str);
84
85#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
 86static int __maybe_unused mipsr2_emulation;
 87static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }
88#else
89/* MIPS R2 Emulator ON/OFF */
90extern int mipsr2_emulation;
91extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
92#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
93
94#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation)
95
96#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
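
struct r2_decoder_table pairs a mask/code with a handler, so the emulator can dispatch by masking the trapping instruction and comparing. A hypothetical dispatcher sketch (run_r2_table() is illustrative; mipsr2_decoder() in mips-r2-to-r6-emul.c is the real entry point):

    static int run_r2_table(struct pt_regs *regs, u32 inst,
    			const struct r2_decoder_table *t)
    {
    	/* Walk until the sentinel entry with a NULL handler. */
    	for (; t->func; t++)
    		if ((inst & t->mask) == t->code)
    			return t->func(regs, inst);
    	return SIGILL;	/* nothing matched: let the caller raise it */
    }
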
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 5e4aef304b02..06346001ee4d 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,7 @@
653#define MIPS_CONF5_NF (_ULCAST_(1) << 0) 653#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
656#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
656#define MIPS_CONF5_MVH (_ULCAST_(1) << 5) 657#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
657#define MIPS_CONF5_FRE (_ULCAST_(1) << 8) 658#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
658#define MIPS_CONF5_UFE (_ULCAST_(1) << 9) 659#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do { \
1127#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) 1128#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
1128#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 1129#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
1129 1130
1131#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
1132#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
1130#define read_c0_maar() __read_ulong_c0_register($17, 1) 1133#define read_c0_maar() __read_ulong_c0_register($17, 1)
1131#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) 1134#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
1132#define read_c0_maari() __read_32bit_c0_register($17, 2) 1135#define read_c0_maari() __read_32bit_c0_register($17, 2)
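
Config5.LLB advertises a software-writable LLbit through the LLAddr register, which is what MIPS_CPU_RW_LLB and the new read/write_c0_lladdr() accessors expose. A sketch of how the probe could translate the config bit into the option flag; hypothetical helper, hedged against the real decode_config5():

    /* Illustrative probe step, not the verbatim cpu-probe.c code. */
    static inline void probe_rw_llb(struct cpuinfo_mips *c, unsigned int config5)
    {
    	if (config5 & MIPS_CONF5_LLB)	/* LLbit is readable/writable */
    		c->options |= MIPS_CPU_RW_LLB;
    }
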
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 800fe578dc99..0aaf9a01ea50 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
88#define MODULE_PROC_FAMILY "MIPS32_R1 " 88#define MODULE_PROC_FAMILY "MIPS32_R1 "
89#elif defined CONFIG_CPU_MIPS32_R2 89#elif defined CONFIG_CPU_MIPS32_R2
90#define MODULE_PROC_FAMILY "MIPS32_R2 " 90#define MODULE_PROC_FAMILY "MIPS32_R2 "
91#elif defined CONFIG_CPU_MIPS32_R6
92#define MODULE_PROC_FAMILY "MIPS32_R6 "
91#elif defined CONFIG_CPU_MIPS64_R1 93#elif defined CONFIG_CPU_MIPS64_R1
92#define MODULE_PROC_FAMILY "MIPS64_R1 " 94#define MODULE_PROC_FAMILY "MIPS64_R1 "
93#elif defined CONFIG_CPU_MIPS64_R2 95#elif defined CONFIG_CPU_MIPS64_R2
94#define MODULE_PROC_FAMILY "MIPS64_R2 " 96#define MODULE_PROC_FAMILY "MIPS64_R2 "
97#elif defined CONFIG_CPU_MIPS64_R6
98#define MODULE_PROC_FAMILY "MIPS64_R6 "
95#elif defined CONFIG_CPU_R3000 99#elif defined CONFIG_CPU_R3000
96#define MODULE_PROC_FAMILY "R3000 " 100#define MODULE_PROC_FAMILY "R3000 "
97#elif defined CONFIG_CPU_TX39XX 101#elif defined CONFIG_CPU_TX39XX
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 75739c83f07e..8d05d9069823 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
275 " lbu %[ticket], %[now_serving]\n" 275 " lbu %[ticket], %[now_serving]\n"
276 "4:\n" 276 "4:\n"
277 ".set pop\n" : 277 ".set pop\n" :
278 [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 278 [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
280 [my_ticket] "=r"(my_ticket) 280 [my_ticket] "=r"(my_ticket)
281 ); 281 );
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index e293a8d89a6d..1b22d2da88a1 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -14,6 +14,7 @@
14 14
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/cacheops.h> 16#include <asm/cacheops.h>
17#include <asm/compiler.h>
17#include <asm/cpu-features.h> 18#include <asm/cpu-features.h>
18#include <asm/cpu-type.h> 19#include <asm/cpu-type.h>
19#include <asm/mipsmtregs.h> 20#include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
39 __asm__ __volatile__( \ 40 __asm__ __volatile__( \
40 " .set push \n" \ 41 " .set push \n" \
41 " .set noreorder \n" \ 42 " .set noreorder \n" \
42 " .set arch=r4000 \n" \ 43 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
43 " cache %0, %1 \n" \ 44 " cache %0, %1 \n" \
44 " .set pop \n" \ 45 " .set pop \n" \
45 : \ 46 : \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
147 __asm__ __volatile__( \ 148 __asm__ __volatile__( \
148 " .set push \n" \ 149 " .set push \n" \
149 " .set noreorder \n" \ 150 " .set noreorder \n" \
150 " .set arch=r4000 \n" \ 151 " .set "MIPS_ISA_ARCH_LEVEL" \n" \
151 "1: cache %0, (%1) \n" \ 152 "1: cache %0, (%1) \n" \
152 "2: .set pop \n" \ 153 "2: .set pop \n" \
153 " .section __ex_table,\"a\" \n" \ 154 " .section __ex_table,\"a\" \n" \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
218 cache_op(Page_Invalidate_T, addr); 219 cache_op(Page_Invalidate_T, addr);
219} 220}
220 221
222#ifndef CONFIG_CPU_MIPSR6
221#define cache16_unroll32(base,op) \ 223#define cache16_unroll32(base,op) \
222 __asm__ __volatile__( \ 224 __asm__ __volatile__( \
223 " .set push \n" \ 225 " .set push \n" \
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
322 : "r" (base), \ 324 : "r" (base), \
323 "i" (op)); 325 "i" (op));
324 326
327#else
328/*
329 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
330 * This means we now need to increment the base register before we flush
331 * more cache lines.
332 */
333#define cache16_unroll32(base,op) \
334 __asm__ __volatile__( \
335 " .set push\n" \
336 " .set noreorder\n" \
337 " .set mips64r6\n" \
338 " .set noat\n" \
339 " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \
340 " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \
341 " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \
342 " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \
343 " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \
344 " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \
345 " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \
346 " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \
347 " addiu $1, $0, 0x100 \n" \
348 " cache %1, 0x000($1); cache %1, 0x010($1)\n" \
349 " cache %1, 0x020($1); cache %1, 0x030($1)\n" \
350 " cache %1, 0x040($1); cache %1, 0x050($1)\n" \
351 " cache %1, 0x060($1); cache %1, 0x070($1)\n" \
352 " cache %1, 0x080($1); cache %1, 0x090($1)\n" \
353 " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \
354 " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \
355 " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \
356 " .set pop\n" \
357 : \
358 : "r" (base), \
359 "i" (op));
360
361#define cache32_unroll32(base,op) \
362 __asm__ __volatile__( \
363 " .set push\n" \
364 " .set noreorder\n" \
365 " .set mips64r6\n" \
366 " .set noat\n" \
367 " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \
368 " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \
369 " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \
370 " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \
371 " addiu $1, %0, 0x100\n" \
372 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
373 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
374 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
375 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
376 " addiu $1, $1, 0x100\n" \
377 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
378 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
379 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
380 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
381 " addiu $1, $1, 0x100\n" \
382 " cache %1, 0x000($1); cache %1, 0x020($1)\n" \
383 " cache %1, 0x040($1); cache %1, 0x060($1)\n" \
384 " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \
385 " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \
386 " .set pop\n" \
387 : \
388 : "r" (base), \
389 "i" (op));
390
391#define cache64_unroll32(base,op) \
392 __asm__ __volatile__( \
393 " .set push\n" \
394 " .set noreorder\n" \
395 " .set mips64r6\n" \
396 " .set noat\n" \
397 " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \
398 " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \
399 " addiu $1, %0, 0x100\n" \
400 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
401 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
402 " addiu $1, %0, 0x100\n" \
403 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
404 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
405 " addiu $1, %0, 0x100\n" \
406 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
407 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
408 " addiu $1, %0, 0x100\n" \
409 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
410 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
411 " addiu $1, %0, 0x100\n" \
412 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
413 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
414 " addiu $1, %0, 0x100\n" \
415 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
416 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
417 " addiu $1, %0, 0x100\n" \
418 " cache %1, 0x000($1); cache %1, 0x040($1)\n" \
419 " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \
420 " .set pop\n" \
421 : \
422 : "r" (base), \
423 "i" (op));
424
425#define cache128_unroll32(base,op) \
426 __asm__ __volatile__( \
427 " .set push\n" \
428 " .set noreorder\n" \
429 " .set mips64r6\n" \
430 " .set noat\n" \
431 " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \
432 " addiu $1, %0, 0x100\n" \
433 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
434 " addiu $1, $1, 0x100\n" \
435 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
436 " addiu $1, $1, 0x100\n" \
437 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
438 " addiu $1, $1, 0x100\n" \
439 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
440 " addiu $1, $1, 0x100\n" \
441 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
442 " addiu $1, $1, 0x100\n" \
443 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
444 " addiu $1, $1, 0x100\n" \
445 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
446 " addiu $1, $1, 0x100\n" \
447 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
448 " addiu $1, $1, 0x100\n" \
449 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
450 " addiu $1, $1, 0x100\n" \
451 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
452 " addiu $1, $1, 0x100\n" \
453 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
454 " addiu $1, $1, 0x100\n" \
455 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
456 " addiu $1, $1, 0x100\n" \
457 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
458 " addiu $1, $1, 0x100\n" \
459 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
460 " addiu $1, $1, 0x100\n" \
461 " cache %1, 0x000($1); cache %1, 0x080($1)\n" \
462 " .set pop\n" \
463 : \
464 : "r" (base), \
465 "i" (op));
466#endif /* CONFIG_CPU_MIPSR6 */
470
325/* 471/*
326 * Perform the cache operation specified by op using a user mode virtual 472 * Perform the cache operation specified by op using a user mode virtual
327 * address while in kernel mode. 473 * address while in kernel mode.
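
Each cacheXX_unroll32() invocation touches 32 lines, so the blast helpers that r4kcache.h builds from these macros step their address by 32 x linesize per call. A minimal sketch of such a page blast for 32-byte lines (the real functions are generated by the __BUILD_BLAST_CACHE macros; this is an illustration, not the generated code):

    static inline void blast_dcache32_page_sketch(unsigned long page)
    {
    	unsigned long start = page;
    	unsigned long end = page + PAGE_SIZE;

    	do {
    		cache32_unroll32(start, Hit_Writeback_Inv_D);
    		start += 32 * 32;	/* 32 lines x 32 bytes per unroll */
    	} while (start < end);
    }
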
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index c6d06d383ef9..b4548690ade9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
89 " subu %[ticket], %[ticket], 1 \n" 89 " subu %[ticket], %[ticket], 1 \n"
90 " .previous \n" 90 " .previous \n"
91 " .set pop \n" 91 " .set pop \n"
92 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 92 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
93 [serving_now_ptr] "+m" (lock->h.serving_now), 93 [serving_now_ptr] "+m" (lock->h.serving_now),
94 [ticket] "=&r" (tmp), 94 [ticket] "=&r" (tmp),
95 [my_ticket] "=&r" (my_ticket) 95 [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
122 " subu %[ticket], %[ticket], 1 \n" 122 " subu %[ticket], %[ticket], 1 \n"
123 " .previous \n" 123 " .previous \n"
124 " .set pop \n" 124 " .set pop \n"
125 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 125 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
126 [serving_now_ptr] "+m" (lock->h.serving_now), 126 [serving_now_ptr] "+m" (lock->h.serving_now),
127 [ticket] "=&r" (tmp), 127 [ticket] "=&r" (tmp),
128 [my_ticket] "=&r" (my_ticket) 128 [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
164 " li %[ticket], 0 \n" 164 " li %[ticket], 0 \n"
165 " .previous \n" 165 " .previous \n"
166 " .set pop \n" 166 " .set pop \n"
167 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 167 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
168 [ticket] "=&r" (tmp), 168 [ticket] "=&r" (tmp),
169 [my_ticket] "=&r" (tmp2), 169 [my_ticket] "=&r" (tmp2),
170 [now_serving] "=&r" (tmp3) 170 [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
188 " li %[ticket], 0 \n" 188 " li %[ticket], 0 \n"
189 " .previous \n" 189 " .previous \n"
190 " .set pop \n" 190 " .set pop \n"
191 : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 191 : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
192 [ticket] "=&r" (tmp), 192 [ticket] "=&r" (tmp),
193 [my_ticket] "=&r" (tmp2), 193 [my_ticket] "=&r" (tmp2),
194 [now_serving] "=&r" (tmp3) 194 [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
235 " beqzl %1, 1b \n" 235 " beqzl %1, 1b \n"
236 " nop \n" 236 " nop \n"
237 " .set reorder \n" 237 " .set reorder \n"
238 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 238 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
239 : GCC_OFF12_ASM() (rw->lock) 239 : GCC_OFF_SMALL_ASM() (rw->lock)
240 : "memory"); 240 : "memory");
241 } else { 241 } else {
242 do { 242 do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
245 " bltz %1, 1b \n" 245 " bltz %1, 1b \n"
246 " addu %1, 1 \n" 246 " addu %1, 1 \n"
247 "2: sc %1, %0 \n" 247 "2: sc %1, %0 \n"
248 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 248 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
249 : GCC_OFF12_ASM() (rw->lock) 249 : GCC_OFF_SMALL_ASM() (rw->lock)
250 : "memory"); 250 : "memory");
251 } while (unlikely(!tmp)); 251 } while (unlikely(!tmp));
252 } 252 }
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
254 smp_llsc_mb(); 254 smp_llsc_mb();
255} 255}
256 256
257/* Note the use of sub, not subu which will make the kernel die with an
258 overflow exception if we ever try to unlock an rwlock that is already
259 unlocked or is being held by a writer. */
260static inline void arch_read_unlock(arch_rwlock_t *rw) 257static inline void arch_read_unlock(arch_rwlock_t *rw)
261{ 258{
262 unsigned int tmp; 259 unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
266 if (R10000_LLSC_WAR) { 263 if (R10000_LLSC_WAR) {
267 __asm__ __volatile__( 264 __asm__ __volatile__(
268 "1: ll %1, %2 # arch_read_unlock \n" 265 "1: ll %1, %2 # arch_read_unlock \n"
269 " sub %1, 1 \n" 266 " addiu %1, 1 \n"
270 " sc %1, %0 \n" 267 " sc %1, %0 \n"
271 " beqzl %1, 1b \n" 268 " beqzl %1, 1b \n"
272 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 269 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
273 : GCC_OFF12_ASM() (rw->lock) 270 : GCC_OFF_SMALL_ASM() (rw->lock)
274 : "memory"); 271 : "memory");
275 } else { 272 } else {
276 do { 273 do {
277 __asm__ __volatile__( 274 __asm__ __volatile__(
278 "1: ll %1, %2 # arch_read_unlock \n" 275 "1: ll %1, %2 # arch_read_unlock \n"
279 " sub %1, 1 \n" 276 " addiu %1, -1 \n"
280 " sc %1, %0 \n" 277 " sc %1, %0 \n"
281 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 278 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
282 : GCC_OFF12_ASM() (rw->lock) 279 : GCC_OFF_SMALL_ASM() (rw->lock)
283 : "memory"); 280 : "memory");
284 } while (unlikely(!tmp)); 281 } while (unlikely(!tmp));
285 } 282 }
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
299 " beqzl %1, 1b \n" 296 " beqzl %1, 1b \n"
300 " nop \n" 297 " nop \n"
301 " .set reorder \n" 298 " .set reorder \n"
302 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 299 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
303 : GCC_OFF12_ASM() (rw->lock) 300 : GCC_OFF_SMALL_ASM() (rw->lock)
304 : "memory"); 301 : "memory");
305 } else { 302 } else {
306 do { 303 do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
309 " bnez %1, 1b \n" 306 " bnez %1, 1b \n"
310 " lui %1, 0x8000 \n" 307 " lui %1, 0x8000 \n"
311 "2: sc %1, %0 \n" 308 "2: sc %1, %0 \n"
312 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 309 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
313 : GCC_OFF12_ASM() (rw->lock) 310 : GCC_OFF_SMALL_ASM() (rw->lock)
314 : "memory"); 311 : "memory");
315 } while (unlikely(!tmp)); 312 } while (unlikely(!tmp));
316 } 313 }
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
349 __WEAK_LLSC_MB 346 __WEAK_LLSC_MB
350 " li %2, 1 \n" 347 " li %2, 1 \n"
351 "2: \n" 348 "2: \n"
352 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 349 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
353 : GCC_OFF12_ASM() (rw->lock) 350 : GCC_OFF_SMALL_ASM() (rw->lock)
354 : "memory"); 351 : "memory");
355 } else { 352 } else {
356 __asm__ __volatile__( 353 __asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
366 __WEAK_LLSC_MB 363 __WEAK_LLSC_MB
367 " li %2, 1 \n" 364 " li %2, 1 \n"
368 "2: \n" 365 "2: \n"
369 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 366 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
370 : GCC_OFF12_ASM() (rw->lock) 367 : GCC_OFF_SMALL_ASM() (rw->lock)
371 : "memory"); 368 : "memory");
372 } 369 }
373 370
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
393 " li %2, 1 \n" 390 " li %2, 1 \n"
394 " .set reorder \n" 391 " .set reorder \n"
395 "2: \n" 392 "2: \n"
396 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 393 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
397 : GCC_OFF12_ASM() (rw->lock) 394 : GCC_OFF_SMALL_ASM() (rw->lock)
398 : "memory"); 395 : "memory");
399 } else { 396 } else {
400 do { 397 do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
406 " sc %1, %0 \n" 403 " sc %1, %0 \n"
407 " li %2, 1 \n" 404 " li %2, 1 \n"
408 "2: \n" 405 "2: \n"
409 : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), 406 : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
410 "=&r" (ret) 407 "=&r" (ret)
411 : GCC_OFF12_ASM() (rw->lock) 408 : GCC_OFF_SMALL_ASM() (rw->lock)
412 : "memory"); 409 : "memory");
413 } while (unlikely(!tmp)); 410 } while (unlikely(!tmp));
414 411
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 0b89006e4907..0f90d88e464d 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -1,10 +1,10 @@
1#ifndef _MIPS_SPRAM_H 1#ifndef _MIPS_SPRAM_H
2#define _MIPS_SPRAM_H 2#define _MIPS_SPRAM_H
3 3
4#ifdef CONFIG_CPU_MIPSR2 4#if defined(CONFIG_MIPS_SPRAM)
5extern __init void spram_config(void); 5extern __init void spram_config(void);
6#else 6#else
7static inline void spram_config(void) { }; 7static inline void spram_config(void) { };
8#endif /* CONFIG_CPU_MIPSR2 */ 8#endif /* CONFIG_MIPS_SPRAM */
9 9
10#endif /* _MIPS_SPRAM_H */ 10#endif /* _MIPS_SPRAM_H */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b188c797565c..28d6d9364bd1 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -40,7 +40,7 @@
40 LONG_S v1, PT_HI(sp) 40 LONG_S v1, PT_HI(sp)
41 mflhxu v1 41 mflhxu v1
42 LONG_S v1, PT_ACX(sp) 42 LONG_S v1, PT_ACX(sp)
43#else 43#elif !defined(CONFIG_CPU_MIPSR6)
44 mfhi v1 44 mfhi v1
45#endif 45#endif
46#ifdef CONFIG_32BIT 46#ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
50 LONG_S $10, PT_R10(sp) 50 LONG_S $10, PT_R10(sp)
51 LONG_S $11, PT_R11(sp) 51 LONG_S $11, PT_R11(sp)
52 LONG_S $12, PT_R12(sp) 52 LONG_S $12, PT_R12(sp)
53#ifndef CONFIG_CPU_HAS_SMARTMIPS 53#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
54 LONG_S v1, PT_HI(sp) 54 LONG_S v1, PT_HI(sp)
55 mflo v1 55 mflo v1
56#endif 56#endif
@@ -58,7 +58,7 @@
58 LONG_S $14, PT_R14(sp) 58 LONG_S $14, PT_R14(sp)
59 LONG_S $15, PT_R15(sp) 59 LONG_S $15, PT_R15(sp)
60 LONG_S $24, PT_R24(sp) 60 LONG_S $24, PT_R24(sp)
61#ifndef CONFIG_CPU_HAS_SMARTMIPS 61#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
62 LONG_S v1, PT_LO(sp) 62 LONG_S v1, PT_LO(sp)
63#endif 63#endif
64#ifdef CONFIG_CPU_CAVIUM_OCTEON 64#ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -226,7 +226,7 @@
226 mtlhx $24 226 mtlhx $24
227 LONG_L $24, PT_LO(sp) 227 LONG_L $24, PT_LO(sp)
228 mtlhx $24 228 mtlhx $24
229#else 229#elif !defined(CONFIG_CPU_MIPSR6)
230 LONG_L $24, PT_LO(sp) 230 LONG_L $24, PT_LO(sp)
231 mtlo $24 231 mtlo $24
232 LONG_L $24, PT_HI(sp) 232 LONG_L $24, PT_HI(sp)
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index b928b6f898cd..e92d6c4b5ed1 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -75,9 +75,12 @@ do { \
75#endif 75#endif
76 76
77#define __clear_software_ll_bit() \ 77#define __clear_software_ll_bit() \
78do { \ 78do { if (cpu_has_rw_llb) { \
79 if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ 79 write_c0_lladdr(0); \
80 ll_bit = 0; \ 80 } else { \
81 if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
82 ll_bit = 0; \
83 } \
81} while (0) 84} while (0)
82 85
83#define switch_to(prev, next, last) \ 86#define switch_to(prev, next, last) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 99eea59604e9..fb68fd2714fb 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -28,7 +28,7 @@ struct thread_info {
28 unsigned long tp_value; /* thread pointer */ 28 unsigned long tp_value; /* thread pointer */
29 __u32 cpu; /* current CPU */ 29 __u32 cpu; /* current CPU */
30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 30 int preempt_count; /* 0 => preemptable, <0 => BUG */
31 31 int r2_emul_return; /* 1 => Returning from R2 emulator */
32 mm_segment_t addr_limit; /* 32 mm_segment_t addr_limit; /*
33 * thread address space limit: 33 * thread address space limit:
 34 * 0x7fffffff for user-thread 34 * 0x7fffffff for user-thread
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 89c22433b1c6..fc0cf5ac0cf7 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
21enum major_op { 21enum major_op {
22 spec_op, bcond_op, j_op, jal_op, 22 spec_op, bcond_op, j_op, jal_op,
23 beq_op, bne_op, blez_op, bgtz_op, 23 beq_op, bne_op, blez_op, bgtz_op,
24 addi_op, addiu_op, slti_op, sltiu_op, 24 addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
25 andi_op, ori_op, xori_op, lui_op, 25 andi_op, ori_op, xori_op, lui_op,
26 cop0_op, cop1_op, cop2_op, cop1x_op, 26 cop0_op, cop1_op, cop2_op, cop1x_op,
27 beql_op, bnel_op, blezl_op, bgtzl_op, 27 beql_op, bnel_op, blezl_op, bgtzl_op,
28 daddi_op, daddiu_op, ldl_op, ldr_op, 28 daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
29 spec2_op, jalx_op, mdmx_op, spec3_op, 29 spec2_op, jalx_op, mdmx_op, spec3_op,
30 lb_op, lh_op, lwl_op, lw_op, 30 lb_op, lh_op, lwl_op, lw_op,
31 lbu_op, lhu_op, lwr_op, lwu_op, 31 lbu_op, lhu_op, lwr_op, lwu_op,
32 sb_op, sh_op, swl_op, sw_op, 32 sb_op, sh_op, swl_op, sw_op,
33 sdl_op, sdr_op, swr_op, cache_op, 33 sdl_op, sdr_op, swr_op, cache_op,
34 ll_op, lwc1_op, lwc2_op, pref_op, 34 ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
35 lld_op, ldc1_op, ldc2_op, ld_op, 35 lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
36 sc_op, swc1_op, swc2_op, major_3b_op, 36 sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
37 scd_op, sdc1_op, sdc2_op, sd_op 37 scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
38}; 38};
39 39
40/* 40/*
@@ -83,9 +83,12 @@ enum spec3_op {
83 swe_op = 0x1f, bshfl_op = 0x20, 83 swe_op = 0x1f, bshfl_op = 0x20,
84 swle_op = 0x21, swre_op = 0x22, 84 swle_op = 0x21, swre_op = 0x22,
85 prefe_op = 0x23, dbshfl_op = 0x24, 85 prefe_op = 0x23, dbshfl_op = 0x24,
86 lbue_op = 0x28, lhue_op = 0x29, 86 cache6_op = 0x25, sc6_op = 0x26,
87 lbe_op = 0x2c, lhe_op = 0x2d, 87 scd6_op = 0x27, lbue_op = 0x28,
88 lle_op = 0x2e, lwe_op = 0x2f, 88 lhue_op = 0x29, lbe_op = 0x2c,
89 lhe_op = 0x2d, lle_op = 0x2e,
90 lwe_op = 0x2f, pref6_op = 0x35,
91 ll6_op = 0x36, lld6_op = 0x37,
89 rdhwr_op = 0x3b 92 rdhwr_op = 0x3b
90}; 93};
91 94
@@ -112,7 +115,8 @@ enum cop_op {
112 mfhc_op = 0x03, mtc_op = 0x04, 115 mfhc_op = 0x03, mtc_op = 0x04,
113 dmtc_op = 0x05, ctc_op = 0x06, 116 dmtc_op = 0x05, ctc_op = 0x06,
114 mthc0_op = 0x06, mthc_op = 0x07, 117 mthc0_op = 0x06, mthc_op = 0x07,
115 bc_op = 0x08, cop_op = 0x10, 118 bc_op = 0x08, bc1eqz_op = 0x09,
119 bc1nez_op = 0x0d, cop_op = 0x10,
116 copm_op = 0x18 120 copm_op = 0x18
117}; 121};
118 122
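
Because R6 reuses pre-R6 opcode slots (addi becomes the BOVC/BEQC/BEQZALC family, lwc2 becomes BC, ldc2 becomes BEQZC/JIC, and so on), the enum gains aliases with identical numeric values so decoders can name whichever meaning applies. A small illustrative classifier built only from the aliases above (hypothetical helper, not from the patch):

    #include <asm/inst.h>

    static int is_r6_compact_branch_slot(union mips_instruction insn)
    {
    	switch (insn.i_format.opcode) {
    	case cbcond0_op:	/* the pre-R6 addi slot */
    	case cbcond1_op:	/* the pre-R6 daddi slot */
    	case bc6_op:		/* the pre-R6 lwc2 slot */
    	case beqzcjic_op:	/* the pre-R6 ldc2 slot */
    	case balc6_op:		/* the pre-R6 swc2 slot */
    	case bnezcjialc_op:	/* the pre-R6 sdc2 slot */
    		return 1;
    	default:
    		return 0;
    	}
    }
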
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 92987d1bbe5f..d3d2ff2d76dc 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 52obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 53obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
54obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o 54obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
55obj-$(CONFIG_CPU_MIPSR2) += spram.o 55obj-$(CONFIG_MIPS_SPRAM) += spram.o
56 56
57obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 57obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
58obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o 58obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
90obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o 90obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
91obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 91obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
92obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 92obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
93obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
93 94
94CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 95CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
95 96
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index b1d84bd4efb3..7b6c11aa1cae 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
97 OFFSET(TI_TP_VALUE, thread_info, tp_value); 97 OFFSET(TI_TP_VALUE, thread_info, tp_value);
98 OFFSET(TI_CPU, thread_info, cpu); 98 OFFSET(TI_CPU, thread_info, cpu);
99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
100 OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
100 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 101 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
101 OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); 102 OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
102 OFFSET(TI_REGS, thread_info, regs); 103 OFFSET(TI_REGS, thread_info, regs);
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d7d99d601cc..c2e0f45ddf6c 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -16,6 +16,7 @@
16#include <asm/fpu.h> 16#include <asm/fpu.h>
17#include <asm/fpu_emulator.h> 17#include <asm/fpu_emulator.h>
18#include <asm/inst.h> 18#include <asm/inst.h>
19#include <asm/mips-r2-to-r6-emul.h>
19#include <asm/ptrace.h> 20#include <asm/ptrace.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
399 * @returns: -EFAULT on error and forces SIGBUS, and on success 400 * @returns: -EFAULT on error and forces SIGBUS, and on success
400 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 401 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
401 * evaluating the branch. 402 * evaluating the branch.
403 *
404 * MIPS R6 Compact branches and forbidden slots:
405 * Compact branches do not throw exceptions because they do
406 * not have delay slots. The forbidden slot instruction ($PC+4)
407 * is only executed if the branch was not taken. Otherwise the
408 * forbidden slot is skipped entirely. This means that the
409 * only possible reason to be here because of a MIPS R6 compact
410 * branch instruction is that the forbidden slot has thrown one.
411 * In that case the branch was not taken, so the EPC can be safely
412 * set to EPC + 8.
402 */ 413 */
403int __compute_return_epc_for_insn(struct pt_regs *regs, 414int __compute_return_epc_for_insn(struct pt_regs *regs,
404 union mips_instruction insn) 415 union mips_instruction insn)
405{ 416{
406 unsigned int bit, fcr31, dspcontrol; 417 unsigned int bit, fcr31, dspcontrol, reg;
407 long epc = regs->cp0_epc; 418 long epc = regs->cp0_epc;
408 int ret = 0; 419 int ret = 0;
409 420
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
417 regs->regs[insn.r_format.rd] = epc + 8; 428 regs->regs[insn.r_format.rd] = epc + 8;
418 /* Fall through */ 429 /* Fall through */
419 case jr_op: 430 case jr_op:
431 if (NO_R6EMU && insn.r_format.func == jr_op)
432 goto sigill_r6;
420 regs->cp0_epc = regs->regs[insn.r_format.rs]; 433 regs->cp0_epc = regs->regs[insn.r_format.rs];
421 break; 434 break;
422 } 435 }
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
429 */ 442 */
430 case bcond_op: 443 case bcond_op:
431 switch (insn.i_format.rt) { 444 switch (insn.i_format.rt) {
432 case bltz_op:
433 case bltzl_op: 445 case bltzl_op:
446 if (NO_R6EMU)
447 goto sigill_r6;
448 case bltz_op:
434 if ((long)regs->regs[insn.i_format.rs] < 0) { 449 if ((long)regs->regs[insn.i_format.rs] < 0) {
435 epc = epc + 4 + (insn.i_format.simmediate << 2); 450 epc = epc + 4 + (insn.i_format.simmediate << 2);
436 if (insn.i_format.rt == bltzl_op) 451 if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
440 regs->cp0_epc = epc; 455 regs->cp0_epc = epc;
441 break; 456 break;
442 457
443 case bgez_op:
444 case bgezl_op: 458 case bgezl_op:
459 if (NO_R6EMU)
460 goto sigill_r6;
461 case bgez_op:
445 if ((long)regs->regs[insn.i_format.rs] >= 0) { 462 if ((long)regs->regs[insn.i_format.rs] >= 0) {
446 epc = epc + 4 + (insn.i_format.simmediate << 2); 463 epc = epc + 4 + (insn.i_format.simmediate << 2);
447 if (insn.i_format.rt == bgezl_op) 464 if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
453 470
454 case bltzal_op: 471 case bltzal_op:
455 case bltzall_op: 472 case bltzall_op:
473 if (NO_R6EMU && (insn.i_format.rs ||
474 insn.i_format.rt == bltzall_op)) {
475 ret = -SIGILL;
476 break;
477 }
456 regs->regs[31] = epc + 8; 478 regs->regs[31] = epc + 8;
479 /*
480 * OK we are here either because we hit a NAL
481 * instruction or because we are emulating an
482 * old bltzal{,l} one. Lets figure out what the
483 * case really is.
484 */
485 if (!insn.i_format.rs) {
486 /*
487 * NAL or BLTZAL with rs == 0
488 * Doesn't matter if we are R6 or not. The
489 * result is the same
490 */
491 regs->cp0_epc += 4 +
492 (insn.i_format.simmediate << 2);
493 break;
494 }
495 /* Now do the real thing for non-R6 BLTZAL{,L} */
457 if ((long)regs->regs[insn.i_format.rs] < 0) { 496 if ((long)regs->regs[insn.i_format.rs] < 0) {
458 epc = epc + 4 + (insn.i_format.simmediate << 2); 497 epc = epc + 4 + (insn.i_format.simmediate << 2);
459 if (insn.i_format.rt == bltzall_op) 498 if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
465 504
466 case bgezal_op: 505 case bgezal_op:
467 case bgezall_op: 506 case bgezall_op:
507 if (NO_R6EMU && (insn.i_format.rs ||
508 insn.i_format.rt == bgezall_op)) {
509 ret = -SIGILL;
510 break;
511 }
468 regs->regs[31] = epc + 8; 512 regs->regs[31] = epc + 8;
513 /*
514 * OK we are here either because we hit a BAL
515 * instruction or because we are emulating an
516 * old bgezal{,l} one. Lets figure out what the
517 * case really is.
518 */
519 if (!insn.i_format.rs) {
520 /*
521 * BAL or BGEZAL with rs == 0
522 * Doesn't matter if we are R6 or not. The
523 * result is the same
524 */
525 regs->cp0_epc += 4 +
526 (insn.i_format.simmediate << 2);
527 break;
528 }
529 /* Now do the real thing for non-R6 BGEZAL{,L} */
469 if ((long)regs->regs[insn.i_format.rs] >= 0) { 530 if ((long)regs->regs[insn.i_format.rs] >= 0) {
470 epc = epc + 4 + (insn.i_format.simmediate << 2); 531 epc = epc + 4 + (insn.i_format.simmediate << 2);
471 if (insn.i_format.rt == bgezall_op) 532 if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
477 538
478 case bposge32_op: 539 case bposge32_op:
479 if (!cpu_has_dsp) 540 if (!cpu_has_dsp)
480 goto sigill; 541 goto sigill_dsp;
481 542
482 dspcontrol = rddsp(0x01); 543 dspcontrol = rddsp(0x01);
483 544
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
508 /* 569 /*
509 * These are conditional and in i_format. 570 * These are conditional and in i_format.
510 */ 571 */
511 case beq_op:
512 case beql_op: 572 case beql_op:
573 if (NO_R6EMU)
574 goto sigill_r6;
575 case beq_op:
513 if (regs->regs[insn.i_format.rs] == 576 if (regs->regs[insn.i_format.rs] ==
514 regs->regs[insn.i_format.rt]) { 577 regs->regs[insn.i_format.rt]) {
515 epc = epc + 4 + (insn.i_format.simmediate << 2); 578 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
520 regs->cp0_epc = epc; 583 regs->cp0_epc = epc;
521 break; 584 break;
522 585
523 case bne_op:
524 case bnel_op: 586 case bnel_op:
587 if (NO_R6EMU)
588 goto sigill_r6;
589 case bne_op:
525 if (regs->regs[insn.i_format.rs] != 590 if (regs->regs[insn.i_format.rs] !=
526 regs->regs[insn.i_format.rt]) { 591 regs->regs[insn.i_format.rt]) {
527 epc = epc + 4 + (insn.i_format.simmediate << 2); 592 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
532 regs->cp0_epc = epc; 597 regs->cp0_epc = epc;
533 break; 598 break;
534 599
535 case blez_op: /* not really i_format */ 600 case blezl_op: /* not really i_format */
536 case blezl_op: 601 if (NO_R6EMU)
602 goto sigill_r6;
603 case blez_op:
604 /*
605 * Compact branches for R6 for the
606 * blez and blezl opcodes.
607 * BLEZ | rs = 0 | rt != 0 == BLEZALC
608 * BLEZ | rs = rt != 0 == BGEZALC
609 * BLEZ | rs != 0 | rt != 0 == BGEUC
610 * BLEZL | rs = 0 | rt != 0 == BLEZC
611 * BLEZL | rs = rt != 0 == BGEZC
612 * BLEZL | rs != 0 | rt != 0 == BGEC
613 *
614 * For real BLEZ{,L}, rt is always 0.
615 */
616
617 if (cpu_has_mips_r6 && insn.i_format.rt) {
618 if ((insn.i_format.opcode == blez_op) &&
619 ((!insn.i_format.rs && insn.i_format.rt) ||
620 (insn.i_format.rs == insn.i_format.rt)))
621 regs->regs[31] = epc + 4;
622 regs->cp0_epc += 8;
623 break;
624 }
537 /* rt field assumed to be zero */ 625 /* rt field assumed to be zero */
538 if ((long)regs->regs[insn.i_format.rs] <= 0) { 626 if ((long)regs->regs[insn.i_format.rs] <= 0) {
539 epc = epc + 4 + (insn.i_format.simmediate << 2); 627 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
544 regs->cp0_epc = epc; 632 regs->cp0_epc = epc;
545 break; 633 break;
546 634
547 case bgtz_op:
548 case bgtzl_op: 635 case bgtzl_op:
636 if (NO_R6EMU)
637 goto sigill_r6;
638 case bgtz_op:
639 /*
640 * Compact branches for R6 for the
641 * bgtz and bgtzl opcodes.
642 * BGTZ | rs = 0 | rt != 0 == BGTZALC
643 * BGTZ | rs = rt != 0 == BLTZALC
644 * BGTZ | rs != 0 | rt != 0 == BLTUC
645 * BGTZL | rs = 0 | rt != 0 == BGTZC
646 * BGTZL | rs = rt != 0 == BLTZC
647 * BGTZL | rs != 0 | rt != 0 == BLTC
648 *
 649 * *ZALC variant for BGTZ && rt != 0
 650 * For real BGTZ{,L}, rt is always 0.
651 */
652 if (cpu_has_mips_r6 && insn.i_format.rt) {
 653 if ((insn.i_format.opcode == bgtz_op) &&
654 ((!insn.i_format.rs && insn.i_format.rt) ||
655 (insn.i_format.rs == insn.i_format.rt)))
656 regs->regs[31] = epc + 4;
657 regs->cp0_epc += 8;
658 break;
659 }
660
549 /* rt field assumed to be zero */ 661 /* rt field assumed to be zero */
550 if ((long)regs->regs[insn.i_format.rs] > 0) { 662 if ((long)regs->regs[insn.i_format.rs] > 0) {
551 epc = epc + 4 + (insn.i_format.simmediate << 2); 663 epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
560 * And now the FPA/cp1 branch instructions. 672 * And now the FPA/cp1 branch instructions.
561 */ 673 */
562 case cop1_op: 674 case cop1_op:
563 preempt_disable(); 675 if (cpu_has_mips_r6 &&
564 if (is_fpu_owner()) 676 ((insn.i_format.rs == bc1eqz_op) ||
565 fcr31 = read_32bit_cp1_register(CP1_STATUS); 677 (insn.i_format.rs == bc1nez_op))) {
566 else 678 if (!used_math()) { /* First time FPU user */
567 fcr31 = current->thread.fpu.fcr31; 679 ret = init_fpu();
568 preempt_enable(); 680 if (ret && NO_R6EMU) {
569 681 ret = -ret;
570 bit = (insn.i_format.rt >> 2); 682 break;
571 bit += (bit != 0); 683 }
572 bit += 23; 684 ret = 0;
573 switch (insn.i_format.rt & 3) { 685 set_used_math();
574 case 0: /* bc1f */ 686 }
575 case 2: /* bc1fl */ 687 lose_fpu(1); /* Save FPU state for the emulator. */
576 if (~fcr31 & (1 << bit)) { 688 reg = insn.i_format.rt;
577 epc = epc + 4 + (insn.i_format.simmediate << 2); 689 bit = 0;
578 if (insn.i_format.rt == 2) 690 switch (insn.i_format.rs) {
579 ret = BRANCH_LIKELY_TAKEN; 691 case bc1eqz_op:
580 } else 692 /* Test bit 0 */
693 if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
694 & 0x1)
695 bit = 1;
696 break;
697 case bc1nez_op:
698 /* Test bit 0 */
699 if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
700 & 0x1))
701 bit = 1;
702 break;
703 }
704 own_fpu(1);
705 if (bit)
706 epc = epc + 4 +
707 (insn.i_format.simmediate << 2);
708 else
581 epc += 8; 709 epc += 8;
582 regs->cp0_epc = epc; 710 regs->cp0_epc = epc;
711
583 break; 712 break;
713 } else {
584 714
585 case 1: /* bc1t */ 715 preempt_disable();
586 case 3: /* bc1tl */ 716 if (is_fpu_owner())
587 if (fcr31 & (1 << bit)) { 717 fcr31 = read_32bit_cp1_register(CP1_STATUS);
588 epc = epc + 4 + (insn.i_format.simmediate << 2); 718 else
589 if (insn.i_format.rt == 3) 719 fcr31 = current->thread.fpu.fcr31;
590 ret = BRANCH_LIKELY_TAKEN; 720 preempt_enable();
591 } else 721
592 epc += 8; 722 bit = (insn.i_format.rt >> 2);
593 regs->cp0_epc = epc; 723 bit += (bit != 0);
724 bit += 23;
725 switch (insn.i_format.rt & 3) {
726 case 0: /* bc1f */
727 case 2: /* bc1fl */
728 if (~fcr31 & (1 << bit)) {
729 epc = epc + 4 +
730 (insn.i_format.simmediate << 2);
731 if (insn.i_format.rt == 2)
732 ret = BRANCH_LIKELY_TAKEN;
733 } else
734 epc += 8;
735 regs->cp0_epc = epc;
736 break;
737
738 case 1: /* bc1t */
739 case 3: /* bc1tl */
740 if (fcr31 & (1 << bit)) {
741 epc = epc + 4 +
742 (insn.i_format.simmediate << 2);
743 if (insn.i_format.rt == 3)
744 ret = BRANCH_LIKELY_TAKEN;
745 } else
746 epc += 8;
747 regs->cp0_epc = epc;
748 break;
749 }
594 break; 750 break;
595 } 751 }
596 break;
597#ifdef CONFIG_CPU_CAVIUM_OCTEON 752#ifdef CONFIG_CPU_CAVIUM_OCTEON
598 case lwc2_op: /* This is bbit0 on Octeon */ 753 case lwc2_op: /* This is bbit0 on Octeon */
599 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) 754 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
626 epc += 8; 781 epc += 8;
627 regs->cp0_epc = epc; 782 regs->cp0_epc = epc;
628 break; 783 break;
784#else
785 case bc6_op:
786 /* Only valid for MIPS R6 */
787 if (!cpu_has_mips_r6) {
788 ret = -SIGILL;
789 break;
790 }
791 regs->cp0_epc += 8;
792 break;
793 case balc6_op:
794 if (!cpu_has_mips_r6) {
795 ret = -SIGILL;
796 break;
797 }
798 /* Compact branch: BALC */
799 regs->regs[31] = epc + 4;
800 epc += 4 + (insn.i_format.simmediate << 2);
801 regs->cp0_epc = epc;
802 break;
803 case beqzcjic_op:
804 if (!cpu_has_mips_r6) {
805 ret = -SIGILL;
806 break;
807 }
808 /* Compact branch: BEQZC || JIC */
809 regs->cp0_epc += 8;
810 break;
811 case bnezcjialc_op:
812 if (!cpu_has_mips_r6) {
813 ret = -SIGILL;
814 break;
815 }
816 /* Compact branch: BNEZC || JIALC */
817 if (insn.i_format.rs)
818 regs->regs[31] = epc + 4;
819 regs->cp0_epc += 8;
820 break;
629#endif 821#endif
822 case cbcond0_op:
823 case cbcond1_op:
824 /* Only valid for MIPS R6 */
825 if (!cpu_has_mips_r6) {
826 ret = -SIGILL;
827 break;
828 }
829 /*
830 * Compact branches:
 831 * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
832 */
833 if (insn.i_format.rt && !insn.i_format.rs)
834 regs->regs[31] = epc + 4;
835 regs->cp0_epc += 8;
836 break;
630 } 837 }
631 838
632 return ret; 839 return ret;
633 840
634sigill: 841sigill_dsp:
635 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); 842 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
636 force_sig(SIGBUS, current); 843 force_sig(SIGBUS, current);
637 return -EFAULT; 844 return -EFAULT;
845sigill_r6:
846 pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
847 current->comm);
848 force_sig(SIGILL, current);
849 return -EFAULT;
638} 850}
639EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); 851EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
640 852
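
All of the compact-branch cases above reduce to the forbidden-slot rule from the comment at the top of the function: a fault in the slot means the branch was not taken, so EPC simply skips the branch and its slot, with the linking forms still writing the return address. A condensed sketch of that rule (hypothetical helper, not code from the patch):

    /* Hypothetical distillation of the compact-branch handling above. */
    static void skip_compact_branch(struct pt_regs *regs, int links)
    {
    	if (links)	/* BALC/BEQZALC-style forms link even when not taken */
    		regs->regs[31] = regs->cp0_epc + 4;
    	regs->cp0_epc += 8;	/* branch (4) + forbidden slot (4) */
    }
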
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 28bfdf2c59a5..82bd2b278a24 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -39,7 +39,7 @@ int cp0_timer_irq_installed;
39 39
40irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 40irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
41{ 41{
42 const int r2 = cpu_has_mips_r2; 42 const int r2 = cpu_has_mips_r2_r6;
43 struct clock_event_device *cd; 43 struct clock_event_device *cd;
44 int cpu = smp_processor_id(); 44 int cpu = smp_processor_id();
45 45
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 0384b05ab5a0..55b759a0019e 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -99,11 +99,11 @@ not_nmi:
99 xori t2, t1, 0x7 99 xori t2, t1, 0x7
100 beqz t2, 1f 100 beqz t2, 1f
101 li t3, 32 101 li t3, 32
102 addi t1, t1, 1 102 addiu t1, t1, 1
103 sllv t1, t3, t1 103 sllv t1, t3, t1
1041: /* At this point t1 == I-cache sets per way */ 1041: /* At this point t1 == I-cache sets per way */
105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ 105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
106 addi t2, t2, 1 106 addiu t2, t2, 1
107 mul t1, t1, t0 107 mul t1, t1, t0
108 mul t1, t1, t2 108 mul t1, t1, t2
109 109
@@ -126,11 +126,11 @@ icache_done:
126 xori t2, t1, 0x7 126 xori t2, t1, 0x7
127 beqz t2, 1f 127 beqz t2, 1f
128 li t3, 32 128 li t3, 32
129 addi t1, t1, 1 129 addiu t1, t1, 1
130 sllv t1, t3, t1 130 sllv t1, t3, t1
1311: /* At this point t1 == D-cache sets per way */ 1311: /* At this point t1 == D-cache sets per way */
132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ 132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
133 addi t2, t2, 1 133 addiu t2, t2, 1
134 mul t1, t1, t0 134 mul t1, t1, t0
135 mul t1, t1, t2 135 mul t1, t1, t2
136 136
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
250 mfc0 t0, CP0_MVPCONF0 250 mfc0 t0, CP0_MVPCONF0
251 srl t0, t0, MVPCONF0_PVPE_SHIFT 251 srl t0, t0, MVPCONF0_PVPE_SHIFT
252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
253 addi t7, t0, 1 253 addiu t7, t0, 1
254 254
255 /* If there's only 1, we're done */ 255 /* If there's only 1, we're done */
256 beqz t0, 2f 256 beqz t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
280 mttc0 t0, CP0_TCHALT 280 mttc0 t0, CP0_TCHALT
281 281
282 /* Next VPE */ 282 /* Next VPE */
283 addi t5, t5, 1 283 addiu t5, t5, 1
284 slt t0, t5, t7 284 slt t0, t5, t7
285 bnez t0, 1b 285 bnez t0, 1b
286 nop 286 nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
317 mfc0 t1, CP0_MVPCONF0 317 mfc0 t1, CP0_MVPCONF0
318 srl t1, t1, MVPCONF0_PVPE_SHIFT 318 srl t1, t1, MVPCONF0_PVPE_SHIFT
319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
320 addi t1, t1, 1 320 addiu t1, t1, 1
321 321
322 /* Calculate a mask for the VPE ID from EBase.CPUNum */ 322 /* Calculate a mask for the VPE ID from EBase.CPUNum */
323 clz t1, t1 323 clz t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
424 424
425 /* Next VPE */ 425 /* Next VPE */
4262: srl t6, t6, 1 4262: srl t6, t6, 1
427 addi t5, t5, 1 427 addiu t5, t5, 1
428 bnez t6, 1b 428 bnez t6, 1b
429 nop 429 nop
430 430
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 2d80b5f1aeae..09f4034f239f 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); 244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
245} 245}
246 246
247int daddiu_bug = -1; 247int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
248 248
249static inline void check_daddiu(void) 249static inline void check_daddiu(void)
250{ 250{
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
314 314
315void __init check_bugs64_early(void) 315void __init check_bugs64_early(void)
316{ 316{
317 check_mult_sh(); 317 if (!config_enabled(CONFIG_CPU_MIPSR6)) {
318 check_daddiu(); 318 check_mult_sh();
319 check_daddiu();
320 }
319} 321}
320 322
321void __init check_bugs64(void) 323void __init check_bugs64(void)
322{ 324{
323 check_daddi(); 325 if (!config_enabled(CONFIG_CPU_MIPSR6))
326 check_daddi();
324} 327}
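The config_enabled() guards above rely on constant folding: on a CONFIG_CPU_MIPSR6=y build the R2-era errata checks are still parsed and type-checked but compiled out. A minimal standalone sketch of the pattern (the config_enabled stand-in here is a plain 0/1 constant; the kernel derives the value from the Kconfig symbol):

#include <stdio.h>

#define MY_CONFIG_CPU_MIPSR6 1		/* stand-in for the Kconfig symbol */
#define config_enabled(x) (x)		/* stand-in: evaluates to 0 or 1 */

static void check_daddi(void)		/* placeholder for the real check */
{
	puts("running DADDI errata check");
}

void check_bugs64(void)
{
	/* Constant-folded: the call disappears entirely on R6 builds,
	 * but the code still compiles on every configuration. */
	if (!config_enabled(MY_CONFIG_CPU_MIPSR6))
		check_daddi();
}

int main(void)
{
	check_bugs64();
	return 0;
}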
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 228ae864c92e..81f0aedbba0f 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; 237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
238 break; 238 break;
239 239
240 /* R6 incompatible with everything else */
241 case MIPS_CPU_ISA_M64R6:
242 c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
243 case MIPS_CPU_ISA_M32R6:
244 c->isa_level |= MIPS_CPU_ISA_M32R6;
245 /* Break here so we don't add incompatible ISAs */
246 break;
240 case MIPS_CPU_ISA_M32R2: 247 case MIPS_CPU_ISA_M32R2:
241 c->isa_level |= MIPS_CPU_ISA_M32R2; 248 c->isa_level |= MIPS_CPU_ISA_M32R2;
242 case MIPS_CPU_ISA_M32R1: 249 case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
326 case 1: 333 case 1:
327 set_isa(c, MIPS_CPU_ISA_M32R2); 334 set_isa(c, MIPS_CPU_ISA_M32R2);
328 break; 335 break;
336 case 2:
337 set_isa(c, MIPS_CPU_ISA_M32R6);
338 break;
329 default: 339 default:
330 goto unknown; 340 goto unknown;
331 } 341 }
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
338 case 1: 348 case 1:
339 set_isa(c, MIPS_CPU_ISA_M64R2); 349 set_isa(c, MIPS_CPU_ISA_M64R2);
340 break; 350 break;
351 case 2:
352 set_isa(c, MIPS_CPU_ISA_M64R6);
353 break;
341 default: 354 default:
342 goto unknown; 355 goto unknown;
343 } 356 }
@@ -501,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
501 c->options |= MIPS_CPU_EVA; 514 c->options |= MIPS_CPU_EVA;
502 if (config5 & MIPS_CONF5_MRP) 515 if (config5 & MIPS_CONF5_MRP)
503 c->options |= MIPS_CPU_MAAR; 516 c->options |= MIPS_CPU_MAAR;
517 if (config5 & MIPS_CONF5_LLB)
518 c->options |= MIPS_CPU_RW_LLB;
504 519
505 return config5 & MIPS_CONF_M; 520 return config5 & MIPS_CONF_M;
506} 521}
@@ -543,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
543 } 558 }
544 559
545#ifndef CONFIG_MIPS_CPS 560#ifndef CONFIG_MIPS_CPS
546 if (cpu_has_mips_r2) { 561 if (cpu_has_mips_r2_r6) {
547 c->core = get_ebase_cpunum(); 562 c->core = get_ebase_cpunum();
548 if (cpu_has_mipsmt) 563 if (cpu_has_mipsmt)
549 c->core >>= fls(core_nvpes()) - 1; 564 c->core >>= fls(core_nvpes()) - 1;
@@ -898,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
898{ 913{
899 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 914 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
900 switch (c->processor_id & PRID_IMP_MASK) { 915 switch (c->processor_id & PRID_IMP_MASK) {
916 case PRID_IMP_QEMU_GENERIC:
917 c->writecombine = _CACHE_UNCACHED;
918 c->cputype = CPU_QEMU_GENERIC;
919 __cpu_name[cpu] = "MIPS GENERIC QEMU";
920 break;
901 case PRID_IMP_4KC: 921 case PRID_IMP_4KC:
902 c->cputype = CPU_4KC; 922 c->cputype = CPU_4KC;
903 c->writecombine = _CACHE_UNCACHED; 923 c->writecombine = _CACHE_UNCACHED;
@@ -1347,8 +1367,7 @@ void cpu_probe(void)
1347 if (c->options & MIPS_CPU_FPU) { 1367 if (c->options & MIPS_CPU_FPU) {
1348 c->fpu_id = cpu_get_fpu_id(); 1368 c->fpu_id = cpu_get_fpu_id();
1349 1369
1350 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1370 if (c->isa_level & cpu_has_mips_r) {
1351 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
1352 if (c->fpu_id & MIPS_FPIR_3D) 1371 if (c->fpu_id & MIPS_FPIR_3D)
1353 c->ases |= MIPS_ASE_MIPS3D; 1372 c->ases |= MIPS_ASE_MIPS3D;
1354 if (c->fpu_id & MIPS_FPIR_FREP) 1373 if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1356,7 +1375,7 @@ void cpu_probe(void)
1356 } 1375 }
1357 } 1376 }
1358 1377
1359 if (cpu_has_mips_r2) { 1378 if (cpu_has_mips_r2_r6) {
1360 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1379 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1361 /* R2 has Performance Counter Interrupt indicator */ 1380 /* R2 has Performance Counter Interrupt indicator */
1362 c->options |= MIPS_CPU_PCI; 1381 c->options |= MIPS_CPU_PCI;
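For reference, the new Config.AR decode above maps architecture-revision value 2 to the R6 ISA alongside the pre-existing values 0 and 1. A condensed sketch (the enum names are illustrative, not the kernel's):

#include <stdio.h>

enum isa { ISA_M32R1, ISA_M32R2, ISA_M32R6, ISA_UNKNOWN };

/* Map the Config.AR architecture-revision field to an ISA level,
 * mirroring the decode_config0() switch above. */
static enum isa decode_ar(unsigned int ar)
{
	switch (ar) {
	case 0:  return ISA_M32R1;
	case 1:  return ISA_M32R2;
	case 2:  return ISA_M32R6;	/* new in this series */
	default: return ISA_UNKNOWN;
	}
}

int main(void)
{
	printf("AR=2 -> ISA index %d\n", decode_ar(2));
	return 0;
}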
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c92b15df6893..d2c09f6475c5 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,29 +11,112 @@
11#include <linux/elf.h> 11#include <linux/elf.h>
12#include <linux/sched.h> 12#include <linux/sched.h>
13 13
14/* FPU modes */
14enum { 15enum {
15 FP_ERROR = -1, 16 FP_FRE,
16 FP_DOUBLE_64A = -2, 17 FP_FR0,
18 FP_FR1,
17}; 19};
18 20
21/**
22 * struct mode_req - ABI FPU mode requirements
23 * @single: The program being loaded needs an FPU but it will only issue
 24 * single-precision instructions, meaning that it can execute in
25 * either FR0 or FR1.
26 * @soft: The soft(-float) requirement means that the program being
 27 * loaded has no FPU dependency at all (i.e. it has no
28 * FPU instructions).
29 * @fr1: The program being loaded depends on FPU being in FR=1 mode.
30 * @frdefault: The program being loaded depends on the default FPU mode.
31 * That is FR0 for O32 and FR1 for N32/N64.
32 * @fre: The program being loaded depends on FPU with FRE=1. This mode is
33 * a bridge which uses FR=1 whilst still being able to maintain
34 * full compatibility with pre-existing code using the O32 FP32
35 * ABI.
36 *
37 * More information about the FP ABIs can be found here:
38 *
39 * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
40 *
41 */
42
43struct mode_req {
44 bool single;
45 bool soft;
46 bool fr1;
47 bool frdefault;
48 bool fre;
49};
50
51static const struct mode_req fpu_reqs[] = {
52 [MIPS_ABI_FP_ANY] = { true, true, true, true, true },
53 [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true },
54 [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false },
55 [MIPS_ABI_FP_SOFT] = { false, true, false, false, false },
56 [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
57 [MIPS_ABI_FP_XX] = { false, false, true, true, true },
58 [MIPS_ABI_FP_64] = { false, false, true, false, false },
59 [MIPS_ABI_FP_64A] = { false, false, true, false, true }
60};
61
62/*
63 * Mode requirements when .MIPS.abiflags is not present in the ELF.
64 * Not present means that everything is acceptable except FR1.
65 */
66static struct mode_req none_req = { true, true, false, true, true };
67
19int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 68int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
20 bool is_interp, struct arch_elf_state *state) 69 bool is_interp, struct arch_elf_state *state)
21{ 70{
22 struct elfhdr *ehdr = _ehdr; 71 struct elf32_hdr *ehdr32 = _ehdr;
23 struct elf_phdr *phdr = _phdr; 72 struct elf32_phdr *phdr32 = _phdr;
73 struct elf64_phdr *phdr64 = _phdr;
24 struct mips_elf_abiflags_v0 abiflags; 74 struct mips_elf_abiflags_v0 abiflags;
25 int ret; 75 int ret;
26 76
 27 if (config_enabled(CONFIG_64BIT) && 77 /* Let's see if this is an O32 ELF */
28 (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 78 if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
29 return 0; 79 /* FR = 1 for N32 */
30 if (phdr->p_type != PT_MIPS_ABIFLAGS) 80 if (ehdr32->e_flags & EF_MIPS_ABI2)
31 return 0; 81 state->overall_fp_mode = FP_FR1;
32 if (phdr->p_filesz < sizeof(abiflags)) 82 else
33 return -EINVAL; 83 /* Set a good default FPU mode for O32 */
84 state->overall_fp_mode = cpu_has_mips_r6 ?
85 FP_FRE : FP_FR0;
86
87 if (ehdr32->e_flags & EF_MIPS_FP64) {
88 /*
89 * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
90 * later if needed
91 */
92 if (is_interp)
93 state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
94 else
95 state->fp_abi = MIPS_ABI_FP_OLD_64;
96 }
97 if (phdr32->p_type != PT_MIPS_ABIFLAGS)
98 return 0;
99
100 if (phdr32->p_filesz < sizeof(abiflags))
101 return -EINVAL;
102
103 ret = kernel_read(elf, phdr32->p_offset,
104 (char *)&abiflags,
105 sizeof(abiflags));
106 } else {
107 /* FR=1 is really the only option for 64-bit */
108 state->overall_fp_mode = FP_FR1;
109
110 if (phdr64->p_type != PT_MIPS_ABIFLAGS)
111 return 0;
112 if (phdr64->p_filesz < sizeof(abiflags))
113 return -EINVAL;
114
115 ret = kernel_read(elf, phdr64->p_offset,
116 (char *)&abiflags,
117 sizeof(abiflags));
118 }
34 119
35 ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
36 sizeof(abiflags));
37 if (ret < 0) 120 if (ret < 0)
38 return ret; 121 return ret;
39 if (ret != sizeof(abiflags)) 122 if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
48 return 0; 131 return 0;
49} 132}
50 133
51static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) 134static inline unsigned get_fp_abi(int in_abi)
52{ 135{
53 /* If the ABI requirement is provided, simply return that */ 136 /* If the ABI requirement is provided, simply return that */
54 if (in_abi != -1) 137 if (in_abi != MIPS_ABI_FP_UNKNOWN)
55 return in_abi; 138 return in_abi;
56 139
57 /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ 140 /* Unknown ABI */
58 if (ehdr->e_flags & EF_MIPS_FP64) 141 return MIPS_ABI_FP_UNKNOWN;
59 return MIPS_ABI_FP_64;
60
61 /* Default to MIPS_ABI_FP_DOUBLE */
62 return MIPS_ABI_FP_DOUBLE;
63} 142}
64 143
65int arch_check_elf(void *_ehdr, bool has_interpreter, 144int arch_check_elf(void *_ehdr, bool has_interpreter,
66 struct arch_elf_state *state) 145 struct arch_elf_state *state)
67{ 146{
68 struct elfhdr *ehdr = _ehdr; 147 struct elf32_hdr *ehdr = _ehdr;
69 unsigned fp_abi, interp_fp_abi, abi0, abi1; 148 struct mode_req prog_req, interp_req;
149 int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
70 150
71 /* Ignore non-O32 binaries */ 151 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
72 if (config_enabled(CONFIG_64BIT) &&
73 (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
74 return 0; 152 return 0;
75 153
76 fp_abi = get_fp_abi(ehdr, state->fp_abi); 154 fp_abi = get_fp_abi(state->fp_abi);
77 155
78 if (has_interpreter) { 156 if (has_interpreter) {
79 interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); 157 interp_fp_abi = get_fp_abi(state->interp_fp_abi);
80 158
81 abi0 = min(fp_abi, interp_fp_abi); 159 abi0 = min(fp_abi, interp_fp_abi);
82 abi1 = max(fp_abi, interp_fp_abi); 160 abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
84 abi0 = abi1 = fp_abi; 162 abi0 = abi1 = fp_abi;
85 } 163 }
86 164
87 state->overall_abi = FP_ERROR; 165 /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
88 166 max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
89 if (abi0 == abi1) { 167 (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
90 state->overall_abi = abi0; 168 MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
91 } else if (abi0 == MIPS_ABI_FP_ANY) {
92 state->overall_abi = abi1;
93 } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
94 switch (abi1) {
95 case MIPS_ABI_FP_XX:
96 state->overall_abi = MIPS_ABI_FP_DOUBLE;
97 break;
98
99 case MIPS_ABI_FP_64A:
100 state->overall_abi = FP_DOUBLE_64A;
101 break;
102 }
103 } else if (abi0 == MIPS_ABI_FP_SINGLE ||
104 abi0 == MIPS_ABI_FP_SOFT) {
105 /* Cannot link with other ABIs */
106 } else if (abi0 == MIPS_ABI_FP_OLD_64) {
107 switch (abi1) {
108 case MIPS_ABI_FP_XX:
109 case MIPS_ABI_FP_64:
110 case MIPS_ABI_FP_64A:
111 state->overall_abi = MIPS_ABI_FP_64;
112 break;
113 }
114 } else if (abi0 == MIPS_ABI_FP_XX ||
115 abi0 == MIPS_ABI_FP_64 ||
116 abi0 == MIPS_ABI_FP_64A) {
117 state->overall_abi = MIPS_ABI_FP_64;
118 }
119 169
120 switch (state->overall_abi) { 170 if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
121 case MIPS_ABI_FP_64: 171 (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
122 case MIPS_ABI_FP_64A: 172 return -ELIBBAD;
123 case FP_DOUBLE_64A: 173
124 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 174 /* It's time to determine the FPU mode requirements */
125 return -ELIBBAD; 175 prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
126 break; 176 interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
127 177
128 case FP_ERROR: 178 /*
179 * Check whether the program's and interp's ABIs have a matching FPU
180 * mode requirement.
181 */
182 prog_req.single = interp_req.single && prog_req.single;
183 prog_req.soft = interp_req.soft && prog_req.soft;
184 prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
185 prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
186 prog_req.fre = interp_req.fre && prog_req.fre;
187
188 /*
189 * Determine the desired FPU mode
190 *
191 * Decision making:
192 *
193 * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This
194 * means that we have a combination of program and interpreter
195 * that inherently require the hybrid FP mode.
 196 * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or
 197 * fpxx case. In any-ABI (or no-ABI) we have no FPU
 198 * instructions, so we don't care about the mode and simply
 199 * use the one preferred by the hardware. In the fpxx case,
 200 * that ABI can handle both FR=1 and FR=0, so, again, we
 201 * simply choose the one preferred by the hardware. The same
 202 * applies when the code only uses single-precision FPU
 203 * instructions and the default ABI FPU mode is not suitable
 204 * (i.e. the single + any-ABI combination), shown by single
 205 * being true but frdefault being false: once more we set
 206 * the FPU mode to the one preferred by the hardware.
 207 * - We want FP_FR1 if that's the only matching mode and the
 208 * default one is not good.
 209 * - Return with -ELIBBAD if we can't find a matching FPU
 210 * mode.
211 */
212 if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
213 state->overall_fp_mode = FP_FRE;
214 else if ((prog_req.fr1 && prog_req.frdefault) ||
215 (prog_req.single && !prog_req.frdefault))
216 /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
217 state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
218 cpu_has_mips_r2_r6) ?
219 FP_FR1 : FP_FR0;
220 else if (prog_req.fr1)
221 state->overall_fp_mode = FP_FR1;
222 else if (!prog_req.fre && !prog_req.frdefault &&
223 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
129 return -ELIBBAD; 224 return -ELIBBAD;
130 }
131 225
132 return 0; 226 return 0;
133} 227}
134 228
135void mips_set_personality_fp(struct arch_elf_state *state) 229static inline void set_thread_fp_mode(int hybrid, int regs32)
136{ 230{
137 if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { 231 if (hybrid)
138 /* 232 set_thread_flag(TIF_HYBRID_FPREGS);
139 * Use hybrid FPRs for all code which can correctly execute 233 else
140 * with that mode.
141 */
142 switch (state->overall_abi) {
143 case MIPS_ABI_FP_DOUBLE:
144 case MIPS_ABI_FP_SINGLE:
145 case MIPS_ABI_FP_SOFT:
146 case MIPS_ABI_FP_XX:
147 case MIPS_ABI_FP_ANY:
148 /* FR=1, FRE=1 */
149 clear_thread_flag(TIF_32BIT_FPREGS);
150 set_thread_flag(TIF_HYBRID_FPREGS);
151 return;
152 }
153 }
154
155 switch (state->overall_abi) {
156 case MIPS_ABI_FP_DOUBLE:
157 case MIPS_ABI_FP_SINGLE:
158 case MIPS_ABI_FP_SOFT:
159 /* FR=0 */
160 set_thread_flag(TIF_32BIT_FPREGS);
161 clear_thread_flag(TIF_HYBRID_FPREGS); 234 clear_thread_flag(TIF_HYBRID_FPREGS);
162 break; 235 if (regs32)
163 236 set_thread_flag(TIF_32BIT_FPREGS);
164 case FP_DOUBLE_64A: 237 else
165 /* FR=1, FRE=1 */
166 clear_thread_flag(TIF_32BIT_FPREGS); 238 clear_thread_flag(TIF_32BIT_FPREGS);
167 set_thread_flag(TIF_HYBRID_FPREGS); 239}
168 break;
169 240
170 case MIPS_ABI_FP_64: 241void mips_set_personality_fp(struct arch_elf_state *state)
171 case MIPS_ABI_FP_64A: 242{
172 /* FR=1, FRE=0 */ 243 /*
173 clear_thread_flag(TIF_32BIT_FPREGS); 244 * This function is only ever called for O32 ELFs so we should
174 clear_thread_flag(TIF_HYBRID_FPREGS); 245 * not be worried about N32/N64 binaries.
175 break; 246 */
176 247
177 case MIPS_ABI_FP_XX: 248 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
178 case MIPS_ABI_FP_ANY: 249 return;
179 if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
180 set_thread_flag(TIF_32BIT_FPREGS);
181 else
182 clear_thread_flag(TIF_32BIT_FPREGS);
183 250
184 clear_thread_flag(TIF_HYBRID_FPREGS); 251 switch (state->overall_fp_mode) {
252 case FP_FRE:
253 set_thread_fp_mode(1, 0);
254 break;
255 case FP_FR0:
256 set_thread_fp_mode(0, 1);
257 break;
258 case FP_FR1:
259 set_thread_fp_mode(0, 0);
185 break; 260 break;
186
187 default: 261 default:
188 case FP_ERROR:
189 BUG(); 262 BUG();
190 } 263 }
191} 264}
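The net effect of the new arch_check_elf() is small enough to restate as a standalone sketch: AND the two requirement vectors together, then walk the decision ladder. Here cpu_prefers_fr1 stands in for the kernel's MIPS_FPIR_F64/cpu_has_mips_r2_r6 probe, and the trailing default assumes the mode chosen earlier in arch_elf_pt_proc() stays in force.

#include <stdbool.h>
#include <stdio.h>

enum { FP_FRE, FP_FR0, FP_FR1, FP_ERROR };

struct mode_req { bool single, soft, fr1, frdefault, fre; };

/* Intersect the program's and the interpreter's FPU-mode requirements,
 * then pick a mode the way arch_check_elf() above does. */
static int resolve_fp_mode(struct mode_req prog, struct mode_req interp,
			   bool cpu_prefers_fr1)
{
	prog.single    = prog.single    && interp.single;
	prog.soft      = prog.soft      && interp.soft;
	prog.fr1       = prog.fr1       && interp.fr1;
	prog.frdefault = prog.frdefault && interp.frdefault;
	prog.fre       = prog.fre       && interp.fre;

	if (prog.fre && !prog.frdefault && !prog.fr1)
		return FP_FRE;			/* hybrid mode required */
	if ((prog.fr1 && prog.frdefault) ||
	    (prog.single && !prog.frdefault))
		return cpu_prefers_fr1 ? FP_FR1 : FP_FR0;
	if (prog.fr1)
		return FP_FR1;
	if (!prog.fre && !prog.frdefault && !prog.single && !prog.soft)
		return FP_ERROR;		/* -ELIBBAD in the kernel */
	return FP_FR0;				/* earlier default is fine */
}

int main(void)
{
	/* FP_XX program linked against an FP_DOUBLE interpreter. */
	struct mode_req xx  = { false, false, true,  true, true };
	struct mode_req dbl = { false, false, false, true, true };
	printf("mode=%d\n", resolve_fp_mode(xx, dbl, true));
	return 0;
}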
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 4353d323f017..af41ba6db960 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -46,6 +46,11 @@ resume_userspace:
46 local_irq_disable # make sure we dont miss an 46 local_irq_disable # make sure we dont miss an
47 # interrupt setting need_resched 47 # interrupt setting need_resched
48 # between sampling and return 48 # between sampling and return
49#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
50 lw k0, TI_R2_EMUL_RET($28)
51 bnez k0, restore_all_from_r2_emul
52#endif
53
49 LONG_L a2, TI_FLAGS($28) # current->work 54 LONG_L a2, TI_FLAGS($28) # current->work
50 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) 55 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
51 bnez t0, work_pending 56 bnez t0, work_pending
@@ -114,6 +119,19 @@ restore_partial: # restore partial frame
114 RESTORE_SP_AND_RET 119 RESTORE_SP_AND_RET
115 .set at 120 .set at
116 121
122#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
123restore_all_from_r2_emul: # restore full frame
124 .set noat
125 sw zero, TI_R2_EMUL_RET($28) # reset it
126 RESTORE_TEMP
127 RESTORE_AT
128 RESTORE_STATIC
129 RESTORE_SOME
130 LONG_L sp, PT_R29(sp)
131 eretnc
132 .set at
133#endif
134
117work_pending: 135work_pending:
118 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 136 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
119 beqz t0, work_notifysig 137 beqz t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
158 jal syscall_trace_leave 176 jal syscall_trace_leave
159 b resume_userspace 177 b resume_userspace
160 178
161#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) 179#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
180 defined(CONFIG_MIPS_MT)
162 181
163/* 182/*
164 * MIPS32R2 Instruction Hazard Barrier - must be called 183 * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
171 nop 190 nop
172 END(mips_ihb) 191 END(mips_ihb)
173 192
174#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ 193#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index a5e26dd90592..2ebaabe3af15 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
125 nop 125 nop
126 nop 126 nop
127#endif 127#endif
128 .set arch=r4000 128 .set MIPS_ISA_ARCH_LEVEL_RAW
129 wait 129 wait
130 /* end of rollback region (the region size must be power of two) */ 130 /* end of rollback region (the region size must be power of two) */
1311: 1311:
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 0b9082b6b683..368c88b7eb6c 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -186,6 +186,7 @@ void __init check_wait(void)
186 case CPU_PROAPTIV: 186 case CPU_PROAPTIV:
187 case CPU_P5600: 187 case CPU_P5600:
188 case CPU_M5150: 188 case CPU_M5150:
189 case CPU_QEMU_GENERIC:
189 cpu_wait = r4k_wait; 190 cpu_wait = r4k_wait;
190 if (read_c0_config7() & MIPS_CONF7_WII) 191 if (read_c0_config7() & MIPS_CONF7_WII)
191 cpu_wait = r4k_wait_irqoff; 192 cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644
index 000000000000..64d17e41093b
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -0,0 +1,2378 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (c) 2014 Imagination Technologies Ltd.
7 * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
8 * Author: Markos Chandras <markos.chandras@imgtec.com>
9 *
10 * MIPS R2 user space instruction emulator for MIPS R6
11 *
12 */
13#include <linux/bug.h>
14#include <linux/compiler.h>
15#include <linux/debugfs.h>
16#include <linux/init.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/ptrace.h>
20#include <linux/seq_file.h>
21
22#include <asm/asm.h>
23#include <asm/branch.h>
24#include <asm/break.h>
25#include <asm/fpu.h>
26#include <asm/fpu_emulator.h>
27#include <asm/inst.h>
28#include <asm/mips-r2-to-r6-emul.h>
29#include <asm/local.h>
30#include <asm/ptrace.h>
31#include <asm/uaccess.h>
32
33#ifdef CONFIG_64BIT
34#define ADDIU "daddiu "
35#define INS "dins "
36#define EXT "dext "
37#else
38#define ADDIU "addiu "
39#define INS "ins "
40#define EXT "ext "
41#endif /* CONFIG_64BIT */
42
43#define SB "sb "
44#define LB "lb "
45#define LL "ll "
46#define SC "sc "
47
48DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
49DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
50DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
51
52extern const unsigned int fpucondbit[8];
53
54#define MIPS_R2_EMUL_TOTAL_PASS 10
55
56int mipsr2_emulation = 0;
57
58static int __init mipsr2emu_enable(char *s)
59{
60 mipsr2_emulation = 1;
61
 62 pr_info("MIPS R2-to-R6 Emulator Enabled!\n");
63
64 return 1;
65}
66__setup("mipsr2emu", mipsr2emu_enable);
67
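/*
 * Usage note (a sketch, assuming the series' documented behaviour): the
 * emulator stays off unless the kernel is booted with the "mipsr2emu"
 * command-line parameter, e.g.
 *
 *	console=ttyS0 mipsr2emu
 *
 * __setup() runs mipsr2emu_enable() during boot-time parameter parsing,
 * flipping the mipsr2_emulation flag consulted by the R2 trap paths.
 */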
68/**
 69 * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay slot
 70 * for performance, instead of the traditional (and rather slow)
 71 * way of using a stack trampoline.
72 * @regs: Process register set
73 * @ir: Instruction
74 */
75static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
76{
77 switch (MIPSInst_OPCODE(ir)) {
78 case addiu_op:
79 if (MIPSInst_RT(ir))
80 regs->regs[MIPSInst_RT(ir)] =
81 (s32)regs->regs[MIPSInst_RS(ir)] +
82 (s32)MIPSInst_SIMM(ir);
83 return 0;
84 case daddiu_op:
85 if (config_enabled(CONFIG_32BIT))
86 break;
87
88 if (MIPSInst_RT(ir))
89 regs->regs[MIPSInst_RT(ir)] =
90 (s64)regs->regs[MIPSInst_RS(ir)] +
91 (s64)MIPSInst_SIMM(ir);
92 return 0;
93 case lwc1_op:
94 case swc1_op:
95 case cop1_op:
96 case cop1x_op:
97 /* FPU instructions in delay slot */
98 return -SIGFPE;
99 case spec_op:
100 switch (MIPSInst_FUNC(ir)) {
101 case or_op:
102 if (MIPSInst_RD(ir))
103 regs->regs[MIPSInst_RD(ir)] =
104 regs->regs[MIPSInst_RS(ir)] |
105 regs->regs[MIPSInst_RT(ir)];
106 return 0;
107 case sll_op:
108 if (MIPSInst_RS(ir))
109 break;
110
111 if (MIPSInst_RD(ir))
112 regs->regs[MIPSInst_RD(ir)] =
113 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
114 MIPSInst_FD(ir));
115 return 0;
116 case srl_op:
117 if (MIPSInst_RS(ir))
118 break;
119
120 if (MIPSInst_RD(ir))
121 regs->regs[MIPSInst_RD(ir)] =
122 (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
123 MIPSInst_FD(ir));
124 return 0;
125 case addu_op:
126 if (MIPSInst_FD(ir))
127 break;
128
129 if (MIPSInst_RD(ir))
130 regs->regs[MIPSInst_RD(ir)] =
131 (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
132 (u32)regs->regs[MIPSInst_RT(ir)]);
133 return 0;
134 case subu_op:
135 if (MIPSInst_FD(ir))
136 break;
137
138 if (MIPSInst_RD(ir))
139 regs->regs[MIPSInst_RD(ir)] =
140 (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
141 (u32)regs->regs[MIPSInst_RT(ir)]);
142 return 0;
143 case dsll_op:
144 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
145 break;
146
147 if (MIPSInst_RD(ir))
148 regs->regs[MIPSInst_RD(ir)] =
149 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
150 MIPSInst_FD(ir));
151 return 0;
152 case dsrl_op:
153 if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
154 break;
155
156 if (MIPSInst_RD(ir))
157 regs->regs[MIPSInst_RD(ir)] =
158 (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
159 MIPSInst_FD(ir));
160 return 0;
161 case daddu_op:
162 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
163 break;
164
165 if (MIPSInst_RD(ir))
166 regs->regs[MIPSInst_RD(ir)] =
167 (u64)regs->regs[MIPSInst_RS(ir)] +
168 (u64)regs->regs[MIPSInst_RT(ir)];
169 return 0;
170 case dsubu_op:
171 if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
172 break;
173
174 if (MIPSInst_RD(ir))
175 regs->regs[MIPSInst_RD(ir)] =
176 (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
177 (u64)regs->regs[MIPSInst_RT(ir)]);
178 return 0;
179 }
180 break;
181 default:
182 pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
183 ir, MIPSInst_OPCODE(ir));
184 }
185
186 return SIGILL;
187}
188
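/*
 * Note on the (s32) casts above: on a 64-bit kernel a 32-bit result
 * must live sign-extended in the 64-bit register file, so the emulator
 * narrows to s32 and lets the implicit widening sign-extend, roughly:
 *
 *	regs->regs[rt] = (s32)((u32)rs + (u32)simm);
 *
 * A plain u32 sum would zero-extend instead and leave a non-canonical
 * register value that real hardware would never produce.
 */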
189/**
 190 * movf_func - Emulate a MOVF instruction
191 * @regs: Process register set
192 * @ir: Instruction
193 *
194 * Returns 0 since it always succeeds.
195 */
196static int movf_func(struct pt_regs *regs, u32 ir)
197{
198 u32 csr;
199 u32 cond;
200
201 csr = current->thread.fpu.fcr31;
202 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
203 if (((csr & cond) == 0) && MIPSInst_RD(ir))
204 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
205 MIPS_R2_STATS(movs);
206 return 0;
207}
208
209/**
210 * movt_func - Emulate a MOVT instruction
211 * @regs: Process register set
212 * @ir: Instruction
213 *
214 * Returns 0 since it always succeeds.
215 */
216static int movt_func(struct pt_regs *regs, u32 ir)
217{
218 u32 csr;
219 u32 cond;
220
221 csr = current->thread.fpu.fcr31;
222 cond = fpucondbit[MIPSInst_RT(ir) >> 2];
223
224 if (((csr & cond) != 0) && MIPSInst_RD(ir))
225 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
226
227 MIPS_R2_STATS(movs);
228
229 return 0;
230}
231
232/**
233 * jr_func - Emulate a JR instruction.
 234 * @regs: Process register set
235 * @ir: Instruction
236 *
237 * Returns SIGILL if JR was in delay slot, SIGEMT if we
238 * can't compute the EPC, SIGSEGV if we can't access the
239 * userland instruction or 0 on success.
240 */
241static int jr_func(struct pt_regs *regs, u32 ir)
242{
243 int err;
244 unsigned long cepc, epc, nepc;
245 u32 nir;
246
247 if (delay_slot(regs))
248 return SIGILL;
249
250 /* EPC after the RI/JR instruction */
251 nepc = regs->cp0_epc;
252 /* Roll back to the reserved R2 JR instruction */
253 regs->cp0_epc -= 4;
254 epc = regs->cp0_epc;
255 err = __compute_return_epc(regs);
256
257 if (err < 0)
258 return SIGEMT;
259
260
261 /* Computed EPC */
262 cepc = regs->cp0_epc;
263
264 /* Get DS instruction */
265 err = __get_user(nir, (u32 __user *)nepc);
266 if (err)
267 return SIGSEGV;
268
269 MIPS_R2BR_STATS(jrs);
270
 271 /* If nir == 0 (NOP), there is nothing else to do */
272 if (nir) {
273 /*
 274 * A negative err means an FPU instruction in the BD slot,
 275 * zero err means 'BD-slot emulation done'.
 276 * For anything else we go back to trampoline emulation.
277 */
278 err = mipsr6_emul(regs, nir);
279 if (err > 0) {
280 regs->cp0_epc = nepc;
281 err = mips_dsemul(regs, nir, cepc);
282 if (err == SIGILL)
283 err = SIGEMT;
284 MIPS_R2_STATS(dsemul);
285 }
286 }
287
288 return err;
289}
290
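/*
 * Flow of jr_func() at a glance (sketch):
 *
 *	nepc = EPC after the trapping JR  (address of the delay slot)
 *	EPC -= 4                          (roll back to the JR itself)
 *	__compute_return_epc()            (cepc = the jump target)
 *	__get_user(nir, nepc)             (fetch the delay-slot insn)
 *	mipsr6_emul()                     (fast in-kernel emulation, or)
 *	mips_dsemul()                     (fall back to the trampoline)
 *
 * On the trampoline path execution reaches cepc only after the
 * delay-slot instruction has run out of line.
 */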
291/**
292 * movz_func - Emulate a MOVZ instruction
293 * @regs: Process register set
294 * @ir: Instruction
295 *
296 * Returns 0 since it always succeeds.
297 */
298static int movz_func(struct pt_regs *regs, u32 ir)
299{
300 if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
301 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
302 MIPS_R2_STATS(movs);
303
304 return 0;
305}
306
307/**
 308 * movn_func - Emulate a MOVN instruction
309 * @regs: Process register set
310 * @ir: Instruction
311 *
312 * Returns 0 since it always succeeds.
313 */
314static int movn_func(struct pt_regs *regs, u32 ir)
315{
316 if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
317 regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
318 MIPS_R2_STATS(movs);
319
320 return 0;
321}
322
323/**
324 * mfhi_func - Emulate a MFHI instruction
325 * @regs: Process register set
326 * @ir: Instruction
327 *
328 * Returns 0 since it always succeeds.
329 */
330static int mfhi_func(struct pt_regs *regs, u32 ir)
331{
332 if (MIPSInst_RD(ir))
333 regs->regs[MIPSInst_RD(ir)] = regs->hi;
334
335 MIPS_R2_STATS(hilo);
336
337 return 0;
338}
339
340/**
341 * mthi_func - Emulate a MTHI instruction
342 * @regs: Process register set
343 * @ir: Instruction
344 *
345 * Returns 0 since it always succeeds.
346 */
347static int mthi_func(struct pt_regs *regs, u32 ir)
348{
349 regs->hi = regs->regs[MIPSInst_RS(ir)];
350
351 MIPS_R2_STATS(hilo);
352
353 return 0;
354}
355
356/**
357 * mflo_func - Emulate a MFLO instruction
358 * @regs: Process register set
359 * @ir: Instruction
360 *
361 * Returns 0 since it always succeeds.
362 */
363static int mflo_func(struct pt_regs *regs, u32 ir)
364{
365 if (MIPSInst_RD(ir))
366 regs->regs[MIPSInst_RD(ir)] = regs->lo;
367
368 MIPS_R2_STATS(hilo);
369
370 return 0;
371}
372
373/**
374 * mtlo_func - Emulate a MTLO instruction
375 * @regs: Process register set
376 * @ir: Instruction
377 *
378 * Returns 0 since it always succeeds.
379 */
380static int mtlo_func(struct pt_regs *regs, u32 ir)
381{
382 regs->lo = regs->regs[MIPSInst_RS(ir)];
383
384 MIPS_R2_STATS(hilo);
385
386 return 0;
387}
388
389/**
390 * mult_func - Emulate a MULT instruction
391 * @regs: Process register set
392 * @ir: Instruction
393 *
394 * Returns 0 since it always succeeds.
395 */
396static int mult_func(struct pt_regs *regs, u32 ir)
397{
398 s64 res;
399 s32 rt, rs;
400
401 rt = regs->regs[MIPSInst_RT(ir)];
402 rs = regs->regs[MIPSInst_RS(ir)];
403 res = (s64)rt * (s64)rs;
404
405 rs = res;
406 regs->lo = (s64)rs;
407 rt = res >> 32;
408 res = (s64)rt;
409 regs->hi = res;
410
411 MIPS_R2_STATS(muls);
412
413 return 0;
414}
415
416/**
417 * multu_func - Emulate a MULTU instruction
418 * @regs: Process register set
419 * @ir: Instruction
420 *
421 * Returns 0 since it always succeeds.
422 */
423static int multu_func(struct pt_regs *regs, u32 ir)
424{
425 u64 res;
426 u32 rt, rs;
427
428 rt = regs->regs[MIPSInst_RT(ir)];
429 rs = regs->regs[MIPSInst_RS(ir)];
430 res = (u64)rt * (u64)rs;
431 rt = res;
432 regs->lo = (s64)rt;
433 regs->hi = (s64)(res >> 32);
434
435 MIPS_R2_STATS(muls);
436
437 return 0;
438}
439
440/**
441 * div_func - Emulate a DIV instruction
442 * @regs: Process register set
443 * @ir: Instruction
444 *
445 * Returns 0 since it always succeeds.
446 */
447static int div_func(struct pt_regs *regs, u32 ir)
448{
449 s32 rt, rs;
450
451 rt = regs->regs[MIPSInst_RT(ir)];
452 rs = regs->regs[MIPSInst_RS(ir)];
453
454 regs->lo = (s64)(rs / rt);
455 regs->hi = (s64)(rs % rt);
456
457 MIPS_R2_STATS(divs);
458
459 return 0;
460}
461
462/**
463 * divu_func - Emulate a DIVU instruction
464 * @regs: Process register set
465 * @ir: Instruction
466 *
467 * Returns 0 since it always succeeds.
468 */
469static int divu_func(struct pt_regs *regs, u32 ir)
470{
471 u32 rt, rs;
472
473 rt = regs->regs[MIPSInst_RT(ir)];
474 rs = regs->regs[MIPSInst_RS(ir)];
475
476 regs->lo = (s64)(rs / rt);
477 regs->hi = (s64)(rs % rt);
478
479 MIPS_R2_STATS(divs);
480
481 return 0;
482}
483
484/**
485 * dmult_func - Emulate a DMULT instruction
486 * @regs: Process register set
487 * @ir: Instruction
488 *
489 * Returns 0 on success or SIGILL for 32-bit kernels.
490 */
491static int dmult_func(struct pt_regs *regs, u32 ir)
492{
493 s64 res;
494 s64 rt, rs;
495
496 if (config_enabled(CONFIG_32BIT))
497 return SIGILL;
498
499 rt = regs->regs[MIPSInst_RT(ir)];
500 rs = regs->regs[MIPSInst_RS(ir)];
501 res = rt * rs;
502
503 regs->lo = res;
504 __asm__ __volatile__(
505 "dmuh %0, %1, %2\t\n"
506 : "=r"(res)
507 : "r"(rt), "r"(rs));
508
509 regs->hi = res;
510
511 MIPS_R2_STATS(muls);
512
513 return 0;
514}
515
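/*
 * Why the inline asm above: R6 removed the HI/LO registers that DMULT
 * wrote, so the high half of the product comes from the new dmuh
 * instruction. A portable GCC equivalent on a 64-bit host would be:
 *
 *	__int128 p = (__int128)rs * rt;
 *	lo = (s64)p;
 *	hi = (s64)(p >> 64);
 */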
516/**
517 * dmultu_func - Emulate a DMULTU instruction
518 * @regs: Process register set
519 * @ir: Instruction
520 *
521 * Returns 0 on success or SIGILL for 32-bit kernels.
522 */
523static int dmultu_func(struct pt_regs *regs, u32 ir)
524{
525 u64 res;
526 u64 rt, rs;
527
528 if (config_enabled(CONFIG_32BIT))
529 return SIGILL;
530
531 rt = regs->regs[MIPSInst_RT(ir)];
532 rs = regs->regs[MIPSInst_RS(ir)];
533 res = rt * rs;
534
535 regs->lo = res;
536 __asm__ __volatile__(
537 "dmuhu %0, %1, %2\t\n"
538 : "=r"(res)
539 : "r"(rt), "r"(rs));
540
541 regs->hi = res;
542
543 MIPS_R2_STATS(muls);
544
545 return 0;
546}
547
548/**
549 * ddiv_func - Emulate a DDIV instruction
550 * @regs: Process register set
551 * @ir: Instruction
552 *
553 * Returns 0 on success or SIGILL for 32-bit kernels.
554 */
555static int ddiv_func(struct pt_regs *regs, u32 ir)
556{
557 s64 rt, rs;
558
559 if (config_enabled(CONFIG_32BIT))
560 return SIGILL;
561
562 rt = regs->regs[MIPSInst_RT(ir)];
563 rs = regs->regs[MIPSInst_RS(ir)];
564
565 regs->lo = rs / rt;
566 regs->hi = rs % rt;
567
568 MIPS_R2_STATS(divs);
569
570 return 0;
571}
572
573/**
574 * ddivu_func - Emulate a DDIVU instruction
575 * @regs: Process register set
576 * @ir: Instruction
577 *
578 * Returns 0 on success or SIGILL for 32-bit kernels.
579 */
580static int ddivu_func(struct pt_regs *regs, u32 ir)
581{
582 u64 rt, rs;
583
584 if (config_enabled(CONFIG_32BIT))
585 return SIGILL;
586
587 rt = regs->regs[MIPSInst_RT(ir)];
588 rs = regs->regs[MIPSInst_RS(ir)];
589
590 regs->lo = rs / rt;
591 regs->hi = rs % rt;
592
593 MIPS_R2_STATS(divs);
594
595 return 0;
596}
597
598/* R6 removed instructions for the SPECIAL opcode */
599static struct r2_decoder_table spec_op_table[] = {
600 { 0xfc1ff83f, 0x00000008, jr_func },
601 { 0xfc00ffff, 0x00000018, mult_func },
602 { 0xfc00ffff, 0x00000019, multu_func },
603 { 0xfc00ffff, 0x0000001c, dmult_func },
604 { 0xfc00ffff, 0x0000001d, dmultu_func },
605 { 0xffff07ff, 0x00000010, mfhi_func },
606 { 0xfc1fffff, 0x00000011, mthi_func },
607 { 0xffff07ff, 0x00000012, mflo_func },
608 { 0xfc1fffff, 0x00000013, mtlo_func },
609 { 0xfc0307ff, 0x00000001, movf_func },
610 { 0xfc0307ff, 0x00010001, movt_func },
611 { 0xfc0007ff, 0x0000000a, movz_func },
612 { 0xfc0007ff, 0x0000000b, movn_func },
613 { 0xfc00ffff, 0x0000001a, div_func },
614 { 0xfc00ffff, 0x0000001b, divu_func },
615 { 0xfc00ffff, 0x0000001e, ddiv_func },
616 { 0xfc00ffff, 0x0000001f, ddivu_func },
617 {}
618};
619
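/*
 * Worked example of the mask/code pairs above: "mult $a0, $a1" encodes
 * as 0x00850018 (rs=4, rt=5, function 0x18). ANDing with the mask
 * 0xfc00ffff keeps the opcode and function fields and clears rs/rt:
 *
 *	0x00850018 & 0xfc00ffff == 0x00000018
 *
 * which matches the mult_func entry's code for any rs/rt pair.
 */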
620/**
621 * madd_func - Emulate a MADD instruction
622 * @regs: Process register set
623 * @ir: Instruction
624 *
625 * Returns 0 since it always succeeds.
626 */
627static int madd_func(struct pt_regs *regs, u32 ir)
628{
629 s64 res;
630 s32 rt, rs;
631
632 rt = regs->regs[MIPSInst_RT(ir)];
633 rs = regs->regs[MIPSInst_RS(ir)];
634 res = (s64)rt * (s64)rs;
635 rt = regs->hi;
636 rs = regs->lo;
637 res += ((((s64)rt) << 32) | (u32)rs);
638
639 rt = res;
640 regs->lo = (s64)rt;
641 rs = res >> 32;
642 regs->hi = (s64)rs;
643
644 MIPS_R2_STATS(dsps);
645
646 return 0;
647}
648
649/**
650 * maddu_func - Emulate a MADDU instruction
651 * @regs: Process register set
652 * @ir: Instruction
653 *
654 * Returns 0 since it always succeeds.
655 */
656static int maddu_func(struct pt_regs *regs, u32 ir)
657{
658 u64 res;
659 u32 rt, rs;
660
661 rt = regs->regs[MIPSInst_RT(ir)];
662 rs = regs->regs[MIPSInst_RS(ir)];
663 res = (u64)rt * (u64)rs;
664 rt = regs->hi;
665 rs = regs->lo;
666 res += ((((s64)rt) << 32) | (u32)rs);
667
668 rt = res;
669 regs->lo = (s64)rt;
670 rs = res >> 32;
671 regs->hi = (s64)rs;
672
673 MIPS_R2_STATS(dsps);
674
675 return 0;
676}
677
678/**
679 * msub_func - Emulate a MSUB instruction
680 * @regs: Process register set
681 * @ir: Instruction
682 *
683 * Returns 0 since it always succeeds.
684 */
685static int msub_func(struct pt_regs *regs, u32 ir)
686{
687 s64 res;
688 s32 rt, rs;
689
690 rt = regs->regs[MIPSInst_RT(ir)];
691 rs = regs->regs[MIPSInst_RS(ir)];
692 res = (s64)rt * (s64)rs;
693 rt = regs->hi;
694 rs = regs->lo;
695 res = ((((s64)rt) << 32) | (u32)rs) - res;
696
697 rt = res;
698 regs->lo = (s64)rt;
699 rs = res >> 32;
700 regs->hi = (s64)rs;
701
702 MIPS_R2_STATS(dsps);
703
704 return 0;
705}
706
707/**
708 * msubu_func - Emulate a MSUBU instruction
709 * @regs: Process register set
710 * @ir: Instruction
711 *
712 * Returns 0 since it always succeeds.
713 */
714static int msubu_func(struct pt_regs *regs, u32 ir)
715{
716 u64 res;
717 u32 rt, rs;
718
719 rt = regs->regs[MIPSInst_RT(ir)];
720 rs = regs->regs[MIPSInst_RS(ir)];
721 res = (u64)rt * (u64)rs;
722 rt = regs->hi;
723 rs = regs->lo;
724 res = ((((s64)rt) << 32) | (u32)rs) - res;
725
726 rt = res;
727 regs->lo = (s64)rt;
728 rs = res >> 32;
729 regs->hi = (s64)rs;
730
731 MIPS_R2_STATS(dsps);
732
733 return 0;
734}
735
736/**
737 * mul_func - Emulate a MUL instruction
738 * @regs: Process register set
739 * @ir: Instruction
740 *
741 * Returns 0 since it always succeeds.
742 */
743static int mul_func(struct pt_regs *regs, u32 ir)
744{
745 s64 res;
746 s32 rt, rs;
747
748 if (!MIPSInst_RD(ir))
749 return 0;
750 rt = regs->regs[MIPSInst_RT(ir)];
751 rs = regs->regs[MIPSInst_RS(ir)];
752 res = (s64)rt * (s64)rs;
753
754 rs = res;
755 regs->regs[MIPSInst_RD(ir)] = (s64)rs;
756
757 MIPS_R2_STATS(muls);
758
759 return 0;
760}
761
762/**
763 * clz_func - Emulate a CLZ instruction
764 * @regs: Process register set
765 * @ir: Instruction
766 *
767 * Returns 0 since it always succeeds.
768 */
769static int clz_func(struct pt_regs *regs, u32 ir)
770{
771 u32 res;
772 u32 rs;
773
774 if (!MIPSInst_RD(ir))
775 return 0;
776
777 rs = regs->regs[MIPSInst_RS(ir)];
778 __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
779 regs->regs[MIPSInst_RD(ir)] = res;
780
781 MIPS_R2_STATS(bops);
782
783 return 0;
784}
785
786/**
787 * clo_func - Emulate a CLO instruction
788 * @regs: Process register set
789 * @ir: Instruction
790 *
791 * Returns 0 since it always succeeds.
792 */
793
794static int clo_func(struct pt_regs *regs, u32 ir)
795{
796 u32 res;
797 u32 rs;
798
799 if (!MIPSInst_RD(ir))
800 return 0;
801
802 rs = regs->regs[MIPSInst_RS(ir)];
803 __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
804 regs->regs[MIPSInst_RD(ir)] = res;
805
806 MIPS_R2_STATS(bops);
807
808 return 0;
809}
810
811/**
812 * dclz_func - Emulate a DCLZ instruction
813 * @regs: Process register set
814 * @ir: Instruction
815 *
816 * Returns 0 since it always succeeds.
817 */
818static int dclz_func(struct pt_regs *regs, u32 ir)
819{
820 u64 res;
821 u64 rs;
822
823 if (config_enabled(CONFIG_32BIT))
824 return SIGILL;
825
826 if (!MIPSInst_RD(ir))
827 return 0;
828
829 rs = regs->regs[MIPSInst_RS(ir)];
830 __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
831 regs->regs[MIPSInst_RD(ir)] = res;
832
833 MIPS_R2_STATS(bops);
834
835 return 0;
836}
837
838/**
839 * dclo_func - Emulate a DCLO instruction
840 * @regs: Process register set
841 * @ir: Instruction
842 *
843 * Returns 0 since it always succeeds.
844 */
845static int dclo_func(struct pt_regs *regs, u32 ir)
846{
847 u64 res;
848 u64 rs;
849
850 if (config_enabled(CONFIG_32BIT))
851 return SIGILL;
852
853 if (!MIPSInst_RD(ir))
854 return 0;
855
856 rs = regs->regs[MIPSInst_RS(ir)];
857 __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
858 regs->regs[MIPSInst_RD(ir)] = res;
859
860 MIPS_R2_STATS(bops);
861
862 return 0;
863}
864
865/* R6 removed instructions for the SPECIAL2 opcode */
866static struct r2_decoder_table spec2_op_table[] = {
867 { 0xfc00ffff, 0x70000000, madd_func },
868 { 0xfc00ffff, 0x70000001, maddu_func },
869 { 0xfc0007ff, 0x70000002, mul_func },
870 { 0xfc00ffff, 0x70000004, msub_func },
871 { 0xfc00ffff, 0x70000005, msubu_func },
872 { 0xfc0007ff, 0x70000020, clz_func },
873 { 0xfc0007ff, 0x70000021, clo_func },
874 { 0xfc0007ff, 0x70000024, dclz_func },
875 { 0xfc0007ff, 0x70000025, dclo_func },
876 { }
877};
878
879static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
880 struct r2_decoder_table *table)
881{
882 struct r2_decoder_table *p;
883 int err;
884
885 for (p = table; p->func; p++) {
886 if ((inst & p->mask) == p->code) {
887 err = (p->func)(regs, inst);
888 return err;
889 }
890 }
891 return SIGILL;
892}
893
894/**
895 * mipsr2_decoder: Decode and emulate a MIPS R2 instruction
896 * @regs: Process register set
897 * @inst: Instruction to decode and emulate
898 */
899int mipsr2_decoder(struct pt_regs *regs, u32 inst)
900{
901 int err = 0;
902 unsigned long vaddr;
903 u32 nir;
904 unsigned long cpc, epc, nepc, r31, res, rs, rt;
905
906 void __user *fault_addr = NULL;
907 int pass = 0;
908
909repeat:
910 r31 = regs->regs[31];
911 epc = regs->cp0_epc;
912 err = compute_return_epc(regs);
913 if (err < 0) {
914 BUG();
915 return SIGEMT;
916 }
 917 pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n",
918 inst, epc, pass);
919
920 switch (MIPSInst_OPCODE(inst)) {
921 case spec_op:
922 err = mipsr2_find_op_func(regs, inst, spec_op_table);
923 if (err < 0) {
924 /* FPU instruction under JR */
925 regs->cp0_cause |= CAUSEF_BD;
926 goto fpu_emul;
927 }
928 break;
929 case spec2_op:
930 err = mipsr2_find_op_func(regs, inst, spec2_op_table);
931 break;
932 case bcond_op:
933 rt = MIPSInst_RT(inst);
934 rs = MIPSInst_RS(inst);
935 switch (rt) {
936 case tgei_op:
937 if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
938 do_trap_or_bp(regs, 0, "TGEI");
939
940 MIPS_R2_STATS(traps);
941
942 break;
943 case tgeiu_op:
944 if (regs->regs[rs] >= MIPSInst_UIMM(inst))
945 do_trap_or_bp(regs, 0, "TGEIU");
946
947 MIPS_R2_STATS(traps);
948
949 break;
950 case tlti_op:
951 if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
952 do_trap_or_bp(regs, 0, "TLTI");
953
954 MIPS_R2_STATS(traps);
955
956 break;
957 case tltiu_op:
958 if (regs->regs[rs] < MIPSInst_UIMM(inst))
959 do_trap_or_bp(regs, 0, "TLTIU");
960
961 MIPS_R2_STATS(traps);
962
963 break;
964 case teqi_op:
965 if (regs->regs[rs] == MIPSInst_SIMM(inst))
966 do_trap_or_bp(regs, 0, "TEQI");
967
968 MIPS_R2_STATS(traps);
969
970 break;
971 case tnei_op:
972 if (regs->regs[rs] != MIPSInst_SIMM(inst))
973 do_trap_or_bp(regs, 0, "TNEI");
974
975 MIPS_R2_STATS(traps);
976
977 break;
978 case bltzl_op:
979 case bgezl_op:
980 case bltzall_op:
981 case bgezall_op:
982 if (delay_slot(regs)) {
983 err = SIGILL;
984 break;
985 }
986 regs->regs[31] = r31;
987 regs->cp0_epc = epc;
988 err = __compute_return_epc(regs);
989 if (err < 0)
990 return SIGEMT;
991 if (err != BRANCH_LIKELY_TAKEN)
992 break;
993 cpc = regs->cp0_epc;
994 nepc = epc + 4;
995 err = __get_user(nir, (u32 __user *)nepc);
996 if (err) {
997 err = SIGSEGV;
998 break;
999 }
1000 /*
1001 * This will probably be optimized away when
1002 * CONFIG_DEBUG_FS is not enabled
1003 */
1004 switch (rt) {
1005 case bltzl_op:
1006 MIPS_R2BR_STATS(bltzl);
1007 break;
1008 case bgezl_op:
1009 MIPS_R2BR_STATS(bgezl);
1010 break;
1011 case bltzall_op:
1012 MIPS_R2BR_STATS(bltzall);
1013 break;
1014 case bgezall_op:
1015 MIPS_R2BR_STATS(bgezall);
1016 break;
1017 }
1018
1019 switch (MIPSInst_OPCODE(nir)) {
1020 case cop1_op:
1021 case cop1x_op:
1022 case lwc1_op:
1023 case swc1_op:
1024 regs->cp0_cause |= CAUSEF_BD;
1025 goto fpu_emul;
1026 }
1027 if (nir) {
1028 err = mipsr6_emul(regs, nir);
1029 if (err > 0) {
1030 err = mips_dsemul(regs, nir, cpc);
1031 if (err == SIGILL)
1032 err = SIGEMT;
1033 MIPS_R2_STATS(dsemul);
1034 }
1035 }
1036 break;
1037 case bltzal_op:
1038 case bgezal_op:
1039 if (delay_slot(regs)) {
1040 err = SIGILL;
1041 break;
1042 }
1043 regs->regs[31] = r31;
1044 regs->cp0_epc = epc;
1045 err = __compute_return_epc(regs);
1046 if (err < 0)
1047 return SIGEMT;
1048 cpc = regs->cp0_epc;
1049 nepc = epc + 4;
1050 err = __get_user(nir, (u32 __user *)nepc);
1051 if (err) {
1052 err = SIGSEGV;
1053 break;
1054 }
1055 /*
1056 * This will probably be optimized away when
1057 * CONFIG_DEBUG_FS is not enabled
1058 */
1059 switch (rt) {
1060 case bltzal_op:
1061 MIPS_R2BR_STATS(bltzal);
1062 break;
1063 case bgezal_op:
1064 MIPS_R2BR_STATS(bgezal);
1065 break;
1066 }
1067
1068 switch (MIPSInst_OPCODE(nir)) {
1069 case cop1_op:
1070 case cop1x_op:
1071 case lwc1_op:
1072 case swc1_op:
1073 regs->cp0_cause |= CAUSEF_BD;
1074 goto fpu_emul;
1075 }
1076 if (nir) {
1077 err = mipsr6_emul(regs, nir);
1078 if (err > 0) {
1079 err = mips_dsemul(regs, nir, cpc);
1080 if (err == SIGILL)
1081 err = SIGEMT;
1082 MIPS_R2_STATS(dsemul);
1083 }
1084 }
1085 break;
1086 default:
1087 regs->regs[31] = r31;
1088 regs->cp0_epc = epc;
1089 err = SIGILL;
1090 break;
1091 }
1092 break;
1093
1094 case beql_op:
1095 case bnel_op:
1096 case blezl_op:
1097 case bgtzl_op:
1098 if (delay_slot(regs)) {
1099 err = SIGILL;
1100 break;
1101 }
1102 regs->regs[31] = r31;
1103 regs->cp0_epc = epc;
1104 err = __compute_return_epc(regs);
1105 if (err < 0)
1106 return SIGEMT;
1107 if (err != BRANCH_LIKELY_TAKEN)
1108 break;
1109 cpc = regs->cp0_epc;
1110 nepc = epc + 4;
1111 err = __get_user(nir, (u32 __user *)nepc);
1112 if (err) {
1113 err = SIGSEGV;
1114 break;
1115 }
1116 /*
1117 * This will probably be optimized away when
1118 * CONFIG_DEBUG_FS is not enabled
1119 */
1120 switch (MIPSInst_OPCODE(inst)) {
1121 case beql_op:
1122 MIPS_R2BR_STATS(beql);
1123 break;
1124 case bnel_op:
1125 MIPS_R2BR_STATS(bnel);
1126 break;
1127 case blezl_op:
1128 MIPS_R2BR_STATS(blezl);
1129 break;
1130 case bgtzl_op:
1131 MIPS_R2BR_STATS(bgtzl);
1132 break;
1133 }
1134
1135 switch (MIPSInst_OPCODE(nir)) {
1136 case cop1_op:
1137 case cop1x_op:
1138 case lwc1_op:
1139 case swc1_op:
1140 regs->cp0_cause |= CAUSEF_BD;
1141 goto fpu_emul;
1142 }
1143 if (nir) {
1144 err = mipsr6_emul(regs, nir);
1145 if (err > 0) {
1146 err = mips_dsemul(regs, nir, cpc);
1147 if (err == SIGILL)
1148 err = SIGEMT;
1149 MIPS_R2_STATS(dsemul);
1150 }
1151 }
1152 break;
1153 case lwc1_op:
1154 case swc1_op:
1155 case cop1_op:
1156 case cop1x_op:
1157fpu_emul:
1158 regs->regs[31] = r31;
1159 regs->cp0_epc = epc;
1160 if (!used_math()) { /* First time FPU user. */
1161 err = init_fpu();
1162 set_used_math();
1163 }
1164 lose_fpu(1); /* Save FPU state for the emulator. */
1165
1166 err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1167 &fault_addr);
1168
1169 /*
 1170 * This is a tricky issue - lose_fpu() uses LL/SC atomics if the
 1171 * FPU is owned, which effectively cancels a user-level LL/SC. So
 1172 * it could be argued that we should not restore FPU ownership
 1173 * here. But a sequence of multiple FPU instructions is far more
 1174 * common than LL-FPU-SC, so we prefer to loop here until the
 1175 * next scheduler cycle cancels FPU ownership.
1176 */
1177 own_fpu(1); /* Restore FPU state. */
1178
1179 if (err)
1180 current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1181
1182 MIPS_R2_STATS(fpus);
1183
1184 break;
1185
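/*
 * LWL/LWR (and the SWL/SWR, LDL/LDR, SDL/SDR cases below) were removed
 * in R6, so each one is emulated byte by byte: step from the addressed
 * byte toward the word boundary, insert each byte into its lane with
 * INS/DINS, and let the __ex_table fixups turn a faulting user access
 * into SIGSEGV.
 */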
1186 case lwl_op:
1187 rt = regs->regs[MIPSInst_RT(inst)];
1188 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1189 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1190 current->thread.cp0_baduaddr = vaddr;
1191 err = SIGSEGV;
1192 break;
1193 }
1194 __asm__ __volatile__(
1195 " .set push\n"
1196 " .set reorder\n"
1197#ifdef CONFIG_CPU_LITTLE_ENDIAN
1198 "1:" LB "%1, 0(%2)\n"
1199 INS "%0, %1, 24, 8\n"
1200 " andi %1, %2, 0x3\n"
1201 " beq $0, %1, 9f\n"
1202 ADDIU "%2, %2, -1\n"
1203 "2:" LB "%1, 0(%2)\n"
1204 INS "%0, %1, 16, 8\n"
1205 " andi %1, %2, 0x3\n"
1206 " beq $0, %1, 9f\n"
1207 ADDIU "%2, %2, -1\n"
1208 "3:" LB "%1, 0(%2)\n"
1209 INS "%0, %1, 8, 8\n"
1210 " andi %1, %2, 0x3\n"
1211 " beq $0, %1, 9f\n"
1212 ADDIU "%2, %2, -1\n"
1213 "4:" LB "%1, 0(%2)\n"
1214 INS "%0, %1, 0, 8\n"
1215#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1216 "1:" LB "%1, 0(%2)\n"
1217 INS "%0, %1, 24, 8\n"
1218 ADDIU "%2, %2, 1\n"
1219 " andi %1, %2, 0x3\n"
1220 " beq $0, %1, 9f\n"
1221 "2:" LB "%1, 0(%2)\n"
1222 INS "%0, %1, 16, 8\n"
1223 ADDIU "%2, %2, 1\n"
1224 " andi %1, %2, 0x3\n"
1225 " beq $0, %1, 9f\n"
1226 "3:" LB "%1, 0(%2)\n"
1227 INS "%0, %1, 8, 8\n"
1228 ADDIU "%2, %2, 1\n"
1229 " andi %1, %2, 0x3\n"
1230 " beq $0, %1, 9f\n"
1231 "4:" LB "%1, 0(%2)\n"
1232 INS "%0, %1, 0, 8\n"
1233#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1234 "9: sll %0, %0, 0\n"
1235 "10:\n"
1236 " .insn\n"
1237 " .section .fixup,\"ax\"\n"
1238 "8: li %3,%4\n"
1239 " j 10b\n"
1240 " .previous\n"
1241 " .section __ex_table,\"a\"\n"
1242 " .word 1b,8b\n"
1243 " .word 2b,8b\n"
1244 " .word 3b,8b\n"
1245 " .word 4b,8b\n"
1246 " .previous\n"
1247 " .set pop\n"
1248 : "+&r"(rt), "=&r"(rs),
1249 "+&r"(vaddr), "+&r"(err)
1250 : "i"(SIGSEGV));
1251
1252 if (MIPSInst_RT(inst) && !err)
1253 regs->regs[MIPSInst_RT(inst)] = rt;
1254
1255 MIPS_R2_STATS(loads);
1256
1257 break;
1258
1259 case lwr_op:
1260 rt = regs->regs[MIPSInst_RT(inst)];
1261 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1262 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1263 current->thread.cp0_baduaddr = vaddr;
1264 err = SIGSEGV;
1265 break;
1266 }
1267 __asm__ __volatile__(
1268 " .set push\n"
1269 " .set reorder\n"
1270#ifdef CONFIG_CPU_LITTLE_ENDIAN
1271 "1:" LB "%1, 0(%2)\n"
1272 INS "%0, %1, 0, 8\n"
1273 ADDIU "%2, %2, 1\n"
1274 " andi %1, %2, 0x3\n"
1275 " beq $0, %1, 9f\n"
1276 "2:" LB "%1, 0(%2)\n"
1277 INS "%0, %1, 8, 8\n"
1278 ADDIU "%2, %2, 1\n"
1279 " andi %1, %2, 0x3\n"
1280 " beq $0, %1, 9f\n"
1281 "3:" LB "%1, 0(%2)\n"
1282 INS "%0, %1, 16, 8\n"
1283 ADDIU "%2, %2, 1\n"
1284 " andi %1, %2, 0x3\n"
1285 " beq $0, %1, 9f\n"
1286 "4:" LB "%1, 0(%2)\n"
1287 INS "%0, %1, 24, 8\n"
1288 " sll %0, %0, 0\n"
1289#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1290 "1:" LB "%1, 0(%2)\n"
1291 INS "%0, %1, 0, 8\n"
1292 " andi %1, %2, 0x3\n"
1293 " beq $0, %1, 9f\n"
1294 ADDIU "%2, %2, -1\n"
1295 "2:" LB "%1, 0(%2)\n"
1296 INS "%0, %1, 8, 8\n"
1297 " andi %1, %2, 0x3\n"
1298 " beq $0, %1, 9f\n"
1299 ADDIU "%2, %2, -1\n"
1300 "3:" LB "%1, 0(%2)\n"
1301 INS "%0, %1, 16, 8\n"
1302 " andi %1, %2, 0x3\n"
1303 " beq $0, %1, 9f\n"
1304 ADDIU "%2, %2, -1\n"
1305 "4:" LB "%1, 0(%2)\n"
1306 INS "%0, %1, 24, 8\n"
1307 " sll %0, %0, 0\n"
1308#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1309 "9:\n"
1310 "10:\n"
1311 " .insn\n"
1312 " .section .fixup,\"ax\"\n"
1313 "8: li %3,%4\n"
1314 " j 10b\n"
1315 " .previous\n"
1316 " .section __ex_table,\"a\"\n"
1317 " .word 1b,8b\n"
1318 " .word 2b,8b\n"
1319 " .word 3b,8b\n"
1320 " .word 4b,8b\n"
1321 " .previous\n"
1322 " .set pop\n"
1323 : "+&r"(rt), "=&r"(rs),
1324 "+&r"(vaddr), "+&r"(err)
1325 : "i"(SIGSEGV));
1326 if (MIPSInst_RT(inst) && !err)
1327 regs->regs[MIPSInst_RT(inst)] = rt;
1328
1329 MIPS_R2_STATS(loads);
1330
1331 break;
1332
1333 case swl_op:
1334 rt = regs->regs[MIPSInst_RT(inst)];
1335 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1336 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1337 current->thread.cp0_baduaddr = vaddr;
1338 err = SIGSEGV;
1339 break;
1340 }
1341 __asm__ __volatile__(
1342 " .set push\n"
1343 " .set reorder\n"
1344#ifdef CONFIG_CPU_LITTLE_ENDIAN
1345 EXT "%1, %0, 24, 8\n"
1346 "1:" SB "%1, 0(%2)\n"
1347 " andi %1, %2, 0x3\n"
1348 " beq $0, %1, 9f\n"
1349 ADDIU "%2, %2, -1\n"
1350 EXT "%1, %0, 16, 8\n"
1351 "2:" SB "%1, 0(%2)\n"
1352 " andi %1, %2, 0x3\n"
1353 " beq $0, %1, 9f\n"
1354 ADDIU "%2, %2, -1\n"
1355 EXT "%1, %0, 8, 8\n"
1356 "3:" SB "%1, 0(%2)\n"
1357 " andi %1, %2, 0x3\n"
1358 " beq $0, %1, 9f\n"
1359 ADDIU "%2, %2, -1\n"
1360 EXT "%1, %0, 0, 8\n"
1361 "4:" SB "%1, 0(%2)\n"
1362#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1363 EXT "%1, %0, 24, 8\n"
1364 "1:" SB "%1, 0(%2)\n"
1365 ADDIU "%2, %2, 1\n"
1366 " andi %1, %2, 0x3\n"
1367 " beq $0, %1, 9f\n"
1368 EXT "%1, %0, 16, 8\n"
1369 "2:" SB "%1, 0(%2)\n"
1370 ADDIU "%2, %2, 1\n"
1371 " andi %1, %2, 0x3\n"
1372 " beq $0, %1, 9f\n"
1373 EXT "%1, %0, 8, 8\n"
1374 "3:" SB "%1, 0(%2)\n"
1375 ADDIU "%2, %2, 1\n"
1376 " andi %1, %2, 0x3\n"
1377 " beq $0, %1, 9f\n"
1378 EXT "%1, %0, 0, 8\n"
1379 "4:" SB "%1, 0(%2)\n"
1380#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1381 "9:\n"
1382 " .insn\n"
1383 " .section .fixup,\"ax\"\n"
1384 "8: li %3,%4\n"
1385 " j 9b\n"
1386 " .previous\n"
1387 " .section __ex_table,\"a\"\n"
1388 " .word 1b,8b\n"
1389 " .word 2b,8b\n"
1390 " .word 3b,8b\n"
1391 " .word 4b,8b\n"
1392 " .previous\n"
1393 " .set pop\n"
1394 : "+&r"(rt), "=&r"(rs),
1395 "+&r"(vaddr), "+&r"(err)
1396 : "i"(SIGSEGV)
1397 : "memory");
1398
1399 MIPS_R2_STATS(stores);
1400
1401 break;
1402
1403 case swr_op:
1404 rt = regs->regs[MIPSInst_RT(inst)];
1405 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1406 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
1407 current->thread.cp0_baduaddr = vaddr;
1408 err = SIGSEGV;
1409 break;
1410 }
1411 __asm__ __volatile__(
1412 " .set push\n"
1413 " .set reorder\n"
1414#ifdef CONFIG_CPU_LITTLE_ENDIAN
1415 EXT "%1, %0, 0, 8\n"
1416 "1:" SB "%1, 0(%2)\n"
1417 ADDIU "%2, %2, 1\n"
1418 " andi %1, %2, 0x3\n"
1419 " beq $0, %1, 9f\n"
1420 EXT "%1, %0, 8, 8\n"
1421 "2:" SB "%1, 0(%2)\n"
1422 ADDIU "%2, %2, 1\n"
1423 " andi %1, %2, 0x3\n"
1424 " beq $0, %1, 9f\n"
1425 EXT "%1, %0, 16, 8\n"
1426 "3:" SB "%1, 0(%2)\n"
1427 ADDIU "%2, %2, 1\n"
1428 " andi %1, %2, 0x3\n"
1429 " beq $0, %1, 9f\n"
1430 EXT "%1, %0, 24, 8\n"
1431 "4:" SB "%1, 0(%2)\n"
1432#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1433 EXT "%1, %0, 0, 8\n"
1434 "1:" SB "%1, 0(%2)\n"
1435 " andi %1, %2, 0x3\n"
1436 " beq $0, %1, 9f\n"
1437 ADDIU "%2, %2, -1\n"
1438 EXT "%1, %0, 8, 8\n"
1439 "2:" SB "%1, 0(%2)\n"
1440 " andi %1, %2, 0x3\n"
1441 " beq $0, %1, 9f\n"
1442 ADDIU "%2, %2, -1\n"
1443 EXT "%1, %0, 16, 8\n"
1444 "3:" SB "%1, 0(%2)\n"
1445 " andi %1, %2, 0x3\n"
1446 " beq $0, %1, 9f\n"
1447 ADDIU "%2, %2, -1\n"
1448 EXT "%1, %0, 24, 8\n"
1449 "4:" SB "%1, 0(%2)\n"
1450#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1451 "9:\n"
1452 " .insn\n"
1453 " .section .fixup,\"ax\"\n"
1454 "8: li %3,%4\n"
1455 " j 9b\n"
1456 " .previous\n"
1457 " .section __ex_table,\"a\"\n"
1458 " .word 1b,8b\n"
1459 " .word 2b,8b\n"
1460 " .word 3b,8b\n"
1461 " .word 4b,8b\n"
1462 " .previous\n"
1463 " .set pop\n"
1464 : "+&r"(rt), "=&r"(rs),
1465 "+&r"(vaddr), "+&r"(err)
1466 : "i"(SIGSEGV)
1467 : "memory");
1468
1469 MIPS_R2_STATS(stores);
1470
1471 break;
1472
1473 case ldl_op:
1474 if (config_enabled(CONFIG_32BIT)) {
1475 err = SIGILL;
1476 break;
1477 }
1478
1479 rt = regs->regs[MIPSInst_RT(inst)];
1480 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1481 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1482 current->thread.cp0_baduaddr = vaddr;
1483 err = SIGSEGV;
1484 break;
1485 }
1486 __asm__ __volatile__(
1487 " .set push\n"
1488 " .set reorder\n"
1489#ifdef CONFIG_CPU_LITTLE_ENDIAN
1490 "1: lb %1, 0(%2)\n"
1491 " dinsu %0, %1, 56, 8\n"
1492 " andi %1, %2, 0x7\n"
1493 " beq $0, %1, 9f\n"
1494 " daddiu %2, %2, -1\n"
1495 "2: lb %1, 0(%2)\n"
1496 " dinsu %0, %1, 48, 8\n"
1497 " andi %1, %2, 0x7\n"
1498 " beq $0, %1, 9f\n"
1499 " daddiu %2, %2, -1\n"
1500 "3: lb %1, 0(%2)\n"
1501 " dinsu %0, %1, 40, 8\n"
1502 " andi %1, %2, 0x7\n"
1503 " beq $0, %1, 9f\n"
1504 " daddiu %2, %2, -1\n"
1505 "4: lb %1, 0(%2)\n"
1506 " dinsu %0, %1, 32, 8\n"
1507 " andi %1, %2, 0x7\n"
1508 " beq $0, %1, 9f\n"
1509 " daddiu %2, %2, -1\n"
1510 "5: lb %1, 0(%2)\n"
1511 " dins %0, %1, 24, 8\n"
1512 " andi %1, %2, 0x7\n"
1513 " beq $0, %1, 9f\n"
1514 " daddiu %2, %2, -1\n"
1515 "6: lb %1, 0(%2)\n"
1516 " dins %0, %1, 16, 8\n"
1517 " andi %1, %2, 0x7\n"
1518 " beq $0, %1, 9f\n"
1519 " daddiu %2, %2, -1\n"
1520 "7: lb %1, 0(%2)\n"
1521 " dins %0, %1, 8, 8\n"
1522 " andi %1, %2, 0x7\n"
1523 " beq $0, %1, 9f\n"
1524 " daddiu %2, %2, -1\n"
1525 "0: lb %1, 0(%2)\n"
1526 " dins %0, %1, 0, 8\n"
1527#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1528 "1: lb %1, 0(%2)\n"
1529 " dinsu %0, %1, 56, 8\n"
1530 " daddiu %2, %2, 1\n"
1531 " andi %1, %2, 0x7\n"
1532 " beq $0, %1, 9f\n"
1533 "2: lb %1, 0(%2)\n"
1534 " dinsu %0, %1, 48, 8\n"
1535 " daddiu %2, %2, 1\n"
1536 " andi %1, %2, 0x7\n"
1537 " beq $0, %1, 9f\n"
1538 "3: lb %1, 0(%2)\n"
1539 " dinsu %0, %1, 40, 8\n"
1540 " daddiu %2, %2, 1\n"
1541 " andi %1, %2, 0x7\n"
1542 " beq $0, %1, 9f\n"
1543 "4: lb %1, 0(%2)\n"
1544 " dinsu %0, %1, 32, 8\n"
1545 " daddiu %2, %2, 1\n"
1546 " andi %1, %2, 0x7\n"
1547 " beq $0, %1, 9f\n"
1548 "5: lb %1, 0(%2)\n"
1549 " dins %0, %1, 24, 8\n"
1550 " daddiu %2, %2, 1\n"
1551 " andi %1, %2, 0x7\n"
1552 " beq $0, %1, 9f\n"
1553 "6: lb %1, 0(%2)\n"
1554 " dins %0, %1, 16, 8\n"
1555 " daddiu %2, %2, 1\n"
1556 " andi %1, %2, 0x7\n"
1557 " beq $0, %1, 9f\n"
1558 "7: lb %1, 0(%2)\n"
1559 " dins %0, %1, 8, 8\n"
1560 " daddiu %2, %2, 1\n"
1561 " andi %1, %2, 0x7\n"
1562 " beq $0, %1, 9f\n"
1563 "0: lb %1, 0(%2)\n"
1564 " dins %0, %1, 0, 8\n"
1565#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1566 "9:\n"
1567 " .insn\n"
1568 " .section .fixup,\"ax\"\n"
1569 "8: li %3,%4\n"
1570 " j 9b\n"
1571 " .previous\n"
1572 " .section __ex_table,\"a\"\n"
1573 " .word 1b,8b\n"
1574 " .word 2b,8b\n"
1575 " .word 3b,8b\n"
1576 " .word 4b,8b\n"
1577 " .word 5b,8b\n"
1578 " .word 6b,8b\n"
1579 " .word 7b,8b\n"
1580 " .word 0b,8b\n"
1581 " .previous\n"
1582 " .set pop\n"
1583 : "+&r"(rt), "=&r"(rs),
1584 "+&r"(vaddr), "+&r"(err)
1585 : "i"(SIGSEGV));
1586 if (MIPSInst_RT(inst) && !err)
1587 regs->regs[MIPSInst_RT(inst)] = rt;
1588
1589 MIPS_R2_STATS(loads);
1590 break;
1591
1592 case ldr_op:
1593 if (config_enabled(CONFIG_32BIT)) {
1594 err = SIGILL;
1595 break;
1596 }
1597
1598 rt = regs->regs[MIPSInst_RT(inst)];
1599 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1600 if (!access_ok(VERIFY_READ, vaddr, 8)) {
1601 current->thread.cp0_baduaddr = vaddr;
1602 err = SIGSEGV;
1603 break;
1604 }
1605 __asm__ __volatile__(
1606 " .set push\n"
1607 " .set reorder\n"
1608#ifdef CONFIG_CPU_LITTLE_ENDIAN
1609 "1: lb %1, 0(%2)\n"
1610 " dins %0, %1, 0, 8\n"
1611 " daddiu %2, %2, 1\n"
1612 " andi %1, %2, 0x7\n"
1613 " beq $0, %1, 9f\n"
1614 "2: lb %1, 0(%2)\n"
1615 " dins %0, %1, 8, 8\n"
1616 " daddiu %2, %2, 1\n"
1617 " andi %1, %2, 0x7\n"
1618 " beq $0, %1, 9f\n"
1619 "3: lb %1, 0(%2)\n"
1620 " dins %0, %1, 16, 8\n"
1621 " daddiu %2, %2, 1\n"
1622 " andi %1, %2, 0x7\n"
1623 " beq $0, %1, 9f\n"
1624 "4: lb %1, 0(%2)\n"
1625 " dins %0, %1, 24, 8\n"
1626 " daddiu %2, %2, 1\n"
1627 " andi %1, %2, 0x7\n"
1628 " beq $0, %1, 9f\n"
1629 "5: lb %1, 0(%2)\n"
1630 " dinsu %0, %1, 32, 8\n"
1631 " daddiu %2, %2, 1\n"
1632 " andi %1, %2, 0x7\n"
1633 " beq $0, %1, 9f\n"
1634 "6: lb %1, 0(%2)\n"
1635 " dinsu %0, %1, 40, 8\n"
1636 " daddiu %2, %2, 1\n"
1637 " andi %1, %2, 0x7\n"
1638 " beq $0, %1, 9f\n"
1639 "7: lb %1, 0(%2)\n"
1640 " dinsu %0, %1, 48, 8\n"
1641 " daddiu %2, %2, 1\n"
1642 " andi %1, %2, 0x7\n"
1643 " beq $0, %1, 9f\n"
1644 "0: lb %1, 0(%2)\n"
1645 " dinsu %0, %1, 56, 8\n"
1646#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1647 "1: lb %1, 0(%2)\n"
1648 " dins %0, %1, 0, 8\n"
1649 " andi %1, %2, 0x7\n"
1650 " beq $0, %1, 9f\n"
1651 " daddiu %2, %2, -1\n"
1652 "2: lb %1, 0(%2)\n"
1653 " dins %0, %1, 8, 8\n"
1654 " andi %1, %2, 0x7\n"
1655 " beq $0, %1, 9f\n"
1656 " daddiu %2, %2, -1\n"
1657 "3: lb %1, 0(%2)\n"
1658 " dins %0, %1, 16, 8\n"
1659 " andi %1, %2, 0x7\n"
1660 " beq $0, %1, 9f\n"
1661 " daddiu %2, %2, -1\n"
1662 "4: lb %1, 0(%2)\n"
1663 " dins %0, %1, 24, 8\n"
1664 " andi %1, %2, 0x7\n"
1665 " beq $0, %1, 9f\n"
1666 " daddiu %2, %2, -1\n"
1667 "5: lb %1, 0(%2)\n"
1668 " dinsu %0, %1, 32, 8\n"
1669 " andi %1, %2, 0x7\n"
1670 " beq $0, %1, 9f\n"
1671 " daddiu %2, %2, -1\n"
1672 "6: lb %1, 0(%2)\n"
1673 " dinsu %0, %1, 40, 8\n"
1674 " andi %1, %2, 0x7\n"
1675 " beq $0, %1, 9f\n"
1676 " daddiu %2, %2, -1\n"
1677 "7: lb %1, 0(%2)\n"
1678 " dinsu %0, %1, 48, 8\n"
1679 " andi %1, %2, 0x7\n"
1680 " beq $0, %1, 9f\n"
1681 " daddiu %2, %2, -1\n"
1682 "0: lb %1, 0(%2)\n"
1683 " dinsu %0, %1, 56, 8\n"
1684#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1685 "9:\n"
1686 " .insn\n"
1687 " .section .fixup,\"ax\"\n"
1688 "8: li %3,%4\n"
1689 " j 9b\n"
1690 " .previous\n"
1691 " .section __ex_table,\"a\"\n"
1692 " .word 1b,8b\n"
1693 " .word 2b,8b\n"
1694 " .word 3b,8b\n"
1695 " .word 4b,8b\n"
1696 " .word 5b,8b\n"
1697 " .word 6b,8b\n"
1698 " .word 7b,8b\n"
1699 " .word 0b,8b\n"
1700 " .previous\n"
1701 " .set pop\n"
1702 : "+&r"(rt), "=&r"(rs),
1703 "+&r"(vaddr), "+&r"(err)
1704 : "i"(SIGSEGV));
1705 if (MIPSInst_RT(inst) && !err)
1706 regs->regs[MIPSInst_RT(inst)] = rt;
1707
1708 MIPS_R2_STATS(loads);
1709 break;
1710
1711 case sdl_op:
1712 if (config_enabled(CONFIG_32BIT)) {
1713 err = SIGILL;
1714 break;
1715 }
1716
1717 rt = regs->regs[MIPSInst_RT(inst)];
1718 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1719 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1720 current->thread.cp0_baduaddr = vaddr;
1721 err = SIGSEGV;
1722 break;
1723 }
1724 __asm__ __volatile__(
1725 " .set push\n"
1726 " .set reorder\n"
1727#ifdef CONFIG_CPU_LITTLE_ENDIAN
1728 " dextu %1, %0, 56, 8\n"
1729 "1: sb %1, 0(%2)\n"
1730 " andi %1, %2, 0x7\n"
1731 " beq $0, %1, 9f\n"
1732 " daddiu %2, %2, -1\n"
1733 " dextu %1, %0, 48, 8\n"
1734 "2: sb %1, 0(%2)\n"
1735 " andi %1, %2, 0x7\n"
1736 " beq $0, %1, 9f\n"
1737 " daddiu %2, %2, -1\n"
1738 " dextu %1, %0, 40, 8\n"
1739 "3: sb %1, 0(%2)\n"
1740 " andi %1, %2, 0x7\n"
1741 " beq $0, %1, 9f\n"
1742 " daddiu %2, %2, -1\n"
1743 " dextu %1, %0, 32, 8\n"
1744 "4: sb %1, 0(%2)\n"
1745 " andi %1, %2, 0x7\n"
1746 " beq $0, %1, 9f\n"
1747 " daddiu %2, %2, -1\n"
1748 " dext %1, %0, 24, 8\n"
1749 "5: sb %1, 0(%2)\n"
1750 " andi %1, %2, 0x7\n"
1751 " beq $0, %1, 9f\n"
1752 " daddiu %2, %2, -1\n"
1753 " dext %1, %0, 16, 8\n"
1754 "6: sb %1, 0(%2)\n"
1755 " andi %1, %2, 0x7\n"
1756 " beq $0, %1, 9f\n"
1757 " daddiu %2, %2, -1\n"
1758 " dext %1, %0, 8, 8\n"
1759 "7: sb %1, 0(%2)\n"
1760 " andi %1, %2, 0x7\n"
1761 " beq $0, %1, 9f\n"
1762 " daddiu %2, %2, -1\n"
1763 " dext %1, %0, 0, 8\n"
1764 "0: sb %1, 0(%2)\n"
1765#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1766 " dextu %1, %0, 56, 8\n"
1767 "1: sb %1, 0(%2)\n"
1768 " daddiu %2, %2, 1\n"
1769 " andi %1, %2, 0x7\n"
1770 " beq $0, %1, 9f\n"
1771 " dextu %1, %0, 48, 8\n"
1772 "2: sb %1, 0(%2)\n"
1773 " daddiu %2, %2, 1\n"
1774 " andi %1, %2, 0x7\n"
1775 " beq $0, %1, 9f\n"
1776 " dextu %1, %0, 40, 8\n"
1777 "3: sb %1, 0(%2)\n"
1778 " daddiu %2, %2, 1\n"
1779 " andi %1, %2, 0x7\n"
1780 " beq $0, %1, 9f\n"
1781 " dextu %1, %0, 32, 8\n"
1782 "4: sb %1, 0(%2)\n"
1783 " daddiu %2, %2, 1\n"
1784 " andi %1, %2, 0x7\n"
1785 " beq $0, %1, 9f\n"
1786 " dext %1, %0, 24, 8\n"
1787 "5: sb %1, 0(%2)\n"
1788 " daddiu %2, %2, 1\n"
1789 " andi %1, %2, 0x7\n"
1790 " beq $0, %1, 9f\n"
1791 " dext %1, %0, 16, 8\n"
1792 "6: sb %1, 0(%2)\n"
1793 " daddiu %2, %2, 1\n"
1794 " andi %1, %2, 0x7\n"
1795 " beq $0, %1, 9f\n"
1796 " dext %1, %0, 8, 8\n"
1797 "7: sb %1, 0(%2)\n"
1798 " daddiu %2, %2, 1\n"
1799 " andi %1, %2, 0x7\n"
1800 " beq $0, %1, 9f\n"
1801 " dext %1, %0, 0, 8\n"
1802 "0: sb %1, 0(%2)\n"
1803#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1804 "9:\n"
1805 " .insn\n"
1806 " .section .fixup,\"ax\"\n"
1807 "8: li %3,%4\n"
1808 " j 9b\n"
1809 " .previous\n"
1810 " .section __ex_table,\"a\"\n"
1811 " .word 1b,8b\n"
1812 " .word 2b,8b\n"
1813 " .word 3b,8b\n"
1814 " .word 4b,8b\n"
1815 " .word 5b,8b\n"
1816 " .word 6b,8b\n"
1817 " .word 7b,8b\n"
1818 " .word 0b,8b\n"
1819 " .previous\n"
1820 " .set pop\n"
1821 : "+&r"(rt), "=&r"(rs),
1822 "+&r"(vaddr), "+&r"(err)
1823 : "i"(SIGSEGV)
1824 : "memory");
1825
1826 MIPS_R2_STATS(stores);
1827 break;
1828
1829 case sdr_op:
1830 if (config_enabled(CONFIG_32BIT)) {
1831 err = SIGILL;
1832 break;
1833 }
1834
1835 rt = regs->regs[MIPSInst_RT(inst)];
1836 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1837 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
1838 current->thread.cp0_baduaddr = vaddr;
1839 err = SIGSEGV;
1840 break;
1841 }
1842 __asm__ __volatile__(
1843 " .set push\n"
1844 " .set reorder\n"
1845#ifdef CONFIG_CPU_LITTLE_ENDIAN
1846 " dext %1, %0, 0, 8\n"
1847 "1: sb %1, 0(%2)\n"
1848 " daddiu %2, %2, 1\n"
1849 " andi %1, %2, 0x7\n"
1850 " beq $0, %1, 9f\n"
1851 " dext %1, %0, 8, 8\n"
1852 "2: sb %1, 0(%2)\n"
1853 " daddiu %2, %2, 1\n"
1854 " andi %1, %2, 0x7\n"
1855 " beq $0, %1, 9f\n"
1856 " dext %1, %0, 16, 8\n"
1857 "3: sb %1, 0(%2)\n"
1858 " daddiu %2, %2, 1\n"
1859 " andi %1, %2, 0x7\n"
1860 " beq $0, %1, 9f\n"
1861 " dext %1, %0, 24, 8\n"
1862 "4: sb %1, 0(%2)\n"
1863 " daddiu %2, %2, 1\n"
1864 " andi %1, %2, 0x7\n"
1865 " beq $0, %1, 9f\n"
1866 " dextu %1, %0, 32, 8\n"
1867 "5: sb %1, 0(%2)\n"
1868 " daddiu %2, %2, 1\n"
1869 " andi %1, %2, 0x7\n"
1870 " beq $0, %1, 9f\n"
1871 " dextu %1, %0, 40, 8\n"
1872 "6: sb %1, 0(%2)\n"
1873 " daddiu %2, %2, 1\n"
1874 " andi %1, %2, 0x7\n"
1875 " beq $0, %1, 9f\n"
1876 " dextu %1, %0, 48, 8\n"
1877 "7: sb %1, 0(%2)\n"
1878 " daddiu %2, %2, 1\n"
1879 " andi %1, %2, 0x7\n"
1880 " beq $0, %1, 9f\n"
1881 " dextu %1, %0, 56, 8\n"
1882 "0: sb %1, 0(%2)\n"
1883#else /* !CONFIG_CPU_LITTLE_ENDIAN */
1884 " dext %1, %0, 0, 8\n"
1885 "1: sb %1, 0(%2)\n"
1886 " andi %1, %2, 0x7\n"
1887 " beq $0, %1, 9f\n"
1888 " daddiu %2, %2, -1\n"
1889 " dext %1, %0, 8, 8\n"
1890 "2: sb %1, 0(%2)\n"
1891 " andi %1, %2, 0x7\n"
1892 " beq $0, %1, 9f\n"
1893 " daddiu %2, %2, -1\n"
1894 " dext %1, %0, 16, 8\n"
1895 "3: sb %1, 0(%2)\n"
1896 " andi %1, %2, 0x7\n"
1897 " beq $0, %1, 9f\n"
1898 " daddiu %2, %2, -1\n"
1899 " dext %1, %0, 24, 8\n"
1900 "4: sb %1, 0(%2)\n"
1901 " andi %1, %2, 0x7\n"
1902 " beq $0, %1, 9f\n"
1903 " daddiu %2, %2, -1\n"
1904 " dextu %1, %0, 32, 8\n"
1905 "5: sb %1, 0(%2)\n"
1906 " andi %1, %2, 0x7\n"
1907 " beq $0, %1, 9f\n"
1908 " daddiu %2, %2, -1\n"
1909 " dextu %1, %0, 40, 8\n"
1910 "6: sb %1, 0(%2)\n"
1911 " andi %1, %2, 0x7\n"
1912 " beq $0, %1, 9f\n"
1913 " daddiu %2, %2, -1\n"
1914 " dextu %1, %0, 48, 8\n"
1915 "7: sb %1, 0(%2)\n"
1916 " andi %1, %2, 0x7\n"
1917 " beq $0, %1, 9f\n"
1918 " daddiu %2, %2, -1\n"
1919 " dextu %1, %0, 56, 8\n"
1920 "0: sb %1, 0(%2)\n"
1921#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1922 "9:\n"
1923 " .insn\n"
1924 " .section .fixup,\"ax\"\n"
1925 "8: li %3,%4\n"
1926 " j 9b\n"
1927 " .previous\n"
1928 " .section __ex_table,\"a\"\n"
1929 " .word 1b,8b\n"
1930 " .word 2b,8b\n"
1931 " .word 3b,8b\n"
1932 " .word 4b,8b\n"
1933 " .word 5b,8b\n"
1934 " .word 6b,8b\n"
1935 " .word 7b,8b\n"
1936 " .word 0b,8b\n"
1937 " .previous\n"
1938 " .set pop\n"
1939 : "+&r"(rt), "=&r"(rs),
1940 "+&r"(vaddr), "+&r"(err)
1941 : "i"(SIGSEGV)
1942 : "memory");
1943
1944 MIPS_R2_STATS(stores);
1945
1946 break;
1947 case ll_op:
1948 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1949 if (vaddr & 0x3) {
1950 current->thread.cp0_baduaddr = vaddr;
1951 err = SIGBUS;
1952 break;
1953 }
1954 if (!access_ok(VERIFY_READ, vaddr, 4)) {
1955 current->thread.cp0_baduaddr = vaddr;
1956 err = SIGBUS;
1957 break;
1958 }
1959
1960 if (!cpu_has_rw_llb) {
1961 /*
1962 * An LL/SC block can't be safely emulated without
1963 * Config5/LLB availability, so it's probably time to
1964 * kill our process before things get any worse. This is
1965 * because Config5/LLB allows us to use ERETNC so that
1966 * the LLAddr/LLB bit is not cleared when we return from
1967 * an exception. MIPS R2 LL/SC instructions trap with an
1968 * RI exception, so once we emulate them here we return
1969 * to userland with ERETNC. That preserves the
1970 * LLAddr/LLB bit, so the subsequent SC instruction will
1971 * succeed, preserving the atomic semantics of the LL/SC
1972 * block. Without that, there is no safe way to emulate
1973 * an LL/SC block in MIPS R2 userland.
1974 */
1975 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
1976 err = SIGKILL;
1977 break;
1978 }
1979
1980 __asm__ __volatile__(
1981 "1:\n"
1982 "ll %0, 0(%2)\n"
1983 "2:\n"
1984 ".insn\n"
1985 ".section .fixup,\"ax\"\n"
1986 "3:\n"
1987 "li %1, %3\n"
1988 "j 2b\n"
1989 ".previous\n"
1990 ".section __ex_table,\"a\"\n"
1991 ".word 1b, 3b\n"
1992 ".previous\n"
1993 : "=&r"(res), "+&r"(err)
1994 : "r"(vaddr), "i"(SIGSEGV)
1995 : "memory");
1996
1997 if (MIPSInst_RT(inst) && !err)
1998 regs->regs[MIPSInst_RT(inst)] = res;
1999 MIPS_R2_STATS(llsc);
2000
2001 break;
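The guarantee described in the comment above is easiest to see from the user side. Below is a minimal sketch of the kind of MIPS R2 user-mode sequence involved (illustrative only, not part of this file; atomic_inc_r2() is a hypothetical helper built for MIPS32 R2 and run on an R6 CPU, where both the pre-R6 LL and SC encodings trap with RI):

/* Hypothetical R2 user-mode helper: the LL traps and is emulated by the
 * ll_op case above; because the kernel returns with ERETNC, LLB is still
 * set when the SC traps and is emulated by the sc_op case below, so the
 * store can still succeed and the sequence stays atomic.
 */
static inline int atomic_inc_r2(int *p)
{
	int old, tmp;

	__asm__ __volatile__(
	"1:	ll	%0, 0(%2)	\n"	/* RI trap on R6 -> ll_op */
	"	addiu	%1, %0, 1	\n"
	"	sc	%1, 0(%2)	\n"	/* RI trap on R6 -> sc_op */
	"	beqz	%1, 1b		\n"
	: "=&r"(old), "=&r"(tmp)
	: "r"(p)
	: "memory");
	return old;
}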
2002
2003 case sc_op:
2004 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2005 if (vaddr & 0x3) {
2006 current->thread.cp0_baduaddr = vaddr;
2007 err = SIGBUS;
2008 break;
2009 }
2010 if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
2011 current->thread.cp0_baduaddr = vaddr;
2012 err = SIGBUS;
2013 break;
2014 }
2015
2016 if (!cpu_has_rw_llb) {
2017 /*
2018 * An LL/SC block can't be safely emulated without
2019 * Config5/LLB availability, so it's probably time to
2020 * kill our process before things get any worse. This is
2021 * because Config5/LLB allows us to use ERETNC so that
2022 * the LLAddr/LLB bit is not cleared when we return from
2023 * an exception. MIPS R2 LL/SC instructions trap with an
2024 * RI exception, so once we emulate them here we return
2025 * to userland with ERETNC. That preserves the
2026 * LLAddr/LLB bit, so the subsequent SC instruction will
2027 * succeed, preserving the atomic semantics of the LL/SC
2028 * block. Without that, there is no safe way to emulate
2029 * an LL/SC block in MIPS R2 userland.
2030 */
2031 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2032 err = SIGKILL;
2033 break;
2034 }
2035
2036 res = regs->regs[MIPSInst_RT(inst)];
2037
2038 __asm__ __volatile__(
2039 "1:\n"
2040 "sc %0, 0(%2)\n"
2041 "2:\n"
2042 ".insn\n"
2043 ".section .fixup,\"ax\"\n"
2044 "3:\n"
2045 "li %1, %3\n"
2046 "j 2b\n"
2047 ".previous\n"
2048 ".section __ex_table,\"a\"\n"
2049 ".word 1b, 3b\n"
2050 ".previous\n"
2051 : "+&r"(res), "+&r"(err)
2052 : "r"(vaddr), "i"(SIGSEGV));
2053
2054 if (MIPSInst_RT(inst) && !err)
2055 regs->regs[MIPSInst_RT(inst)] = res;
2056
2057 MIPS_R2_STATS(llsc);
2058
2059 break;
2060
2061 case lld_op:
2062 if (config_enabled(CONFIG_32BIT)) {
2063 err = SIGILL;
2064 break;
2065 }
2066
2067 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2068 if (vaddr & 0x7) {
2069 current->thread.cp0_baduaddr = vaddr;
2070 err = SIGBUS;
2071 break;
2072 }
2073 if (!access_ok(VERIFY_READ, vaddr, 8)) {
2074 current->thread.cp0_baduaddr = vaddr;
2075 err = SIGBUS;
2076 break;
2077 }
2078
2079 if (!cpu_has_rw_llb) {
2080 /*
2081 * An LL/SC block can't be safely emulated without
2082 * Config5/LLB availability, so it's probably time to
2083 * kill our process before things get any worse. This is
2084 * because Config5/LLB allows us to use ERETNC so that
2085 * the LLAddr/LLB bit is not cleared when we return from
2086 * an exception. MIPS R2 LL/SC instructions trap with an
2087 * RI exception, so once we emulate them here we return
2088 * to userland with ERETNC. That preserves the
2089 * LLAddr/LLB bit, so the subsequent SC instruction will
2090 * succeed, preserving the atomic semantics of the LL/SC
2091 * block. Without that, there is no safe way to emulate
2092 * an LL/SC block in MIPS R2 userland.
2093 */
2094 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2095 err = SIGKILL;
2096 break;
2097 }
2098
2099 __asm__ __volatile__(
2100 "1:\n"
2101 "lld %0, 0(%2)\n"
2102 "2:\n"
2103 ".insn\n"
2104 ".section .fixup,\"ax\"\n"
2105 "3:\n"
2106 "li %1, %3\n"
2107 "j 2b\n"
2108 ".previous\n"
2109 ".section __ex_table,\"a\"\n"
2110 ".word 1b, 3b\n"
2111 ".previous\n"
2112 : "=&r"(res), "+&r"(err)
2113 : "r"(vaddr), "i"(SIGSEGV)
2114 : "memory");
2115 if (MIPSInst_RT(inst) && !err)
2116 regs->regs[MIPSInst_RT(inst)] = res;
2117
2118 MIPS_R2_STATS(llsc);
2119
2120 break;
2121
2122 case scd_op:
2123 if (config_enabled(CONFIG_32BIT)) {
2124 err = SIGILL;
2125 break;
2126 }
2127
2128 vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
2129 if (vaddr & 0x7) {
2130 current->thread.cp0_baduaddr = vaddr;
2131 err = SIGBUS;
2132 break;
2133 }
2134 if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
2135 current->thread.cp0_baduaddr = vaddr;
2136 err = SIGBUS;
2137 break;
2138 }
2139
2140 if (!cpu_has_rw_llb) {
2141 /*
2142 * An LL/SC block can't be safely emulated without
2143 * Config5/LLB availability, so it's probably time to
2144 * kill our process before things get any worse. This is
2145 * because Config5/LLB allows us to use ERETNC so that
2146 * the LLAddr/LLB bit is not cleared when we return from
2147 * an exception. MIPS R2 LL/SC instructions trap with an
2148 * RI exception, so once we emulate them here we return
2149 * to userland with ERETNC. That preserves the
2150 * LLAddr/LLB bit, so the subsequent SC instruction will
2151 * succeed, preserving the atomic semantics of the LL/SC
2152 * block. Without that, there is no safe way to emulate
2153 * an LL/SC block in MIPS R2 userland.
2154 */
2155 pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
2156 err = SIGKILL;
2157 break;
2158 }
2159
2160 res = regs->regs[MIPSInst_RT(inst)];
2161
2162 __asm__ __volatile__(
2163 "1:\n"
2164 "scd %0, 0(%2)\n"
2165 "2:\n"
2166 ".insn\n"
2167 ".section .fixup,\"ax\"\n"
2168 "3:\n"
2169 "li %1, %3\n"
2170 "j 2b\n"
2171 ".previous\n"
2172 ".section __ex_table,\"a\"\n"
2173 ".word 1b, 3b\n"
2174 ".previous\n"
2175 : "+&r"(res), "+&r"(err)
2176 : "r"(vaddr), "i"(SIGSEGV));
2177
2178 if (MIPSInst_RT(inst) && !err)
2179 regs->regs[MIPSInst_RT(inst)] = res;
2180
2181 MIPS_R2_STATS(llsc);
2182
2183 break;
2184 case pref_op:
2185 /* skip it */
2186 break;
2187 default:
2188 err = SIGILL;
2189 }
2190
2191 /*
2192 * Let's not return to userland just yet. It's costly, and
2193 * it's likely we have more R2 instructions to emulate.
2194 */
2195 if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2196 regs->cp0_cause &= ~CAUSEF_BD;
2197 err = get_user(inst, (u32 __user *)regs->cp0_epc);
2198 if (!err)
2199 goto repeat;
2200
2201 if (err < 0)
2202 err = SIGSEGV;
2203 }
2204
2205 if (err && (err != SIGEMT)) {
2206 regs->regs[31] = r31;
2207 regs->cp0_epc = epc;
2208 }
2209
2210 /* Likely a MIPS R6-compatible instruction */
2211 if (pass && (err == SIGILL))
2212 err = 0;
2213
2214 return err;
2215}
2216
2217#ifdef CONFIG_DEBUG_FS
2218
2219static int mipsr2_stats_show(struct seq_file *s, void *unused)
2220{
2221
2222 seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2223 seq_printf(s, "movs\t\t%ld\t%ld\n",
2224 (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2225 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2226 seq_printf(s, "hilo\t\t%ld\t%ld\n",
2227 (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2228 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2229 seq_printf(s, "muls\t\t%ld\t%ld\n",
2230 (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2231 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2232 seq_printf(s, "divs\t\t%ld\t%ld\n",
2233 (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2234 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2235 seq_printf(s, "dsps\t\t%ld\t%ld\n",
2236 (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2237 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2238 seq_printf(s, "bops\t\t%ld\t%ld\n",
2239 (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2240 (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2241 seq_printf(s, "traps\t\t%ld\t%ld\n",
2242 (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2243 (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2244 seq_printf(s, "fpus\t\t%ld\t%ld\n",
2245 (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2246 (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2247 seq_printf(s, "loads\t\t%ld\t%ld\n",
2248 (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2249 (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2250 seq_printf(s, "stores\t\t%ld\t%ld\n",
2251 (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2252 (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2253 seq_printf(s, "llsc\t\t%ld\t%ld\n",
2254 (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2255 (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2256 seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2257 (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2258 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2259 seq_printf(s, "jr\t\t%ld\n",
2260 (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2261 seq_printf(s, "bltzl\t\t%ld\n",
2262 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2263 seq_printf(s, "bgezl\t\t%ld\n",
2264 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2265 seq_printf(s, "bltzll\t\t%ld\n",
2266 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2267 seq_printf(s, "bgezll\t\t%ld\n",
2268 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2269 seq_printf(s, "bltzal\t\t%ld\n",
2270 (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2271 seq_printf(s, "bgezal\t\t%ld\n",
2272 (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2273 seq_printf(s, "beql\t\t%ld\n",
2274 (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
2275 seq_printf(s, "bnel\t\t%ld\n",
2276 (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
2277 seq_printf(s, "blezl\t\t%ld\n",
2278 (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
2279 seq_printf(s, "bgtzl\t\t%ld\n",
2280 (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
2281
2282 return 0;
2283}
2284
2285static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
2286{
2287 mipsr2_stats_show(s, unused);
2288
2289 __this_cpu_write((mipsr2emustats).movs, 0);
2290 __this_cpu_write((mipsr2bdemustats).movs, 0);
2291 __this_cpu_write((mipsr2emustats).hilo, 0);
2292 __this_cpu_write((mipsr2bdemustats).hilo, 0);
2293 __this_cpu_write((mipsr2emustats).muls, 0);
2294 __this_cpu_write((mipsr2bdemustats).muls, 0);
2295 __this_cpu_write((mipsr2emustats).divs, 0);
2296 __this_cpu_write((mipsr2bdemustats).divs, 0);
2297 __this_cpu_write((mipsr2emustats).dsps, 0);
2298 __this_cpu_write((mipsr2bdemustats).dsps, 0);
2299 __this_cpu_write((mipsr2emustats).bops, 0);
2300 __this_cpu_write((mipsr2bdemustats).bops, 0);
2301 __this_cpu_write((mipsr2emustats).traps, 0);
2302 __this_cpu_write((mipsr2bdemustats).traps, 0);
2303 __this_cpu_write((mipsr2emustats).fpus, 0);
2304 __this_cpu_write((mipsr2bdemustats).fpus, 0);
2305 __this_cpu_write((mipsr2emustats).loads, 0);
2306 __this_cpu_write((mipsr2bdemustats).loads, 0);
2307 __this_cpu_write((mipsr2emustats).stores, 0);
2308 __this_cpu_write((mipsr2bdemustats).stores, 0);
2309 __this_cpu_write((mipsr2emustats).llsc, 0);
2310 __this_cpu_write((mipsr2bdemustats).llsc, 0);
2311 __this_cpu_write((mipsr2emustats).dsemul, 0);
2312 __this_cpu_write((mipsr2bdemustats).dsemul, 0);
2313 __this_cpu_write((mipsr2bremustats).jrs, 0);
2314 __this_cpu_write((mipsr2bremustats).bltzl, 0);
2315 __this_cpu_write((mipsr2bremustats).bgezl, 0);
2316 __this_cpu_write((mipsr2bremustats).bltzll, 0);
2317 __this_cpu_write((mipsr2bremustats).bgezll, 0);
2318 __this_cpu_write((mipsr2bremustats).bltzal, 0);
2319 __this_cpu_write((mipsr2bremustats).bgezal, 0);
2320 __this_cpu_write((mipsr2bremustats).beql, 0);
2321 __this_cpu_write((mipsr2bremustats).bnel, 0);
2322 __this_cpu_write((mipsr2bremustats).blezl, 0);
2323 __this_cpu_write((mipsr2bremustats).bgtzl, 0);
2324
2325 return 0;
2326}
2327
2328static int mipsr2_stats_open(struct inode *inode, struct file *file)
2329{
2330 return single_open(file, mipsr2_stats_show, inode->i_private);
2331}
2332
2333static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
2334{
2335 return single_open(file, mipsr2_stats_clear_show, inode->i_private);
2336}
2337
2338static const struct file_operations mipsr2_emul_fops = {
2339 .open = mipsr2_stats_open,
2340 .read = seq_read,
2341 .llseek = seq_lseek,
2342 .release = single_release,
2343};
2344
2345static const struct file_operations mipsr2_clear_fops = {
2346 .open = mipsr2_stats_clear_open,
2347 .read = seq_read,
2348 .llseek = seq_lseek,
2349 .release = single_release,
2350};
2351
2352
2353static int __init mipsr2_init_debugfs(void)
2354{
2355 extern struct dentry *mips_debugfs_dir;
2356 struct dentry *mipsr2_emul;
2357
2358 if (!mips_debugfs_dir)
2359 return -ENODEV;
2360
2361 mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
2362 mips_debugfs_dir, NULL,
2363 &mipsr2_emul_fops);
2364 if (!mipsr2_emul)
2365 return -ENOMEM;
2366
2367 mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
2368 mips_debugfs_dir, NULL,
2369 &mipsr2_clear_fops);
2370 if (!mipsr2_emul)
2371 return -ENOMEM;
2372
2373 return 0;
2374}
2375
2376device_initcall(mipsr2_init_debugfs);
2377
2378#endif /* CONFIG_DEBUG_FS */
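When CONFIG_DEBUG_FS is enabled, the counters above are visible at runtime; assuming debugfs is mounted at /sys/kernel/debug and mips_debugfs_dir points at the usual "mips" directory, `cat /sys/kernel/debug/mips/r2_emul_stats` prints the table, and reading r2_emul_stats_clear prints it once more and then zeroes every counter (its show routine calls mipsr2_stats_show() before clearing).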
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 1a73c6c9e4b7..291af0b5c482 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -77,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
77EXPORT_SYMBOL(__strnlen_user_nocheck_asm); 77EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
78EXPORT_SYMBOL(__strnlen_user_asm); 78EXPORT_SYMBOL(__strnlen_user_asm);
79 79
80#ifndef CONFIG_CPU_MIPSR6
80EXPORT_SYMBOL(csum_partial); 81EXPORT_SYMBOL(csum_partial);
81EXPORT_SYMBOL(csum_partial_copy_nocheck); 82EXPORT_SYMBOL(csum_partial_copy_nocheck);
82EXPORT_SYMBOL(__csum_partial_copy_kernel); 83EXPORT_SYMBOL(__csum_partial_copy_kernel);
83EXPORT_SYMBOL(__csum_partial_copy_to_user); 84EXPORT_SYMBOL(__csum_partial_copy_to_user);
84EXPORT_SYMBOL(__csum_partial_copy_from_user); 85EXPORT_SYMBOL(__csum_partial_copy_from_user);
86#endif
85 87
86EXPORT_SYMBOL(invalid_pte_table); 88EXPORT_SYMBOL(invalid_pte_table);
87#ifdef CONFIG_FUNCTION_TRACER 89#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 097fc8d14e42..130af7d26a9c 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
82 seq_printf(m, "]\n"); 82 seq_printf(m, "]\n");
83 } 83 }
84 84
85 seq_printf(m, "isa\t\t\t: mips1"); 85 seq_printf(m, "isa\t\t\t:");
86 if (cpu_has_mips_r1)
87 seq_printf(m, " mips1");
86 if (cpu_has_mips_2) 88 if (cpu_has_mips_2)
87 seq_printf(m, "%s", " mips2"); 89 seq_printf(m, "%s", " mips2");
88 if (cpu_has_mips_3) 90 if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
95 seq_printf(m, "%s", " mips32r1"); 97 seq_printf(m, "%s", " mips32r1");
96 if (cpu_has_mips32r2) 98 if (cpu_has_mips32r2)
97 seq_printf(m, "%s", " mips32r2"); 99 seq_printf(m, "%s", " mips32r2");
100 if (cpu_has_mips32r6)
101 seq_printf(m, "%s", " mips32r6");
98 if (cpu_has_mips64r1) 102 if (cpu_has_mips64r1)
99 seq_printf(m, "%s", " mips64r1"); 103 seq_printf(m, "%s", " mips64r1");
100 if (cpu_has_mips64r2) 104 if (cpu_has_mips64r2)
101 seq_printf(m, "%s", " mips64r2"); 105 seq_printf(m, "%s", " mips64r2");
106 if (cpu_has_mips64r6)
107 seq_printf(m, "%s", " mips64r6");
102 seq_printf(m, "\n"); 108 seq_printf(m, "\n");
103 109
104 seq_printf(m, "ASEs implemented\t:"); 110 seq_printf(m, "ASEs implemented\t:");
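With this change an R6 core reports its own ISA level instead of unconditionally claiming mips1. On a pure MIPS32 R6 CPU the resulting /proc/cpuinfo line would read, illustratively (the exact set depends on the cpu_has_* feature bits):

isa			: mips32r6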
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 4677b4c67da6..696d59e40fa4 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -581,6 +581,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
581 if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) 581 if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
582 return -EOPNOTSUPP; 582 return -EOPNOTSUPP;
583 583
584 /* FR = 0 not supported in MIPS R6 */
585 if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
586 return -EOPNOTSUPP;
587
584 /* Save FP & vector context, then disable FPU & MSA */ 588 /* Save FP & vector context, then disable FPU & MSA */
585 if (task->signal == current->signal) 589 if (task->signal == current->signal)
586 lose_fpu(1); 590 lose_fpu(1);
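The new FR check surfaces to userland through prctl(). A minimal sketch, assuming the PR_SET_FP_MODE / PR_FP_MODE_FR interface added by this series is available in the headers:

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>

/* Sketch: on an R6 CPU with a hardware FPU, asking for FR=0 (a mode
 * value without the PR_FP_MODE_FR bit) should now fail. */
int main(void)
{
	if (prctl(PR_SET_FP_MODE, 0) == -1 && errno == EOPNOTSUPP)
		puts("FR=0 rejected: MIPS R6 FPUs require FR=1");
	return 0;
}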
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 6c160c67984c..676c5030a953 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -34,7 +34,7 @@
34 .endm 34 .endm
35 35
36 .set noreorder 36 .set noreorder
37 .set arch=r4000 37 .set MIPS_ISA_ARCH_LEVEL_RAW
38 38
39LEAF(_save_fp_context) 39LEAF(_save_fp_context)
40 .set push 40 .set push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
42 cfc1 t1, fcr31 42 cfc1 t1, fcr31
43 .set pop 43 .set pop
44 44
45#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 45#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
46 defined(CONFIG_CPU_MIPS32_R6)
46 .set push 47 .set push
47 SET_HARDFLOAT 48 SET_HARDFLOAT
48#ifdef CONFIG_CPU_MIPS32_R2 49#ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
105 SET_HARDFLOAT 106 SET_HARDFLOAT
106 cfc1 t1, fcr31 107 cfc1 t1, fcr31
107 108
109#ifndef CONFIG_CPU_MIPS64_R6
108 mfc0 t0, CP0_STATUS 110 mfc0 t0, CP0_STATUS
109 sll t0, t0, 5 111 sll t0, t0, 5
110 bgez t0, 1f # skip storing odd if FR=0 112 bgez t0, 1f # skip storing odd if FR=0
111 nop 113 nop
114#endif
112 115
113 /* Store the 16 odd double precision registers */ 116 /* Store the 16 odd double precision registers */
114 EX sdc1 $f1, SC32_FPREGS+8(a0) 117 EX sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
163LEAF(_restore_fp_context) 166LEAF(_restore_fp_context)
164 EX lw t1, SC_FPC_CSR(a0) 167 EX lw t1, SC_FPC_CSR(a0)
165 168
166#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 169#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
170 defined(CONFIG_CPU_MIPS32_R6)
167 .set push 171 .set push
168 SET_HARDFLOAT 172 SET_HARDFLOAT
169#ifdef CONFIG_CPU_MIPS32_R2 173#ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
223 SET_HARDFLOAT 227 SET_HARDFLOAT
224 EX lw t1, SC32_FPC_CSR(a0) 228 EX lw t1, SC32_FPC_CSR(a0)
225 229
230#ifndef CONFIG_CPU_MIPS64_R6
226 mfc0 t0, CP0_STATUS 231 mfc0 t0, CP0_STATUS
227 sll t0, t0, 5 232 sll t0, t0, 5
228 bgez t0, 1f # skip loading odd if FR=0 233 bgez t0, 1f # skip loading odd if FR=0
229 nop 234 nop
235#endif
230 236
231 EX ldc1 $f1, SC32_FPREGS+8(a0) 237 EX ldc1 $f1, SC32_FPREGS+8(a0)
232 EX ldc1 $f3, SC32_FPREGS+24(a0) 238 EX ldc1 $f3, SC32_FPREGS+24(a0)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 64591e671878..3b1a36f13a7d 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -115,7 +115,8 @@
115 * Save a thread's fp context. 115 * Save a thread's fp context.
116 */ 116 */
117LEAF(_save_fp) 117LEAF(_save_fp)
118#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 118#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
119 defined(CONFIG_CPU_MIPS32_R6)
119 mfc0 t0, CP0_STATUS 120 mfc0 t0, CP0_STATUS
120#endif 121#endif
121 fpu_save_double a0 t0 t1 # clobbers t1 122 fpu_save_double a0 t0 t1 # clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
126 * Restore a thread's fp context. 127 * Restore a thread's fp context.
127 */ 128 */
128LEAF(_restore_fp) 129LEAF(_restore_fp)
129#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 130#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
131 defined(CONFIG_CPU_MIPS32_R6)
130 mfc0 t0, CP0_STATUS 132 mfc0 t0, CP0_STATUS
131#endif 133#endif
132 fpu_restore_double a0 t0 t1 # clobbers t1 134 fpu_restore_double a0 t0 t1 # clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
240 mtc1 t1, $f30 242 mtc1 t1, $f30
241 mtc1 t1, $f31 243 mtc1 t1, $f31
242 244
243#ifdef CONFIG_CPU_MIPS32_R2 245#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
244 .set push 246 .set push
245 .set mips32r2 247 .set MIPS_ISA_LEVEL_RAW
246 .set fp=64 248 .set fp=64
247 sll t0, t0, 5 # is Status.FR set? 249 sll t0, t0, 5 # is Status.FR set?
248 bgez t0, 1f # no: skip setting upper 32b 250 bgez t0, 1f # no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
280 mthc1 t1, $f30 282 mthc1 t1, $f30
281 mthc1 t1, $f31 283 mthc1 t1, $f31
2821: .set pop 2841: .set pop
283#endif /* CONFIG_CPU_MIPS32_R2 */ 285#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
284#else 286#else
285 .set arch=r4000 287 .set MIPS_ISA_ARCH_LEVEL_RAW
286 dmtc1 t1, $f0 288 dmtc1 t1, $f0
287 dmtc1 t1, $f2 289 dmtc1 t1, $f2
288 dmtc1 t1, $f4 290 dmtc1 t1, $f4
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 67f2495def1c..d1168d7c31e8 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -208,6 +208,7 @@ void spram_config(void)
208 case CPU_INTERAPTIV: 208 case CPU_INTERAPTIV:
209 case CPU_PROAPTIV: 209 case CPU_PROAPTIV:
210 case CPU_P5600: 210 case CPU_P5600:
211 case CPU_QEMU_GENERIC:
211 config0 = read_c0_config(); 212 config0 = read_c0_config();
212 /* FIXME: addresses are Malta specific */ 213 /* FIXME: addresses are Malta specific */
213 if (config0 & (1<<24)) { 214 if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 604b558809c4..53a7ef9a8f32 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
136 : "memory"); 136 : "memory");
137 } else if (cpu_has_llsc) { 137 } else if (cpu_has_llsc) {
138 __asm__ __volatile__ ( 138 __asm__ __volatile__ (
139 " .set arch=r4000 \n" 139 " .set "MIPS_ISA_ARCH_LEVEL" \n"
140 " li %[err], 0 \n" 140 " li %[err], 0 \n"
141 "1: ll %[old], (%[addr]) \n" 141 "1: ll %[old], (%[addr]) \n"
142 " move %[tmp], %[new] \n" 142 " move %[tmp], %[new] \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d5fbfb51b9da..afa447e5e97f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,7 @@
46#include <asm/fpu.h> 46#include <asm/fpu.h>
47#include <asm/fpu_emulator.h> 47#include <asm/fpu_emulator.h>
48#include <asm/idle.h> 48#include <asm/idle.h>
49#include <asm/mips-r2-to-r6-emul.h>
49#include <asm/mipsregs.h> 50#include <asm/mipsregs.h>
50#include <asm/mipsmtregs.h> 51#include <asm/mipsmtregs.h>
51#include <asm/module.h> 52#include <asm/module.h>
@@ -837,7 +838,7 @@ out:
837 exception_exit(prev_state); 838 exception_exit(prev_state);
838} 839}
839 840
840static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 841void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
841 const char *str) 842 const char *str)
842{ 843{
843 siginfo_t info; 844 siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
1027 unsigned int opcode = 0; 1028 unsigned int opcode = 0;
1028 int status = -1; 1029 int status = -1;
1029 1030
1031 /*
1032 * Avoid any kernel code. Just emulate the R2 instruction
1033 * as quickly as possible.
1034 */
1035 if (mipsr2_emulation && cpu_has_mips_r6 &&
1036 likely(user_mode(regs))) {
1037 if (likely(get_user(opcode, epc) >= 0)) {
1038 status = mipsr2_decoder(regs, opcode);
1039 switch (status) {
1040 case 0:
1041 case SIGEMT:
1042 task_thread_info(current)->r2_emul_return = 1;
1043 return;
1044 case SIGILL:
1045 goto no_r2_instr;
1046 default:
1047 process_fpemu_return(status,
1048 &current->thread.cp0_baduaddr);
1049 task_thread_info(current)->r2_emul_return = 1;
1050 return;
1051 }
1052 }
1053 }
1054
1055no_r2_instr:
1056
1030 prev_state = exception_enter(); 1057 prev_state = exception_enter();
1058
1031 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1059 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
1032 SIGILL) == NOTIFY_STOP) 1060 SIGILL) == NOTIFY_STOP)
1033 goto out; 1061 goto out;
@@ -1559,6 +1587,7 @@ static inline void parity_protection_init(void)
1559 case CPU_INTERAPTIV: 1587 case CPU_INTERAPTIV:
1560 case CPU_PROAPTIV: 1588 case CPU_PROAPTIV:
1561 case CPU_P5600: 1589 case CPU_P5600:
1590 case CPU_QEMU_GENERIC:
1562 { 1591 {
1563#define ERRCTL_PE 0x80000000 1592#define ERRCTL_PE 0x80000000
1564#define ERRCTL_L2P 0x00800000 1593#define ERRCTL_L2P 0x00800000
@@ -1648,7 +1677,7 @@ asmlinkage void cache_parity_error(void)
1648 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1677 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1649 reg_val & (1<<30) ? "secondary" : "primary", 1678 reg_val & (1<<30) ? "secondary" : "primary",
1650 reg_val & (1<<31) ? "data" : "insn"); 1679 reg_val & (1<<31) ? "data" : "insn");
1651 if (cpu_has_mips_r2 && 1680 if ((cpu_has_mips_r2_r6) &&
1652 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1681 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1653 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1682 pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1654 reg_val & (1<<29) ? "ED " : "", 1683 reg_val & (1<<29) ? "ED " : "",
@@ -1688,7 +1717,7 @@ asmlinkage void do_ftlb(void)
1688 unsigned int reg_val; 1717 unsigned int reg_val;
1689 1718
1690 /* For the moment, report the problem and hang. */ 1719 /* For the moment, report the problem and hang. */
1691 if (cpu_has_mips_r2 && 1720 if ((cpu_has_mips_r2_r6) &&
1692 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1721 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1693 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1722 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1694 read_c0_ecc()); 1723 read_c0_ecc());
@@ -1977,7 +2006,7 @@ static void configure_hwrena(void)
1977{ 2006{
1978 unsigned int hwrena = cpu_hwrena_impl_bits; 2007 unsigned int hwrena = cpu_hwrena_impl_bits;
1979 2008
1980 if (cpu_has_mips_r2) 2009 if (cpu_has_mips_r2_r6)
1981 hwrena |= 0x0000000f; 2010 hwrena |= 0x0000000f;
1982 2011
1983 if (!noulri && cpu_has_userlocal) 2012 if (!noulri && cpu_has_userlocal)
@@ -2021,7 +2050,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
2021 * o read IntCtl.IPTI to determine the timer interrupt 2050 * o read IntCtl.IPTI to determine the timer interrupt
2022 * o read IntCtl.IPPCI to determine the performance counter interrupt 2051 * o read IntCtl.IPPCI to determine the performance counter interrupt
2023 */ 2052 */
2024 if (cpu_has_mips_r2) { 2053 if (cpu_has_mips_r2_r6) {
2025 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2054 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2026 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2055 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2027 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; 2056 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2112,7 +2141,7 @@ void __init trap_init(void)
2112#else 2141#else
2113 ebase = CKSEG0; 2142 ebase = CKSEG0;
2114#endif 2143#endif
2115 if (cpu_has_mips_r2) 2144 if (cpu_has_mips_r2_r6)
2116 ebase += (read_c0_ebase() & 0x3ffff000); 2145 ebase += (read_c0_ebase() & 0x3ffff000);
2117 } 2146 }
2118 2147
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index e11906dff885..bbb69695a0a1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
129 : "=&r" (value), "=r" (res) \ 129 : "=&r" (value), "=r" (res) \
130 : "r" (addr), "i" (-EFAULT)); 130 : "r" (addr), "i" (-EFAULT));
131 131
132#ifndef CONFIG_CPU_MIPSR6
132#define LoadW(addr, value, res) \ 133#define LoadW(addr, value, res) \
133 __asm__ __volatile__ ( \ 134 __asm__ __volatile__ ( \
134 "1:\t"user_lwl("%0", "(%2)")"\n" \ 135 "1:\t"user_lwl("%0", "(%2)")"\n" \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
146 ".previous" \ 147 ".previous" \
147 : "=&r" (value), "=r" (res) \ 148 : "=&r" (value), "=r" (res) \
148 : "r" (addr), "i" (-EFAULT)); 149 : "r" (addr), "i" (-EFAULT));
150#else
151/* MIPSR6 has no lwl instruction */
152#define LoadW(addr, value, res) \
153 __asm__ __volatile__ ( \
154 ".set\tpush\n" \
155 ".set\tnoat\n\t" \
156 "1:"user_lb("%0", "0(%2)")"\n\t" \
157 "2:"user_lbu("$1", "1(%2)")"\n\t" \
158 "sll\t%0, 0x8\n\t" \
159 "or\t%0, $1\n\t" \
160 "3:"user_lbu("$1", "2(%2)")"\n\t" \
161 "sll\t%0, 0x8\n\t" \
162 "or\t%0, $1\n\t" \
163 "4:"user_lbu("$1", "3(%2)")"\n\t" \
164 "sll\t%0, 0x8\n\t" \
165 "or\t%0, $1\n\t" \
166 "li\t%1, 0\n" \
167 ".set\tpop\n" \
168 "10:\n\t" \
169 ".insn\n\t" \
170 ".section\t.fixup,\"ax\"\n\t" \
171 "11:\tli\t%1, %3\n\t" \
172 "j\t10b\n\t" \
173 ".previous\n\t" \
174 ".section\t__ex_table,\"a\"\n\t" \
175 STR(PTR)"\t1b, 11b\n\t" \
176 STR(PTR)"\t2b, 11b\n\t" \
177 STR(PTR)"\t3b, 11b\n\t" \
178 STR(PTR)"\t4b, 11b\n\t" \
179 ".previous" \
180 : "=&r" (value), "=r" (res) \
181 : "r" (addr), "i" (-EFAULT));
182#endif /* CONFIG_CPU_MIPSR6 */
149 183
150#define LoadHWU(addr, value, res) \ 184#define LoadHWU(addr, value, res) \
151 __asm__ __volatile__ ( \ 185 __asm__ __volatile__ ( \
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
169 : "=&r" (value), "=r" (res) \ 203 : "=&r" (value), "=r" (res) \
170 : "r" (addr), "i" (-EFAULT)); 204 : "r" (addr), "i" (-EFAULT));
171 205
206#ifndef CONFIG_CPU_MIPSR6
172#define LoadWU(addr, value, res) \ 207#define LoadWU(addr, value, res) \
173 __asm__ __volatile__ ( \ 208 __asm__ __volatile__ ( \
174 "1:\t"user_lwl("%0", "(%2)")"\n" \ 209 "1:\t"user_lwl("%0", "(%2)")"\n" \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
206 ".previous" \ 241 ".previous" \
207 : "=&r" (value), "=r" (res) \ 242 : "=&r" (value), "=r" (res) \
208 : "r" (addr), "i" (-EFAULT)); 243 : "r" (addr), "i" (-EFAULT));
244#else
245/* MIPSR6 has no lwl and ldl instructions */
246#define LoadWU(addr, value, res) \
247 __asm__ __volatile__ ( \
248 ".set\tpush\n\t" \
249 ".set\tnoat\n\t" \
250 "1:"user_lbu("%0", "0(%2)")"\n\t" \
251 "2:"user_lbu("$1", "1(%2)")"\n\t" \
252 "sll\t%0, 0x8\n\t" \
253 "or\t%0, $1\n\t" \
254 "3:"user_lbu("$1", "2(%2)")"\n\t" \
255 "sll\t%0, 0x8\n\t" \
256 "or\t%0, $1\n\t" \
257 "4:"user_lbu("$1", "3(%2)")"\n\t" \
258 "sll\t%0, 0x8\n\t" \
259 "or\t%0, $1\n\t" \
260 "li\t%1, 0\n" \
261 ".set\tpop\n" \
262 "10:\n\t" \
263 ".insn\n\t" \
264 ".section\t.fixup,\"ax\"\n\t" \
265 "11:\tli\t%1, %3\n\t" \
266 "j\t10b\n\t" \
267 ".previous\n\t" \
268 ".section\t__ex_table,\"a\"\n\t" \
269 STR(PTR)"\t1b, 11b\n\t" \
270 STR(PTR)"\t2b, 11b\n\t" \
271 STR(PTR)"\t3b, 11b\n\t" \
272 STR(PTR)"\t4b, 11b\n\t" \
273 ".previous" \
274 : "=&r" (value), "=r" (res) \
275 : "r" (addr), "i" (-EFAULT));
276
277#define LoadDW(addr, value, res) \
278 __asm__ __volatile__ ( \
279 ".set\tpush\n\t" \
280 ".set\tnoat\n\t" \
281 "1:lb\t%0, 0(%2)\n\t" \
282 "2:lbu\t $1, 1(%2)\n\t" \
283 "dsll\t%0, 0x8\n\t" \
284 "or\t%0, $1\n\t" \
285 "3:lbu\t$1, 2(%2)\n\t" \
286 "dsll\t%0, 0x8\n\t" \
287 "or\t%0, $1\n\t" \
288 "4:lbu\t$1, 3(%2)\n\t" \
289 "dsll\t%0, 0x8\n\t" \
290 "or\t%0, $1\n\t" \
291 "5:lbu\t$1, 4(%2)\n\t" \
292 "dsll\t%0, 0x8\n\t" \
293 "or\t%0, $1\n\t" \
294 "6:lbu\t$1, 5(%2)\n\t" \
295 "dsll\t%0, 0x8\n\t" \
296 "or\t%0, $1\n\t" \
297 "7:lbu\t$1, 6(%2)\n\t" \
298 "dsll\t%0, 0x8\n\t" \
299 "or\t%0, $1\n\t" \
300 "8:lbu\t$1, 7(%2)\n\t" \
301 "dsll\t%0, 0x8\n\t" \
302 "or\t%0, $1\n\t" \
303 "li\t%1, 0\n" \
304 ".set\tpop\n\t" \
305 "10:\n\t" \
306 ".insn\n\t" \
307 ".section\t.fixup,\"ax\"\n\t" \
308 "11:\tli\t%1, %3\n\t" \
309 "j\t10b\n\t" \
310 ".previous\n\t" \
311 ".section\t__ex_table,\"a\"\n\t" \
312 STR(PTR)"\t1b, 11b\n\t" \
313 STR(PTR)"\t2b, 11b\n\t" \
314 STR(PTR)"\t3b, 11b\n\t" \
315 STR(PTR)"\t4b, 11b\n\t" \
316 STR(PTR)"\t5b, 11b\n\t" \
317 STR(PTR)"\t6b, 11b\n\t" \
318 STR(PTR)"\t7b, 11b\n\t" \
319 STR(PTR)"\t8b, 11b\n\t" \
320 ".previous" \
321 : "=&r" (value), "=r" (res) \
322 : "r" (addr), "i" (-EFAULT));
323#endif /* CONFIG_CPU_MIPSR6 */
324
209 325
210#define StoreHW(addr, value, res) \ 326#define StoreHW(addr, value, res) \
211 __asm__ __volatile__ ( \ 327 __asm__ __volatile__ ( \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
228 : "=r" (res) \ 344 : "=r" (res) \
229 : "r" (value), "r" (addr), "i" (-EFAULT)); 345 : "r" (value), "r" (addr), "i" (-EFAULT));
230 346
347#ifndef CONFIG_CPU_MIPSR6
231#define StoreW(addr, value, res) \ 348#define StoreW(addr, value, res) \
232 __asm__ __volatile__ ( \ 349 __asm__ __volatile__ ( \
233 "1:\t"user_swl("%1", "(%2)")"\n" \ 350 "1:\t"user_swl("%1", "(%2)")"\n" \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
263 ".previous" \ 380 ".previous" \
264 : "=r" (res) \ 381 : "=r" (res) \
265 : "r" (value), "r" (addr), "i" (-EFAULT)); 382 : "r" (value), "r" (addr), "i" (-EFAULT));
266#endif 383#else
384/* MIPSR6 has no swl and sdl instructions */
385#define StoreW(addr, value, res) \
386 __asm__ __volatile__ ( \
387 ".set\tpush\n\t" \
388 ".set\tnoat\n\t" \
389 "1:"user_sb("%1", "3(%2)")"\n\t" \
390 "srl\t$1, %1, 0x8\n\t" \
391 "2:"user_sb("$1", "2(%2)")"\n\t" \
392 "srl\t$1, $1, 0x8\n\t" \
393 "3:"user_sb("$1", "1(%2)")"\n\t" \
394 "srl\t$1, $1, 0x8\n\t" \
395 "4:"user_sb("$1", "0(%2)")"\n\t" \
396 ".set\tpop\n\t" \
397 "li\t%0, 0\n" \
398 "10:\n\t" \
399 ".insn\n\t" \
400 ".section\t.fixup,\"ax\"\n\t" \
401 "11:\tli\t%0, %3\n\t" \
402 "j\t10b\n\t" \
403 ".previous\n\t" \
404 ".section\t__ex_table,\"a\"\n\t" \
405 STR(PTR)"\t1b, 11b\n\t" \
406 STR(PTR)"\t2b, 11b\n\t" \
407 STR(PTR)"\t3b, 11b\n\t" \
408 STR(PTR)"\t4b, 11b\n\t" \
409 ".previous" \
410 : "=&r" (res) \
411 : "r" (value), "r" (addr), "i" (-EFAULT) \
412 : "memory");
413
414#define StoreDW(addr, value, res) \
415 __asm__ __volatile__ ( \
416 ".set\tpush\n\t" \
417 ".set\tnoat\n\t" \
418 "1:sb\t%1, 7(%2)\n\t" \
419 "dsrl\t$1, %1, 0x8\n\t" \
420 "2:sb\t$1, 6(%2)\n\t" \
421 "dsrl\t$1, $1, 0x8\n\t" \
422 "3:sb\t$1, 5(%2)\n\t" \
423 "dsrl\t$1, $1, 0x8\n\t" \
424 "4:sb\t$1, 4(%2)\n\t" \
425 "dsrl\t$1, $1, 0x8\n\t" \
426 "5:sb\t$1, 3(%2)\n\t" \
427 "dsrl\t$1, $1, 0x8\n\t" \
428 "6:sb\t$1, 2(%2)\n\t" \
429 "dsrl\t$1, $1, 0x8\n\t" \
430 "7:sb\t$1, 1(%2)\n\t" \
431 "dsrl\t$1, $1, 0x8\n\t" \
432 "8:sb\t$1, 0(%2)\n\t" \
433 "dsrl\t$1, $1, 0x8\n\t" \
434 ".set\tpop\n\t" \
435 "li\t%0, 0\n" \
436 "10:\n\t" \
437 ".insn\n\t" \
438 ".section\t.fixup,\"ax\"\n\t" \
439 "11:\tli\t%0, %3\n\t" \
440 "j\t10b\n\t" \
441 ".previous\n\t" \
442 ".section\t__ex_table,\"a\"\n\t" \
443 STR(PTR)"\t1b, 11b\n\t" \
444 STR(PTR)"\t2b, 11b\n\t" \
445 STR(PTR)"\t3b, 11b\n\t" \
446 STR(PTR)"\t4b, 11b\n\t" \
447 STR(PTR)"\t5b, 11b\n\t" \
448 STR(PTR)"\t6b, 11b\n\t" \
449 STR(PTR)"\t7b, 11b\n\t" \
450 STR(PTR)"\t8b, 11b\n\t" \
451 ".previous" \
452 : "=&r" (res) \
453 : "r" (value), "r" (addr), "i" (-EFAULT) \
454 : "memory");
455#endif /* CONFIG_CPU_MIPSR6 */
456
457#else /* __BIG_ENDIAN */
267 458
268#ifdef __LITTLE_ENDIAN
269#define LoadHW(addr, value, res) \ 459#define LoadHW(addr, value, res) \
270 __asm__ __volatile__ (".set\tnoat\n" \ 460 __asm__ __volatile__ (".set\tnoat\n" \
271 "1:\t"user_lb("%0", "1(%2)")"\n" \ 461 "1:\t"user_lb("%0", "1(%2)")"\n" \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
286 : "=&r" (value), "=r" (res) \ 476 : "=&r" (value), "=r" (res) \
287 : "r" (addr), "i" (-EFAULT)); 477 : "r" (addr), "i" (-EFAULT));
288 478
479#ifndef CONFIG_CPU_MIPSR6
289#define LoadW(addr, value, res) \ 480#define LoadW(addr, value, res) \
290 __asm__ __volatile__ ( \ 481 __asm__ __volatile__ ( \
291 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 482 "1:\t"user_lwl("%0", "3(%2)")"\n" \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
303 ".previous" \ 494 ".previous" \
304 : "=&r" (value), "=r" (res) \ 495 : "=&r" (value), "=r" (res) \
305 : "r" (addr), "i" (-EFAULT)); 496 : "r" (addr), "i" (-EFAULT));
497#else
498/* MIPSR6 has no lwl instruction */
499#define LoadW(addr, value, res) \
500 __asm__ __volatile__ ( \
501 ".set\tpush\n" \
502 ".set\tnoat\n\t" \
503 "1:"user_lb("%0", "3(%2)")"\n\t" \
504 "2:"user_lbu("$1", "2(%2)")"\n\t" \
505 "sll\t%0, 0x8\n\t" \
506 "or\t%0, $1\n\t" \
507 "3:"user_lbu("$1", "1(%2)")"\n\t" \
508 "sll\t%0, 0x8\n\t" \
509 "or\t%0, $1\n\t" \
510 "4:"user_lbu("$1", "0(%2)")"\n\t" \
511 "sll\t%0, 0x8\n\t" \
512 "or\t%0, $1\n\t" \
513 "li\t%1, 0\n" \
514 ".set\tpop\n" \
515 "10:\n\t" \
516 ".insn\n\t" \
517 ".section\t.fixup,\"ax\"\n\t" \
518 "11:\tli\t%1, %3\n\t" \
519 "j\t10b\n\t" \
520 ".previous\n\t" \
521 ".section\t__ex_table,\"a\"\n\t" \
522 STR(PTR)"\t1b, 11b\n\t" \
523 STR(PTR)"\t2b, 11b\n\t" \
524 STR(PTR)"\t3b, 11b\n\t" \
525 STR(PTR)"\t4b, 11b\n\t" \
526 ".previous" \
527 : "=&r" (value), "=r" (res) \
528 : "r" (addr), "i" (-EFAULT));
529#endif /* CONFIG_CPU_MIPSR6 */
530
306 531
307#define LoadHWU(addr, value, res) \ 532#define LoadHWU(addr, value, res) \
308 __asm__ __volatile__ ( \ 533 __asm__ __volatile__ ( \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
326 : "=&r" (value), "=r" (res) \ 551 : "=&r" (value), "=r" (res) \
327 : "r" (addr), "i" (-EFAULT)); 552 : "r" (addr), "i" (-EFAULT));
328 553
554#ifndef CONFIG_CPU_MIPSR6
329#define LoadWU(addr, value, res) \ 555#define LoadWU(addr, value, res) \
330 __asm__ __volatile__ ( \ 556 __asm__ __volatile__ ( \
331 "1:\t"user_lwl("%0", "3(%2)")"\n" \ 557 "1:\t"user_lwl("%0", "3(%2)")"\n" \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
363 ".previous" \ 589 ".previous" \
364 : "=&r" (value), "=r" (res) \ 590 : "=&r" (value), "=r" (res) \
365 : "r" (addr), "i" (-EFAULT)); 591 : "r" (addr), "i" (-EFAULT));
592#else
593/* MIPSR6 has no lwl and ldl instructions */
594#define LoadWU(addr, value, res) \
595 __asm__ __volatile__ ( \
596 ".set\tpush\n\t" \
597 ".set\tnoat\n\t" \
598 "1:"user_lbu("%0", "3(%2)")"\n\t" \
599 "2:"user_lbu("$1", "2(%2)")"\n\t" \
600 "sll\t%0, 0x8\n\t" \
601 "or\t%0, $1\n\t" \
602 "3:"user_lbu("$1", "1(%2)")"\n\t" \
603 "sll\t%0, 0x8\n\t" \
604 "or\t%0, $1\n\t" \
605 "4:"user_lbu("$1", "0(%2)")"\n\t" \
606 "sll\t%0, 0x8\n\t" \
607 "or\t%0, $1\n\t" \
608 "li\t%1, 0\n" \
609 ".set\tpop\n" \
610 "10:\n\t" \
611 ".insn\n\t" \
612 ".section\t.fixup,\"ax\"\n\t" \
613 "11:\tli\t%1, %3\n\t" \
614 "j\t10b\n\t" \
615 ".previous\n\t" \
616 ".section\t__ex_table,\"a\"\n\t" \
617 STR(PTR)"\t1b, 11b\n\t" \
618 STR(PTR)"\t2b, 11b\n\t" \
619 STR(PTR)"\t3b, 11b\n\t" \
620 STR(PTR)"\t4b, 11b\n\t" \
621 ".previous" \
622 : "=&r" (value), "=r" (res) \
623 : "r" (addr), "i" (-EFAULT));
624
625#define LoadDW(addr, value, res) \
626 __asm__ __volatile__ ( \
627 ".set\tpush\n\t" \
628 ".set\tnoat\n\t" \
629 "1:lb\t%0, 7(%2)\n\t" \
630 "2:lbu\t$1, 6(%2)\n\t" \
631 "dsll\t%0, 0x8\n\t" \
632 "or\t%0, $1\n\t" \
633 "3:lbu\t$1, 5(%2)\n\t" \
634 "dsll\t%0, 0x8\n\t" \
635 "or\t%0, $1\n\t" \
636 "4:lbu\t$1, 4(%2)\n\t" \
637 "dsll\t%0, 0x8\n\t" \
638 "or\t%0, $1\n\t" \
639 "5:lbu\t$1, 3(%2)\n\t" \
640 "dsll\t%0, 0x8\n\t" \
641 "or\t%0, $1\n\t" \
642 "6:lbu\t$1, 2(%2)\n\t" \
643 "dsll\t%0, 0x8\n\t" \
644 "or\t%0, $1\n\t" \
645 "7:lbu\t$1, 1(%2)\n\t" \
646 "dsll\t%0, 0x8\n\t" \
647 "or\t%0, $1\n\t" \
648 "8:lbu\t$1, 0(%2)\n\t" \
649 "dsll\t%0, 0x8\n\t" \
650 "or\t%0, $1\n\t" \
651 "li\t%1, 0\n" \
652 ".set\tpop\n\t" \
653 "10:\n\t" \
654 ".insn\n\t" \
655 ".section\t.fixup,\"ax\"\n\t" \
656 "11:\tli\t%1, %3\n\t" \
657 "j\t10b\n\t" \
658 ".previous\n\t" \
659 ".section\t__ex_table,\"a\"\n\t" \
660 STR(PTR)"\t1b, 11b\n\t" \
661 STR(PTR)"\t2b, 11b\n\t" \
662 STR(PTR)"\t3b, 11b\n\t" \
663 STR(PTR)"\t4b, 11b\n\t" \
664 STR(PTR)"\t5b, 11b\n\t" \
665 STR(PTR)"\t6b, 11b\n\t" \
666 STR(PTR)"\t7b, 11b\n\t" \
667 STR(PTR)"\t8b, 11b\n\t" \
668 ".previous" \
669 : "=&r" (value), "=r" (res) \
670 : "r" (addr), "i" (-EFAULT));
671#endif /* CONFIG_CPU_MIPSR6 */
366 672
367#define StoreHW(addr, value, res) \ 673#define StoreHW(addr, value, res) \
368 __asm__ __volatile__ ( \ 674 __asm__ __volatile__ ( \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
384 ".previous" \ 690 ".previous" \
385 : "=r" (res) \ 691 : "=r" (res) \
386 : "r" (value), "r" (addr), "i" (-EFAULT)); 692 : "r" (value), "r" (addr), "i" (-EFAULT));
387 693#ifndef CONFIG_CPU_MIPSR6
388#define StoreW(addr, value, res) \ 694#define StoreW(addr, value, res) \
389 __asm__ __volatile__ ( \ 695 __asm__ __volatile__ ( \
390 "1:\t"user_swl("%1", "3(%2)")"\n" \ 696 "1:\t"user_swl("%1", "3(%2)")"\n" \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
420 ".previous" \ 726 ".previous" \
421 : "=r" (res) \ 727 : "=r" (res) \
422 : "r" (value), "r" (addr), "i" (-EFAULT)); 728 : "r" (value), "r" (addr), "i" (-EFAULT));
729#else
730/* MIPSR6 has no swl and sdl instructions */
731#define StoreW(addr, value, res) \
732 __asm__ __volatile__ ( \
733 ".set\tpush\n\t" \
734 ".set\tnoat\n\t" \
735 "1:"user_sb("%1", "0(%2)")"\n\t" \
736 "srl\t$1, %1, 0x8\n\t" \
737 "2:"user_sb("$1", "1(%2)")"\n\t" \
738 "srl\t$1, $1, 0x8\n\t" \
739 "3:"user_sb("$1", "2(%2)")"\n\t" \
740 "srl\t$1, $1, 0x8\n\t" \
741 "4:"user_sb("$1", "3(%2)")"\n\t" \
742 ".set\tpop\n\t" \
743 "li\t%0, 0\n" \
744 "10:\n\t" \
745 ".insn\n\t" \
746 ".section\t.fixup,\"ax\"\n\t" \
747 "11:\tli\t%0, %3\n\t" \
748 "j\t10b\n\t" \
749 ".previous\n\t" \
750 ".section\t__ex_table,\"a\"\n\t" \
751 STR(PTR)"\t1b, 11b\n\t" \
752 STR(PTR)"\t2b, 11b\n\t" \
753 STR(PTR)"\t3b, 11b\n\t" \
754 STR(PTR)"\t4b, 11b\n\t" \
755 ".previous" \
756 : "=&r" (res) \
757 : "r" (value), "r" (addr), "i" (-EFAULT) \
758 : "memory");
759
760#define StoreDW(addr, value, res) \
761 __asm__ __volatile__ ( \
762 ".set\tpush\n\t" \
763 ".set\tnoat\n\t" \
764 "1:sb\t%1, 0(%2)\n\t" \
765 "dsrl\t$1, %1, 0x8\n\t" \
766 "2:sb\t$1, 1(%2)\n\t" \
767 "dsrl\t$1, $1, 0x8\n\t" \
768 "3:sb\t$1, 2(%2)\n\t" \
769 "dsrl\t$1, $1, 0x8\n\t" \
770 "4:sb\t$1, 3(%2)\n\t" \
771 "dsrl\t$1, $1, 0x8\n\t" \
772 "5:sb\t$1, 4(%2)\n\t" \
773 "dsrl\t$1, $1, 0x8\n\t" \
774 "6:sb\t$1, 5(%2)\n\t" \
775 "dsrl\t$1, $1, 0x8\n\t" \
776 "7:sb\t$1, 6(%2)\n\t" \
777 "dsrl\t$1, $1, 0x8\n\t" \
778 "8:sb\t$1, 7(%2)\n\t" \
779 "dsrl\t$1, $1, 0x8\n\t" \
780 ".set\tpop\n\t" \
781 "li\t%0, 0\n" \
782 "10:\n\t" \
783 ".insn\n\t" \
784 ".section\t.fixup,\"ax\"\n\t" \
785 "11:\tli\t%0, %3\n\t" \
786 "j\t10b\n\t" \
787 ".previous\n\t" \
788 ".section\t__ex_table,\"a\"\n\t" \
789 STR(PTR)"\t1b, 11b\n\t" \
790 STR(PTR)"\t2b, 11b\n\t" \
791 STR(PTR)"\t3b, 11b\n\t" \
792 STR(PTR)"\t4b, 11b\n\t" \
793 STR(PTR)"\t5b, 11b\n\t" \
794 STR(PTR)"\t6b, 11b\n\t" \
795 STR(PTR)"\t7b, 11b\n\t" \
796 STR(PTR)"\t8b, 11b\n\t" \
797 ".previous" \
798 : "=&r" (res) \
799 : "r" (value), "r" (addr), "i" (-EFAULT) \
800 : "memory");
801#endif /* CONFIG_CPU_MIPSR6 */
423#endif 802#endif
424 803
425static void emulate_load_store_insn(struct pt_regs *regs, 804static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
703 break; 1082 break;
704 return; 1083 return;
705 1084
1085#ifndef CONFIG_CPU_MIPSR6
706 /* 1086 /*
707 * COP2 is available to the implementor for application-specific use. 1087 * COP2 is available to the implementor for application-specific use.
708 * It's up to applications to register a notifier chain and do 1088 * It's up to applications to register a notifier chain and do
709 * whatever they have to do, including possible sending of signals. 1089 * whatever they have to do, including possible sending of signals.
1090 *
1091 * These opcodes have been reallocated in Release 6.
710 */ 1092 */
711 case lwc2_op: 1093 case lwc2_op:
712 cu2_notifier_call_chain(CU2_LWC2_OP, regs); 1094 cu2_notifier_call_chain(CU2_LWC2_OP, regs);
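For reference, the notifier chain the comment above refers to is hooked roughly as in the sketch below (my_cu2_call() and my_cu2_init() are hypothetical; the cu2_notifier() helper and CU2_* actions are assumed from <asm/cop2.h>):

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/cop2.h>

/* Hypothetical handler claiming COP2 load/store traps for a platform. */
static int my_cu2_call(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	switch (action) {
	case CU2_LWC2_OP:
	case CU2_SWC2_OP:
		/* enable or emulate the coprocessor access here */
		return NOTIFY_STOP;	/* handled; skip the default SIGILL */
	}
	return NOTIFY_OK;		/* not ours, default handling */
}

static int __init my_cu2_init(void)
{
	return cu2_notifier(my_cu2_call, 0);
}
arch_initcall(my_cu2_init);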
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
723 case sdc2_op: 1105 case sdc2_op:
724 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1106 cu2_notifier_call_chain(CU2_SDC2_OP, regs);
725 break; 1107 break;
726 1108#endif
727 default: 1109 default:
728 /* 1110 /*
729 * Pheeee... We encountered a yet-unknown instruction or 1111 * Pheeee... We encountered a yet-unknown instruction or
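With lwl/lwr gone in R6, the replacement LoadW/LoadWU/LoadDW macros above assemble the value one byte at a time. In plain C, the big-endian 32-bit case computes roughly this (sketch only; load_w_be() is not in the tree):

#include <stdint.h>

/* Sketch of the big-endian R6 LoadW: four byte loads shifted and or'd
 * together; casting the 32-bit result back to int32_t mirrors the sign
 * extension the initial lb provides in the asm. */
static inline int32_t load_w_be(const uint8_t *p)
{
	uint32_t v = ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		     ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];

	return (int32_t)v;
}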
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index eeddc58802e1..1e9e900cd3c3 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -8,6 +8,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
8 8
9obj-y += iomap.o 9obj-y += iomap.o
10obj-$(CONFIG_PCI) += iomap-pci.o 10obj-$(CONFIG_PCI) += iomap-pci.o
11lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
11 12
12obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o 13obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
13obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o 14obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 5d3238af9b5c..9245e1705e69 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -293,9 +293,14 @@
293 and t0, src, ADDRMASK 293 and t0, src, ADDRMASK
294 PREFS( 0, 2*32(src) ) 294 PREFS( 0, 2*32(src) )
295 PREFD( 1, 2*32(dst) ) 295 PREFD( 1, 2*32(dst) )
296#ifndef CONFIG_CPU_MIPSR6
296 bnez t1, .Ldst_unaligned\@ 297 bnez t1, .Ldst_unaligned\@
297 nop 298 nop
298 bnez t0, .Lsrc_unaligned_dst_aligned\@ 299 bnez t0, .Lsrc_unaligned_dst_aligned\@
300#else
301 or t0, t0, t1
302 bnez t0, .Lcopy_unaligned_bytes\@
303#endif
299 /* 304 /*
300 * use delay slot for fall-through 305 * use delay slot for fall-through
301 * src and dst are aligned; need to compute rem 306 * src and dst are aligned; need to compute rem
@@ -376,6 +381,7 @@
376 bne rem, len, 1b 381 bne rem, len, 1b
377 .set noreorder 382 .set noreorder
378 383
384#ifndef CONFIG_CPU_MIPSR6
379 /* 385 /*
380 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 386 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
381 * A loop would do only a byte at a time with possible branch 387 * A loop would do only a byte at a time with possible branch
@@ -477,6 +483,7 @@
477 bne len, rem, 1b 483 bne len, rem, 1b
478 .set noreorder 484 .set noreorder
479 485
486#endif /* !CONFIG_CPU_MIPSR6 */
480.Lcopy_bytes_checklen\@: 487.Lcopy_bytes_checklen\@:
481 beqz len, .Ldone\@ 488 beqz len, .Ldone\@
482 nop 489 nop
@@ -504,6 +511,22 @@
504.Ldone\@: 511.Ldone\@:
505 jr ra 512 jr ra
506 nop 513 nop
514
515#ifdef CONFIG_CPU_MIPSR6
516.Lcopy_unaligned_bytes\@:
5171:
518 COPY_BYTE(0)
519 COPY_BYTE(1)
520 COPY_BYTE(2)
521 COPY_BYTE(3)
522 COPY_BYTE(4)
523 COPY_BYTE(5)
524 COPY_BYTE(6)
525 COPY_BYTE(7)
526 ADD src, src, 8
527 b 1b
528 ADD dst, dst, 8
529#endif /* CONFIG_CPU_MIPSR6 */
507 .if __memcpy == 1 530 .if __memcpy == 1
508 END(memcpy) 531 END(memcpy)
509 .set __memcpy, 0 532 .set __memcpy, 0
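
With the lwl/lwr-based paths compiled out on R6, any misaligned src or dst now branches to the new .Lcopy_unaligned_bytes label, an unrolled byte loop. Roughly the following C, a sketch of the fallback only (the aligned fast path is unchanged):

#include <stddef.h>
#include <stdint.h>

/*
 * C equivalent of the .Lcopy_unaligned_bytes loop: a plain byte
 * copy, unrolled by eight in the assembly (COPY_BYTE(0)..(7)).
 * In the real code each COPY_BYTE exits via .Ldone when len hits
 * zero; here that is simply the loop condition.
 */
static void copy_unaligned_bytes(uint8_t *dst, const uint8_t *src,
				 size_t len)
{
	while (len--)
		*dst++ = *src++;
}
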
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index c8fe6b1968fb..b8e63fd00375 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -111,6 +111,7 @@
111 .set at 111 .set at
112#endif 112#endif
113 113
114#ifndef CONFIG_CPU_MIPSR6
114 R10KCBARRIER(0(ra)) 115 R10KCBARRIER(0(ra))
115#ifdef __MIPSEB__ 116#ifdef __MIPSEB__
116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ 117 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
@@ -120,6 +121,30 @@
120 PTR_SUBU a0, t0 /* long align ptr */ 121 PTR_SUBU a0, t0 /* long align ptr */
121 PTR_ADDU a2, t0 /* correct size */ 122 PTR_ADDU a2, t0 /* correct size */
122 123
124#else /* CONFIG_CPU_MIPSR6 */
125#define STORE_BYTE(N) \
126 EX(sb, a1, N(a0), .Lbyte_fixup\@); \
127 beqz t0, 0f; \
128 PTR_ADDU t0, 1;
129
130 PTR_ADDU a2, t0 /* correct size */
131 PTR_ADDU t0, 1
132 STORE_BYTE(0)
133 STORE_BYTE(1)
134#if LONGSIZE == 4
135 EX(sb, a1, 2(a0), .Lbyte_fixup\@)
136#else
137 STORE_BYTE(2)
138 STORE_BYTE(3)
139 STORE_BYTE(4)
140 STORE_BYTE(5)
141 EX(sb, a1, 6(a0), .Lbyte_fixup\@)
142#endif
1430:
144 ori a0, STORMASK
145 xori a0, STORMASK
146 PTR_ADDIU a0, STORSIZE
147#endif /* CONFIG_CPU_MIPSR6 */
1231: ori t1, a2, 0x3f /* # of full blocks */ 1481: ori t1, a2, 0x3f /* # of full blocks */
124 xori t1, 0x3f 149 xori t1, 0x3f
125 beqz t1, .Lmemset_partial\@ /* no block to fill */ 150 beqz t1, .Lmemset_partial\@ /* no block to fill */
@@ -159,6 +184,7 @@
159 andi a2, STORMASK /* At most one long to go */ 184 andi a2, STORMASK /* At most one long to go */
160 185
161 beqz a2, 1f 186 beqz a2, 1f
187#ifndef CONFIG_CPU_MIPSR6
162 PTR_ADDU a0, a2 /* What's left */ 188 PTR_ADDU a0, a2 /* What's left */
163 R10KCBARRIER(0(ra)) 189 R10KCBARRIER(0(ra))
164#ifdef __MIPSEB__ 190#ifdef __MIPSEB__
@@ -166,6 +192,22 @@
166#else 192#else
167 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 193 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
168#endif 194#endif
195#else
196 PTR_SUBU t0, $0, a2
197 PTR_ADDIU t0, 1
198 STORE_BYTE(0)
199 STORE_BYTE(1)
200#if LONGSIZE == 4
201 EX(sb, a1, 2(a0), .Lbyte_fixup\@)
202#else
203 STORE_BYTE(2)
204 STORE_BYTE(3)
205 STORE_BYTE(4)
206 STORE_BYTE(5)
207 EX(sb, a1, 6(a0), .Lbyte_fixup\@)
208#endif
2090:
210#endif
1691: jr ra 2111: jr ra
170 move a2, zero 212 move a2, zero
171 213
@@ -186,6 +228,11 @@
186 .hidden __memset 228 .hidden __memset
187 .endif 229 .endif
188 230
231.Lbyte_fixup\@:
232 PTR_SUBU a2, $0, t0
233 jr ra
234 PTR_ADDIU a2, 1
235
189.Lfirst_fixup\@: 236.Lfirst_fixup\@:
190 jr ra 237 jr ra
191 nop 238 nop
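
memset.S follows the same pattern: with LONG_S_L/LONG_S_R gone, the R6 prologue stores single bytes through STORE_BYTE() until the destination is long-aligned, and the epilogue does the same for the tail. A C sketch of the prologue's effect, assuming sizeof(long) matches LONGSIZE and ignoring the .Lbyte_fixup exception path:

#include <stddef.h>
#include <stdint.h>

/*
 * Sketch of the R6 memset prologue: byte stores until the pointer
 * is long-aligned, with the remaining length adjusted to match.
 * Mirrors the unrolled STORE_BYTE() sequence, written as a loop.
 */
static uint8_t *memset_align_head(uint8_t *p, uint8_t c, size_t *len)
{
	while (((uintptr_t)p & (sizeof(long) - 1)) && *len) {
		*p++ = c;	/* one EX(sb, a1, N(a0), ...) */
		(*len)--;
	}
	return p;		/* long-aligned, or *len == 0 */
}
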
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index be777d9a3f85..272af8ac2425 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/stringify.h> 16#include <linux/stringify.h>
17 17
18#ifndef CONFIG_CPU_MIPSR2 18#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
19 19
20/* 20/*
21 * For cli() we have to insert nops to make sure that the new value 21 * For cli() we have to insert nops to make sure that the new value
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 9dfcd7fc1bc3..b30bf65c7d7d 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -48,6 +48,7 @@
48#include <asm/processor.h> 48#include <asm/processor.h>
49#include <asm/fpu_emulator.h> 49#include <asm/fpu_emulator.h>
50#include <asm/fpu.h> 50#include <asm/fpu.h>
51#include <asm/mips-r2-to-r6-emul.h>
51 52
52#include "ieee754.h" 53#include "ieee754.h"
53 54
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
68#define modeindex(v) ((v) & FPU_CSR_RM) 69#define modeindex(v) ((v) & FPU_CSR_RM)
69 70
70/* convert condition code register number to csr bit */ 71/* convert condition code register number to csr bit */
71static const unsigned int fpucondbit[8] = { 72const unsigned int fpucondbit[8] = {
72 FPU_CSR_COND0, 73 FPU_CSR_COND0,
73 FPU_CSR_COND1, 74 FPU_CSR_COND1,
74 FPU_CSR_COND2, 75 FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
448 dec_insn.next_pc_inc; 449 dec_insn.next_pc_inc;
449 /* Fall through */ 450 /* Fall through */
450 case jr_op: 451 case jr_op:
452 /* For R6, JR already emulated in jalr_op */
453 if (NO_R6EMU && insn.r_format.opcode == jr_op)
454 break;
451 *contpc = regs->regs[insn.r_format.rs]; 455 *contpc = regs->regs[insn.r_format.rs];
452 return 1; 456 return 1;
453 } 457 }
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
456 switch (insn.i_format.rt) { 460 switch (insn.i_format.rt) {
457 case bltzal_op: 461 case bltzal_op:
458 case bltzall_op: 462 case bltzall_op:
463 if (NO_R6EMU && (insn.i_format.rs ||
464 insn.i_format.rt == bltzall_op))
465 break;
466
459 regs->regs[31] = regs->cp0_epc + 467 regs->regs[31] = regs->cp0_epc +
460 dec_insn.pc_inc + 468 dec_insn.pc_inc +
461 dec_insn.next_pc_inc; 469 dec_insn.next_pc_inc;
462 /* Fall through */ 470 /* Fall through */
463 case bltz_op:
464 case bltzl_op: 471 case bltzl_op:
472 if (NO_R6EMU)
473 break;
474 case bltz_op:
465 if ((long)regs->regs[insn.i_format.rs] < 0) 475 if ((long)regs->regs[insn.i_format.rs] < 0)
466 *contpc = regs->cp0_epc + 476 *contpc = regs->cp0_epc +
467 dec_insn.pc_inc + 477 dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
473 return 1; 483 return 1;
474 case bgezal_op: 484 case bgezal_op:
475 case bgezall_op: 485 case bgezall_op:
486 if (NO_R6EMU && (insn.i_format.rs ||
487 insn.i_format.rt == bgezall_op))
488 break;
489
476 regs->regs[31] = regs->cp0_epc + 490 regs->regs[31] = regs->cp0_epc +
477 dec_insn.pc_inc + 491 dec_insn.pc_inc +
478 dec_insn.next_pc_inc; 492 dec_insn.next_pc_inc;
479 /* Fall through */ 493 /* Fall through */
480 case bgez_op:
481 case bgezl_op: 494 case bgezl_op:
495 if (NO_R6EMU)
496 break;
497 case bgez_op:
482 if ((long)regs->regs[insn.i_format.rs] >= 0) 498 if ((long)regs->regs[insn.i_format.rs] >= 0)
483 *contpc = regs->cp0_epc + 499 *contpc = regs->cp0_epc +
484 dec_insn.pc_inc + 500 dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
505 /* Set microMIPS mode bit: XOR for jalx. */ 521 /* Set microMIPS mode bit: XOR for jalx. */
506 *contpc ^= bit; 522 *contpc ^= bit;
507 return 1; 523 return 1;
508 case beq_op:
509 case beql_op: 524 case beql_op:
525 if (NO_R6EMU)
526 break;
527 case beq_op:
510 if (regs->regs[insn.i_format.rs] == 528 if (regs->regs[insn.i_format.rs] ==
511 regs->regs[insn.i_format.rt]) 529 regs->regs[insn.i_format.rt])
512 *contpc = regs->cp0_epc + 530 *contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
517 dec_insn.pc_inc + 535 dec_insn.pc_inc +
518 dec_insn.next_pc_inc; 536 dec_insn.next_pc_inc;
519 return 1; 537 return 1;
520 case bne_op:
521 case bnel_op: 538 case bnel_op:
539 if (NO_R6EMU)
540 break;
541 case bne_op:
522 if (regs->regs[insn.i_format.rs] != 542 if (regs->regs[insn.i_format.rs] !=
523 regs->regs[insn.i_format.rt]) 543 regs->regs[insn.i_format.rt])
524 *contpc = regs->cp0_epc + 544 *contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
529 dec_insn.pc_inc + 549 dec_insn.pc_inc +
530 dec_insn.next_pc_inc; 550 dec_insn.next_pc_inc;
531 return 1; 551 return 1;
532 case blez_op:
533 case blezl_op: 552 case blezl_op:
553 if (NO_R6EMU)
554 break;
555 case blez_op:
556
557 /*
558 * Compact branches that R6 maps onto the
559 * blez and blezl opcodes.
560 * BLEZ | rs = 0 | rt != 0 == BLEZALC
561 * BLEZ | rs = rt != 0 == BGEZALC
562 * BLEZ | rs != 0 | rt != 0 == BGEUC
563 * BLEZL | rs = 0 | rt != 0 == BLEZC
564 * BLEZL | rs = rt != 0 == BGEZC
565 * BLEZL | rs != 0 | rt != 0 == BGEC
566 *
567 * For real BLEZ{,L}, rt is always 0.
568 */
569 if (cpu_has_mips_r6 && insn.i_format.rt) {
570 if ((insn.i_format.opcode == blez_op) &&
571 ((!insn.i_format.rs && insn.i_format.rt) ||
572 (insn.i_format.rs == insn.i_format.rt)))
573 regs->regs[31] = regs->cp0_epc +
574 dec_insn.pc_inc;
575 *contpc = regs->cp0_epc + dec_insn.pc_inc +
576 dec_insn.next_pc_inc;
577
578 return 1;
579 }
534 if ((long)regs->regs[insn.i_format.rs] <= 0) 580 if ((long)regs->regs[insn.i_format.rs] <= 0)
535 *contpc = regs->cp0_epc + 581 *contpc = regs->cp0_epc +
536 dec_insn.pc_inc + 582 dec_insn.pc_inc +
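
The table in the comment above reads as a decode rule: rt != 0 turns BLEZ/BLEZL into a compact branch, and the BLEZ-opcode forms with rs == 0 or rs == rt (BLEZALC/BGEZALC) additionally link. A stand-alone sketch of that rule for the BLEZ opcode (register fields as plain integers; the BLEZL-opcode forms never link):

#include <stdbool.h>
#include <stdint.h>

/*
 * Decode rule for the R6 reuse of the BLEZ opcode: rt == 0 means a
 * real BLEZ; otherwise the word is a compact branch, and the
 * BLEZALC/BGEZALC forms (rs == 0 or rs == rt) also write the return
 * address to $31.  BGEUC (rs != 0, rs != rt) does not link.
 */
static bool blez_is_compact_r6(uint32_t rs, uint32_t rt, bool *links)
{
	if (!rt) {
		*links = false;
		return false;		/* ordinary BLEZ */
	}
	*links = !rs || rs == rt;	/* BLEZALC / BGEZALC */
	return true;
}
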
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
540 dec_insn.pc_inc + 586 dec_insn.pc_inc +
541 dec_insn.next_pc_inc; 587 dec_insn.next_pc_inc;
542 return 1; 588 return 1;
543 case bgtz_op:
544 case bgtzl_op: 589 case bgtzl_op:
590 if (NO_R6EMU)
591 break;
592 case bgtz_op:
593 /*
594 * Compact branches for R6 for the
595 * bgtz and bgtzl opcodes.
596 * BGTZ | rs = 0 | rt != 0 == BGTZALC
597 * BGTZ | rs = rt != 0 == BLTZALC
598 * BGTZ | rs != 0 | rt != 0 == BLTUC
599 * BGTZL | rs = 0 | rt != 0 == BGTZC
600 * BGTZL | rs = rt != 0 == BLTZC
601 * BGTZL | rs != 0 | rt != 0 == BLTC
602 *
603 * *ZALC variant for BGTZ && rt != 0
604 * For real BGTZ{,L}, rt is always 0.
605 */
606 if (cpu_has_mips_r6 && insn.i_format.rt) {
607 if ((insn.i_format.opcode == bgtz_op) &&
608 ((!insn.i_format.rs && insn.i_format.rt) ||
609 (insn.i_format.rs == insn.i_format.rt)))
610 regs->regs[31] = regs->cp0_epc +
611 dec_insn.pc_inc;
612 *contpc = regs->cp0_epc + dec_insn.pc_inc +
613 dec_insn.next_pc_inc;
614
615 return 1;
616 }
617
545 if ((long)regs->regs[insn.i_format.rs] > 0) 618 if ((long)regs->regs[insn.i_format.rs] > 0)
546 *contpc = regs->cp0_epc + 619 *contpc = regs->cp0_epc +
547 dec_insn.pc_inc + 620 dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
551 dec_insn.pc_inc + 624 dec_insn.pc_inc +
552 dec_insn.next_pc_inc; 625 dec_insn.next_pc_inc;
553 return 1; 626 return 1;
627 case cbcond0_op:
628 case cbcond1_op:
629 if (!cpu_has_mips_r6)
630 break;
631 if (insn.i_format.rt && !insn.i_format.rs)
632 regs->regs[31] = regs->cp0_epc + 4;
633 *contpc = regs->cp0_epc + dec_insn.pc_inc +
634 dec_insn.next_pc_inc;
635
636 return 1;
554#ifdef CONFIG_CPU_CAVIUM_OCTEON 637#ifdef CONFIG_CPU_CAVIUM_OCTEON
555 case lwc2_op: /* This is bbit0 on Octeon */ 638 case lwc2_op: /* This is bbit0 on Octeon */
556 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) 639 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
576 else 659 else
577 *contpc = regs->cp0_epc + 8; 660 *contpc = regs->cp0_epc + 8;
578 return 1; 661 return 1;
662#else
663 case bc6_op:
664 /*
665 * Only valid for MIPS R6 but we can still end up
666 * here from a broken userland, so just tell the emulator
667 * this is not a branch and let it break later on.
668 */
669 if (!cpu_has_mips_r6)
670 break;
671 *contpc = regs->cp0_epc + dec_insn.pc_inc +
672 dec_insn.next_pc_inc;
673
674 return 1;
675 case balc6_op:
676 if (!cpu_has_mips_r6)
677 break;
678 regs->regs[31] = regs->cp0_epc + 4;
679 *contpc = regs->cp0_epc + dec_insn.pc_inc +
680 dec_insn.next_pc_inc;
681
682 return 1;
683 case beqzcjic_op:
684 if (!cpu_has_mips_r6)
685 break;
686 *contpc = regs->cp0_epc + dec_insn.pc_inc +
687 dec_insn.next_pc_inc;
688
689 return 1;
690 case bnezcjialc_op:
691 if (!cpu_has_mips_r6)
692 break;
693 if (!insn.i_format.rs)
694 regs->regs[31] = regs->cp0_epc + 4;
695 *contpc = regs->cp0_epc + dec_insn.pc_inc +
696 dec_insn.next_pc_inc;
697
698 return 1;
579#endif 699#endif
580 case cop0_op: 700 case cop0_op:
581 case cop1_op: 701 case cop1_op:
702 /* Need to check for R6 bc1nez and bc1eqz branches */
703 if (cpu_has_mips_r6 &&
704 ((insn.i_format.rs == bc1eqz_op) ||
705 (insn.i_format.rs == bc1nez_op))) {
706 bit = 0;
707 switch (insn.i_format.rs) {
708 case bc1eqz_op:
709 if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
710 bit = 1;
711 break;
712 case bc1nez_op:
713 if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
714 bit = 1;
715 break;
716 }
717 if (bit)
718 *contpc = regs->cp0_epc +
719 dec_insn.pc_inc +
720 (insn.i_format.simmediate << 2);
721 else
722 *contpc = regs->cp0_epc +
723 dec_insn.pc_inc +
724 dec_insn.next_pc_inc;
725
726 return 1;
727 }
728 /* R2/R6 compatible cop1 instruction. Fall through */
582 case cop2_op: 729 case cop2_op:
583 case cop1x_op: 730 case cop1x_op:
584 if (insn.i_format.rs == bc_op) { 731 if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1414 * achieve full IEEE-754 accuracy - however this emulator does. 1561 * achieve full IEEE-754 accuracy - however this emulator does.
1415 */ 1562 */
1416 case frsqrt_op: 1563 case frsqrt_op:
1417 if (!cpu_has_mips_4_5_r2) 1564 if (!cpu_has_mips_4_5_r2_r6)
1418 return SIGILL; 1565 return SIGILL;
1419 1566
1420 handler.u = fpemu_sp_rsqrt; 1567 handler.u = fpemu_sp_rsqrt;
1421 goto scopuop; 1568 goto scopuop;
1422 1569
1423 case frecip_op: 1570 case frecip_op:
1424 if (!cpu_has_mips_4_5_r2) 1571 if (!cpu_has_mips_4_5_r2_r6)
1425 return SIGILL; 1572 return SIGILL;
1426 1573
1427 handler.u = fpemu_sp_recip; 1574 handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
1616 * achieve full IEEE-754 accuracy - however this emulator does. 1763 * achieve full IEEE-754 accuracy - however this emulator does.
1617 */ 1764 */
1618 case frsqrt_op: 1765 case frsqrt_op:
1619 if (!cpu_has_mips_4_5_r2) 1766 if (!cpu_has_mips_4_5_r2_r6)
1620 return SIGILL; 1767 return SIGILL;
1621 1768
1622 handler.u = fpemu_dp_rsqrt; 1769 handler.u = fpemu_dp_rsqrt;
1623 goto dcopuop; 1770 goto dcopuop;
1624 case frecip_op: 1771 case frecip_op:
1625 if (!cpu_has_mips_4_5_r2) 1772 if (!cpu_has_mips_4_5_r2_r6)
1626 return SIGILL; 1773 return SIGILL;
1627 1774
1628 handler.u = fpemu_dp_recip; 1775 handler.u = fpemu_dp_recip;
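
Besides the compact branches, R6 replaces the condition-code based bc1t/bc1f with bc1eqz/bc1nez, which test bit 0 of an FPU register directly; the cop1 hunk above mirrors that. A stand-alone sketch of the continuation-PC computation (fpr_lo stands in for get_fpr32(&current->thread.fpu.fpr[ft], 0); the instruction sizes of 4 follow the pc_inc/next_pc_inc usage):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of bc1eqz/bc1nez emulation: the branch is taken when bit 0
 * of the FPR is clear (bc1eqz) or set (bc1nez).  epc is the address
 * of the branch; the signed 16-bit offset is in instruction words.
 */
static uint64_t bc1_r6_contpc(uint64_t epc, int32_t simm16,
			      uint32_t fpr_lo, bool is_bc1nez)
{
	bool bit = fpr_lo & 0x1;
	bool taken = is_bc1nez ? bit : !bit;

	if (taken)
		return epc + 4 + ((int64_t)simm16 << 2);
	return epc + 4 + 4;	/* fall through past the delay slot */
}
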
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index dd261df005c2..3f8059602765 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
794 __asm__ __volatile__ ( 794 __asm__ __volatile__ (
795 ".set push\n\t" 795 ".set push\n\t"
796 ".set noat\n\t" 796 ".set noat\n\t"
797 ".set mips3\n\t" 797 ".set "MIPS_ISA_LEVEL"\n\t"
798#ifdef CONFIG_32BIT 798#ifdef CONFIG_32BIT
799 "la $at,1f\n\t" 799 "la $at,1f\n\t"
800#endif 800#endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
1255 case CPU_P5600: 1255 case CPU_P5600:
1256 case CPU_PROAPTIV: 1256 case CPU_PROAPTIV:
1257 case CPU_M5150: 1257 case CPU_M5150:
1258 case CPU_QEMU_GENERIC:
1258 if (!(read_c0_config7() & MIPS_CONF7_IAR) && 1259 if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1259 (c->icache.waysize > PAGE_SIZE)) 1260 (c->icache.waysize > PAGE_SIZE))
1260 c->icache.flags |= MIPS_CACHE_ALIASES; 1261 c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
1472 1473
1473 default: 1474 default:
1474 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1475 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
1475 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1476 MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
1477 MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
1476#ifdef CONFIG_MIPS_CPU_SCACHE 1478#ifdef CONFIG_MIPS_CPU_SCACHE
1477 if (mips_sc_init ()) { 1479 if (mips_sc_init ()) {
1478 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; 1480 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index b611102e23b5..3f85f921801b 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
74 74
75/*
76 * The pref instruction has a limited 9-bit signed offset on R6.
77 * Skip the prefetch if the offset does not fit.
78 */
79#define _uasm_i_pref(a, b, c, d) \
80do { \
81 if (cpu_has_mips_r6) { \
82 if (c <= 0xff && c >= -0x100) \
83 uasm_i_pref(a, b, c, d);\
84 } else { \
85 uasm_i_pref(a, b, c, d); \
86 } \
87} while (0)
88
75static int pref_bias_clear_store; 89static int pref_bias_clear_store;
76static int pref_bias_copy_load; 90static int pref_bias_copy_load;
77static int pref_bias_copy_store; 91static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
178 pref_bias_copy_load = 256; 192 pref_bias_copy_load = 256;
179 pref_bias_copy_store = 128; 193 pref_bias_copy_store = 128;
180 pref_src_mode = Pref_LoadStreamed; 194 pref_src_mode = Pref_LoadStreamed;
181 pref_dst_mode = Pref_PrepareForStore; 195 if (cpu_has_mips_r6)
196 /*
197 * Bit 30 (Pref_PrepareForStore) has been
198 * removed from MIPS R6. Use bit 5
199 * (Pref_StoreStreamed).
200 */
201 pref_dst_mode = Pref_StoreStreamed;
202 else
203 pref_dst_mode = Pref_PrepareForStore;
182 break; 204 break;
183 } 205 }
184 } else { 206 } else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
214 return; 236 return;
215 237
216 if (pref_bias_clear_store) { 238 if (pref_bias_clear_store) {
217 uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 239 _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
218 A0); 240 A0);
219 } else if (cache_line_size == (half_clear_loop_size << 1)) { 241 } else if (cache_line_size == (half_clear_loop_size << 1)) {
220 if (cpu_has_cache_cdex_s) { 242 if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
357 return; 379 return;
358 380
359 if (pref_bias_copy_load) 381 if (pref_bias_copy_load)
360 uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 382 _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
361} 383}
362 384
363static inline void build_copy_store_pref(u32 **buf, int off) 385static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
366 return; 388 return;
367 389
368 if (pref_bias_copy_store) { 390 if (pref_bias_copy_store) {
369 uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 391 _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
370 A0); 392 A0);
371 } else if (cache_line_size == (half_copy_loop_size << 1)) { 393 } else if (cache_line_size == (half_copy_loop_size << 1)) {
372 if (cpu_has_cache_cdex_s) { 394 if (cpu_has_cache_cdex_s) {
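
_uasm_i_pref() above silently drops the prefetch when the offset cannot be encoded; the accepted window is exactly the signed 9-bit range of the new R6 encoding. Spelled out as a predicate (hypothetical helper name):

#include <stdbool.h>

/*
 * R6 moved pref from its own major opcode (16-bit offset) to a
 * SPECIAL3 form with a 9-bit signed offset, so only -256..255 can
 * be encoded.  Mirrors the "c <= 0xff && c >= -0x100" test in the
 * _uasm_i_pref() wrapper above.
 */
static bool pref_offset_fits_r6(long off)
{
	return off >= -0x100 && off <= 0xff;
}
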
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 99eb8fabab60..4ceafd13870c 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
81 case CPU_PROAPTIV: 81 case CPU_PROAPTIV:
82 case CPU_P5600: 82 case CPU_P5600:
83 case CPU_BMIPS5000: 83 case CPU_BMIPS5000:
84 case CPU_QEMU_GENERIC:
84 if (config2 & (1 << 12)) 85 if (config2 & (1 << 12))
85 return 0; 86 return 0;
86 } 87 }
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
104 105
105 /* Ignore anything but MIPSxx processors */ 106 /* Ignore anything but MIPSxx processors */
106 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 107 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
107 MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) 108 MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
109 MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
108 return 0; 110 return 0;
109 111
110 /* Does this MIPS32/MIPS64 CPU have a config2 register? */ 112 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 3978a3d81366..d75ff73a2012 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break;
502 } 502 }
503 503
504 if (cpu_has_mips_r2) { 504 if (cpu_has_mips_r2_exec_hazard) {
505 /* 505 /*
506 * The architecture spec says an ehb is required here, 506 * The architecture spec says an ehb is required here,
507 * but a number of cores do not have the hazard and 507 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
514 case CPU_PROAPTIV: 514 case CPU_PROAPTIV:
515 case CPU_P5600: 515 case CPU_P5600:
516 case CPU_M5150: 516 case CPU_M5150:
517 case CPU_QEMU_GENERIC:
517 break; 518 break;
518 519
519 default: 520 default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
1952 1953
1953 switch (current_cpu_type()) { 1954 switch (current_cpu_type()) {
1954 default: 1955 default:
1955 if (cpu_has_mips_r2) { 1956 if (cpu_has_mips_r2_exec_hazard) {
1956 uasm_i_ehb(&p); 1957 uasm_i_ehb(&p);
1957 1958
1958 case CPU_CAVIUM_OCTEON: 1959 case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
2019 2020
2020 switch (current_cpu_type()) { 2021 switch (current_cpu_type()) {
2021 default: 2022 default:
2022 if (cpu_has_mips_r2) { 2023 if (cpu_has_mips_r2_exec_hazard) {
2023 uasm_i_ehb(&p); 2024 uasm_i_ehb(&p);
2024 2025
2025 case CPU_CAVIUM_OCTEON: 2026 case CPU_CAVIUM_OCTEON:
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 8e02291cfc0c..855fc8a6a3c5 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -38,6 +38,14 @@
38 | (e) << RE_SH \ 38 | (e) << RE_SH \
39 | (f) << FUNC_SH) 39 | (f) << FUNC_SH)
40 40
41/* This macro sets the non-variable bits of an R6 instruction. */
42#define M6(a, b, c, d, e) \
43 ((a) << OP_SH \
44 | (b) << RS_SH \
45 | (c) << RT_SH \
46 | (d) << SIMM9_SH \
47 | (e) << FUNC_SH)
48
41/* Define these when we are not the ISA the kernel is being compiled with. */ 49/* Define these when we are not the ISA the kernel is being compiled with. */
42#ifdef CONFIG_CPU_MICROMIPS 50#ifdef CONFIG_CPU_MICROMIPS
43#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 51#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
@@ -62,7 +70,11 @@ static struct insn insn_table[] = {
62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 70 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 71 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 72 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
73#ifndef CONFIG_CPU_MIPSR6
65 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 74 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
75#else
76 { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
77#endif
66 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 78 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 79 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, 80 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +97,22 @@ static struct insn insn_table[] = {
85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 97 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
86 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 98 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
87 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 99 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
100#ifndef CONFIG_CPU_MIPSR6
88 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 101 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
102#else
103 { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS },
104#endif
89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 105 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 106 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 107 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
92 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 108 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
109#ifndef CONFIG_CPU_MIPSR6
93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 110 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 111 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
112#else
113 { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 },
114 { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 },
115#endif
95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 116 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
96 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 117 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
97 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, 118 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +125,20 @@ static struct insn insn_table[] = {
104 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 125 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
105 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 126 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
106 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 127 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
128#ifndef CONFIG_CPU_MIPSR6
107 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 129 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
130#else
131 { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 },
132#endif
108 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 133 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
109 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, 134 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
135#ifndef CONFIG_CPU_MIPSR6
110 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 136 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
111 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 137 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
138#else
139 { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 },
140 { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 },
141#endif
112 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 142 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
113 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 143 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
114 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, 144 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
@@ -198,6 +228,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
198 op |= build_set(va_arg(ap, u32)); 228 op |= build_set(va_arg(ap, u32));
199 if (ip->fields & SCIMM) 229 if (ip->fields & SCIMM)
200 op |= build_scimm(va_arg(ap, u32)); 230 op |= build_scimm(va_arg(ap, u32));
231 if (ip->fields & SIMM9)
232 op |= build_scimm9(va_arg(ap, u32));
201 va_end(ap); 233 va_end(ap);
202 234
203 **buf = op; 235 **buf = op;
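
M6() packs only the fixed fields of the new SPECIAL3-based R6 encodings; the signed 9-bit immediate is merged in later by build_scimm9() from the uasm.c hunk below. A self-contained sketch of the combined step, using the field positions from that hunk (the actual spec3_op/ll6_op-style opcode values live in the real headers and are not reproduced here):

#include <stdint.h>

/* Field positions as in uasm.c; SIMM9_* come from the hunk below. */
#define OP_SH      26
#define RS_SH      21
#define RT_SH      16
#define SIMM9_SH    7
#define SIMM9_MASK  0x1ff
#define FUNC_SH     0

/*
 * Sketch of M6() plus build_scimm9(): pack the fixed fields, then
 * insert the signed offset two's-complement truncated to the SIMM9
 * field.  E.g. off = -4 encodes as 0x1fc << 7.
 */
static uint32_t assemble_r6_simm9(uint32_t op, uint32_t rs, uint32_t rt,
				  int32_t off, uint32_t func)
{
	uint32_t insn = (op << OP_SH) | (rs << RS_SH) |
			(rt << RT_SH) | (func << FUNC_SH);

	return insn | (((uint32_t)off & SIMM9_MASK) << SIMM9_SH);
}
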
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 4adf30284813..f86d293463a6 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -24,7 +24,8 @@ enum fields {
24 JIMM = 0x080, 24 JIMM = 0x080,
25 FUNC = 0x100, 25 FUNC = 0x100,
26 SET = 0x200, 26 SET = 0x200,
27 SCIMM = 0x400 27 SCIMM = 0x400,
28 SIMM9 = 0x800,
28}; 29};
29 30
30#define OP_MASK 0x3f 31#define OP_MASK 0x3f
@@ -41,6 +42,8 @@ enum fields {
41#define FUNC_SH 0 42#define FUNC_SH 0
42#define SET_MASK 0x7 43#define SET_MASK 0x7
43#define SET_SH 0 44#define SET_SH 0
45#define SIMM9_SH 7
46#define SIMM9_MASK 0x1ff
44 47
45enum opcode { 48enum opcode {
46 insn_invalid, 49 insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
116 return (arg & SCIMM_MASK) << SCIMM_SH; 119 return (arg & SCIMM_MASK) << SCIMM_SH;
117} 120}
118 121
122static inline u32 build_scimm9(s32 arg)
123{
124 WARN((arg > 0xff || arg < -0x100),
125 KERN_WARNING "Micro-assembler field overflow\n");
126
127 return (arg & SIMM9_MASK) << SIMM9_SH;
128}
129
119static inline u32 build_func(u32 arg) 130static inline u32 build_func(u32 arg)
120{ 131{
121 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 132 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");