Diffstat (limited to 'arch/mips')
 108 files changed, 6409 insertions(+), 1129 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 843713c05b79..c7a16904cd03 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -54,6 +54,7 @@ config MIPS
 	select CPU_PM if CPU_IDLE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_BINFMT_ELF_STATE
+	select SYSCTL_EXCEPTION_TRACE
 
 menu "Machine selection"
 
@@ -376,8 +377,10 @@ config MIPS_MALTA
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
 	select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
 	def_bool n
 
+config GENERIC_CSUM
+	bool
+
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@ config SOC_PNX8335
 	bool
 	select SOC_PNX833X
 
+config MIPS_SPRAM
+	bool
+
 config SWAP_IO_SPACE
 	bool
 
@@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS32_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	select HAVE_KVM
+	select MIPS_O32_FP64_SUPPORT
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS32 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS32r6 processor. If you own an older
+	  processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS64_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS64 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS64r6 processor. If you own an older
+	  processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
 	bool "R3000"
 	depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
 	bool "MIPS32 Release 3.5 Features"
 	depends on SYS_HAS_CPU_MIPS32_R3_5
-	depends on CPU_MIPS32_R2
+	depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool
 
+config SYS_HAS_CPU_MIPS32_R6
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool
 
 config SYS_HAS_CPU_MIPS64_R2
 	bool
 
+config SYS_HAS_CPU_MIPS64_R6
+	bool
+
 config SYS_HAS_CPU_R3000
 	bool
 
@@ -1764,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
 	bool
-	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
 	bool
-	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select MIPS_SPRAM
+
+config CPU_MIPSR6
+	bool
+	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select MIPS_SPRAM
 
 config EVA
 	bool
@@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF
 	default y
 	depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+	bool "MIPS R2-to-R6 emulator"
+	depends on CPU_MIPSR6 && !SMP
+	default y
+	help
+	  Choose this option if you want to run non-R6 MIPS userland code.
+	  Even if you say 'Y' here, the emulator will still be disabled by
+	  default. You can enable it using the 'mipsr2emul' kernel option.
+	  The only reason this is a build-time option is to save ~14K from the
+	  final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+	depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
 	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
 	  here.
 
 config CPU_MICROMIPS
-	depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+	depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
 	bool "microMIPS"
 	help
 	  When this option is enabled the kernel will be built using the
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 88a9f433f6fc..3a2b775e8458 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
 	help
 	  Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-	bool "Run FP32 & FPXX code with hybrid FPRs"
-	depends on MIPS_O32_FP64_SUPPORT
-	help
-	  The hybrid FPR scheme is normally used only when a program needs to
-	  execute a mix of FP32 & FP64A code, since the trapping & emulation
-	  that it entails is expensive. When enabled, this option will lead
-	  to the kernel running programs which use the FP32 & FPXX FP ABIs
-	  using the hybrid FPR scheme, which can be useful for debugging
-	  purposes.
-
-	  If unsure, say N.
-
 endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 2563a088d3b8..8f57fc72d62c 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)		+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)	+= $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS)		+= $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 				   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS
 			-Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2)	+= $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 			-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6)	+= -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1)	+= $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2)	+= $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 			-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6)	+= -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)	+= -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)	+= $(call cc-option,-march=r5400,-march=r5000) \
 			-Wa,--trap
@@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)	+= -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1. This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS. The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3)	+= \
+	$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+	-Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS)	+= $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS)	+= $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
 endif
 endif
 
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags	:= "$(cflags-y)"
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)	+= $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+cflags-$(CONFIG_CPU_MICROMIPS)		+= $(call cc-option,$(mips-cflags),-mmicromips)
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # Firmware support
 #
@@ -287,7 +298,11 @@ boot-y += vmlinux.ecoff
 boot-y			+= vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y			+= uImage
+boot-y			+= uImage.bin
+boot-y			+= uImage.bz2
 boot-y			+= uImage.gz
+boot-y			+= uImage.lzma
+boot-y			+= uImage.lzo
 endif
 
 # compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@ define archhelp
 	echo '  vmlinuz.bin          - Raw binary zboot image'
 	echo '  vmlinuz.srec         - SREC zboot image'
 	echo '  uImage               - U-Boot image'
+	echo '  uImage.bin           - U-Boot image (uncompressed)'
+	echo '  uImage.bz2           - U-Boot image (bz2)'
 	echo '  uImage.gz            - U-Boot image (gzip)'
+	echo '  uImage.lzma          - U-Boot image (lzma)'
+	echo '  uImage.lzo           - U-Boot image (lzo)'
 	echo '  dtbs                 - Device-tree blobs for enabled boards'
 	echo
 	echo '  These will be default as appropriate for a configured platform.'
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index 48a9dfc55b51..6a98d2cb402c 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
 		t = 396000000;
 	else {
 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+			t &= 0x3f;
 		t *= parent_rate;
 	}
 
 	return t;
 }
 
+void __init alchemy_set_lpj(void)
+{
+	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+	preset_lpj /= 2 * HZ;
+}
+
 static struct clk_ops alchemy_clkops_cpu = {
 	.recalc_rate	= alchemy_clk_cpu_recalc,
 };
@@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
 
 /* lrclk: external synchronous static bus clock ***********************/
 
-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-	/* MEM_STCFG0[15:13] = divisor.
+	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+	 * otherwise lrclk=pclk/4.
+	 * All other variants: MEM_STCFG0[15:13] = divisor.
 	 * L/RCLK = periph_clk / (divisor + 1)
 	 * On Au1000, Au1500, Au1100 it's called LCLK,
 	 * on later models it's called RCLK, but it's the same thing.
 	 */
 	struct clk *c;
-	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
 
-	v = (v & 7) + 1;
+	switch (t) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+		v = 4 + ((v >> 11) & 1);
+		break;
+	default:	/* all other models */
+		v = ((v >> 13) & 7) + 1;
+	}
 	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
 				      pn, 0, 1, v);
 	if (!IS_ERR(c))
@@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
+				  unsigned long min_rate,
+				  unsigned long max_rate,
 				  unsigned long *best_parent_rate,
 				  struct clk_hw **best_parent_clk)
 {
@@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
+				  unsigned long min_rate,
+				  unsigned long max_rate,
 				  unsigned long *best_parent_rate,
 				  struct clk_hw **best_parent_clk)
 {
@@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
 }
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
+				  unsigned long min_rate,
+				  unsigned long max_rate,
 				  unsigned long *best_parent_rate,
 				  struct clk_hw **best_parent_clk)
 {
@@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void)
 	ERRCK(c)
 
 	/* L/RCLK: external static bus clock for synchronous mode */
-	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
 	ERRCK(c)
 
 	/* Frequency dividers 0-5 */
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 4e72daf12c32..2902138b3e0f 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -34,10 +34,12 @@
 #include <au1000.h>
 
 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);
 
 void __init plat_mem_setup(void)
 {
+	alchemy_set_lpj();
+
 	if (au1xxx_cpu_needs_config_od())
 		/* Various early Au1xx0 errata corrected by this */
 		set_c0_config(1 << 19);	/* Set Config[OD] */
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
index 0fb5134fb832..fd94fe849af6 100644
--- a/arch/mips/bcm3384/irq.c
+++ b/arch/mips/bcm3384/irq.c
@@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node,
 
 static struct of_device_id of_irq_ids[] __initdata = {
 	{ .compatible = "mti,cpu-interrupt-controller",
-	  .data = mips_cpu_intc_init },
+	  .data = mips_cpu_irq_of_init },
 	{ .compatible = "brcm,bcm3384-intc",
 	  .data = intc_of_init },
 	{},
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 1466c0026093..acb1988f354e 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -23,6 +23,12 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections))
 
 hostprogs-y := elf2ecoff
 
+suffix-y			:= bin
+suffix-$(CONFIG_KERNEL_BZIP2)	:= bz2
+suffix-$(CONFIG_KERNEL_GZIP)	:= gz
+suffix-$(CONFIG_KERNEL_LZMA)	:= lzma
+suffix-$(CONFIG_KERNEL_LZO)	:= lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF	  $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE
 UIMAGE_LOADADDR  = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
 
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+	$(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)
 
-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+	$(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+	$(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 	@echo '  Image $@ is ready'
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index 2a4c52e27f41..266c8137e859 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -268,7 +268,6 @@ int main(int argc, char *argv[])
 	Elf32_Ehdr ex;
 	Elf32_Phdr *ph;
 	Elf32_Shdr *sh;
-	char *shstrtab;
 	int i, pad;
 	struct sect text, data, bss;
 	struct filehdr efh;
@@ -336,9 +335,6 @@
 			  "sh");
 	if (must_convert_endian)
 		convert_elf_shdrs(sh, ex.e_shnum);
-	/* Read in the section string table. */
-	shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-			    sh[ex.e_shstrndx].sh_size, "shstrtab");
 
 	/* Figure out if we can cram the program header into an ECOFF
 	   header...  Basically, we can't handle anything but loadable
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index b752c4ed0b79..1882e6475dd0 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
 
 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
 		union cvmx_mio_rst_boot rst_boot;
+
 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
 		f = (0x8000000000000000ull / sdiv) * 2;
+	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+		union cvmx_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
 	}
+
 }
 
 /*
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 3778655c4a37..7d8987818ccf 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
 			continue;
 
 		/* These addresses map low for PCI. */
-		if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
 			continue;
 
 		addr_size += e->size;
@@ -308,7 +308,7 @@
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
 	/* OCTEON II ohci is only 32-bit. */
-	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
 		swiotlbsize = 64 * (1<<20);
 #endif
 	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 5dfef84b9576..9eb0feef4417 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
 		break;
 	}
 	/* Most boards except NIC10e use a 12MHz crystal */
-	if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+	if (OCTEON_IS_OCTEON2())
 		return USB_CLOCK_TYPE_CRYSTAL_12;
 	return USB_CLOCK_TYPE_REF_48;
 }
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 2bc4aa95944e..10f762557b92 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,12 +3,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+	int num_sum;	/* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-	void *p;
-	unsigned long l;
-	struct {
-		unsigned long line:6;
-		unsigned long bit:6;
-		unsigned long gpio_line:6;
-	} s;
+struct octeon_ciu_chip_data {
+	union {
+		struct {		/* only used for ciu3 */
+			u64 ciu3_addr;
+			unsigned int intsn;
+		};
+		struct {		/* only used for ciu/ciu2 */
+			u8 line;
+			u8 bit;
+			u8 gpio_line;
+		};
+	};
+	int current_cpu;	/* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
 				       struct irq_chip *chip,
 				       irq_flow_handler_t handler)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
+
+	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+	if (!cd)
+		return -ENOMEM;
 
 	irq_set_chip_and_handler(irq, chip, handler);
 
-	cd.l = 0;
-	cd.s.line = line;
-	cd.s.bit = bit;
-	cd.s.gpio_line = gpio_line;
+	cd->line = line;
+	cd->bit = bit;
+	cd->gpio_line = gpio_line;
 
-	irq_set_chip_data(irq, cd.p);
+	irq_set_chip_data(irq, cd);
 	octeon_irq_ciu_to_irq[line][bit] = irq;
+	return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-					 int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-	irq_domain_associate(domain, irq, line << 6 | bit);
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+	irq_set_chip_data(irq, NULL);
+	kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+					int irq, int line, int bit)
+{
+	return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
 #ifdef CONFIG_SMP
 	int cpu;
 	int weight = cpumask_weight(data->affinity);
+	struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
 	if (weight > 1) {
-		cpu = smp_processor_id();
+		cpu = cd->current_cpu;
 		for (;;) {
 			cpu = cpumask_next(cpu, data->affinity);
 			if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
 	} else {
 		cpu = smp_processor_id();
 	}
+	cd->current_cpu = cpu;
 	return cpu;
 #else
 	return smp_processor_id();
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
 	int coreid = octeon_coreid_for_cpu(cpu);
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 	} else {
 		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 {
 	unsigned long *pen;
 	unsigned long flags;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	raw_spin_lock_irqsave(lock, flags);
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
 	} else {
 		pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__clear_bit(cd.s.bit, pen);
+		__clear_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
 	unsigned long flags;
 	unsigned long *pen;
 	int cpu;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	raw_spinlock_t *lock;
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
 		else
 			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
 		raw_spin_lock_irqsave(lock, flags);
-		__set_bit(cd.s.bit, pen);
+		__set_bit(cd->bit, pen);
 		/*
 		 * Must be visible to octeon_irq_ip{2,3}_ciu() before
 		 * enabling the irq.
 		 */
 		wmb();
-		if (cd.s.line == 0)
+		if (cd->line == 0)
 			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
 		else
 			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 {
 	u64 mask;
 	int cpu = next_cpu_for_irq(data);
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
 	/*
 	 * Called under the desc lock, so these should never get out
 	 * of sync.
	 */
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = octeon_coreid_for_cpu(cpu) * 2;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+		set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
 
 /*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+	u64 mask;
+	int cpu = next_cpu_for_irq(data);
+	int index = octeon_coreid_for_cpu(cpu);
+	struct octeon_ciu_chip_data *cd;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+	int cpu;
+	struct octeon_ciu_chip_data *cd;
+	u64 mask;
+
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
+
+	for_each_online_cpu(cpu) {
+		int coreid = octeon_coreid_for_cpu(cpu);
+
+		cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+	}
+}
+
+/*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
  */
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 	}
 }
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 	} else {
 		int index = cvmx_get_core_num() * 2 + 1;
-		clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+		clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
 		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 	}
 }
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		int index = cvmx_get_core_num() * 2;
 		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
 	} else {
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			clear_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 		}
 	}
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 {
 	int cpu;
 	u64 mask;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	mask = 1ull << (cd.s.bit);
+	cd = irq_data_get_irq_chip_data(data);
+	mask = 1ull << (cd->bit);
 
-	if (cd.s.line == 0) {
+	if (cd->line == 0) {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu0_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 		}
 	} else {
 		for_each_online_cpu(cpu) {
 			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+			set_bit(cd->bit,
+				&per_cpu(octeon_irq_ciu1_en_mirror, cpu));
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
 		}
 	}
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
 	union cvmx_gpio_bit_cfgx cfg;
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 	u32 t = irqd_get_trigger_type(data);
 
-	cd.p = irq_data_get_irq_chip_data(data);
+	cd = irq_data_get_irq_chip_data(data);
 
 	cfg.u64 = 0;
 	cfg.s.int_en = 1;
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
 	cfg.s.fil_cnt = 7;
 	cfg.s.fil_sel = 3;
 
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-	union octeon_ciu_chip_data cd;
+	struct octeon_ciu_chip_data *cd;
 
-	cd.p = irq_data_get_irq_chip_data(data);
-	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+	cd = irq_data_get_irq_chip_data(data);
+	cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
 	octeon_irq_ciu_disable_all(data);
 }
596 | 687 | ||
597 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) | 688 | static void octeon_irq_ciu_gpio_ack(struct irq_data *data) |
598 | { | 689 | { |
599 | union octeon_ciu_chip_data cd; | 690 | struct octeon_ciu_chip_data *cd; |
600 | u64 mask; | 691 | u64 mask; |
601 | 692 | ||
602 | cd.p = irq_data_get_irq_chip_data(data); | 693 | cd = irq_data_get_irq_chip_data(data); |
603 | mask = 1ull << (cd.s.gpio_line); | 694 | mask = 1ull << (cd->gpio_line); |
604 | 695 | ||
605 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); | 696 | cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); |
606 | } | 697 | } |
607 | 698 | ||
608 | static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) | 699 | static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) |
609 | { | 700 | { |
610 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) | 701 | if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) |
611 | handle_edge_irq(irq, desc); | 702 | handle_edge_irq(irq, desc); |
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
644 | int cpu; | 735 | int cpu; |
645 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 736 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
646 | unsigned long flags; | 737 | unsigned long flags; |
647 | union octeon_ciu_chip_data cd; | 738 | struct octeon_ciu_chip_data *cd; |
648 | unsigned long *pen; | 739 | unsigned long *pen; |
649 | raw_spinlock_t *lock; | 740 | raw_spinlock_t *lock; |
650 | 741 | ||
651 | cd.p = irq_data_get_irq_chip_data(data); | 742 | cd = irq_data_get_irq_chip_data(data); |
652 | 743 | ||
653 | /* | 744 | /* |
654 | * For non-v2 CIU, we will allow only single CPU affinity. | 745 | * For non-v2 CIU, we will allow only single CPU affinity. |
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
668 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); | 759 | lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); |
669 | raw_spin_lock_irqsave(lock, flags); | 760 | raw_spin_lock_irqsave(lock, flags); |
670 | 761 | ||
671 | if (cd.s.line == 0) | 762 | if (cd->line == 0) |
672 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 763 | pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
673 | else | 764 | else |
674 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); | 765 | pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); |
675 | 766 | ||
676 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 767 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
677 | enable_one = 0; | 768 | enable_one = 0; |
678 | __set_bit(cd.s.bit, pen); | 769 | __set_bit(cd->bit, pen); |
679 | } else { | 770 | } else { |
680 | __clear_bit(cd.s.bit, pen); | 771 | __clear_bit(cd->bit, pen); |
681 | } | 772 | } |
682 | /* | 773 | /* |
683 | * Must be visible to octeon_irq_ip{2,3}_ciu() before | 774 | * Must be visible to octeon_irq_ip{2,3}_ciu() before |
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, | |||
685 | */ | 776 | */ |
686 | wmb(); | 777 | wmb(); |
687 | 778 | ||
688 | if (cd.s.line == 0) | 779 | if (cd->line == 0) |
689 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); | 780 | cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); |
690 | else | 781 | else |
691 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); | 782 | cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); |
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, | |||
706 | int cpu; | 797 | int cpu; |
707 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 798 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
708 | u64 mask; | 799 | u64 mask; |
709 | union octeon_ciu_chip_data cd; | 800 | struct octeon_ciu_chip_data *cd; |
710 | 801 | ||
711 | if (!enable_one) | 802 | if (!enable_one) |
712 | return 0; | 803 | return 0; |
713 | 804 | ||
714 | cd.p = irq_data_get_irq_chip_data(data); | 805 | cd = irq_data_get_irq_chip_data(data); |
715 | mask = 1ull << cd.s.bit; | 806 | mask = 1ull << cd->bit; |
716 | 807 | ||
717 | if (cd.s.line == 0) { | 808 | if (cd->line == 0) { |
718 | for_each_online_cpu(cpu) { | 809 | for_each_online_cpu(cpu) { |
719 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); | 810 | unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); |
720 | int index = octeon_coreid_for_cpu(cpu) * 2; | 811 | int index = octeon_coreid_for_cpu(cpu) * 2; |
721 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 812 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
722 | enable_one = false; | 813 | enable_one = false; |
723 | set_bit(cd.s.bit, pen); | 814 | set_bit(cd->bit, pen); |
724 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); | 815 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); |
725 | } else { | 816 | } else { |
726 | clear_bit(cd.s.bit, pen); | 817 | clear_bit(cd->bit, pen); |
727 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); | 818 | cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); |
728 | } | 819 | } |
729 | } | 820 | } |
@@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, | |||
733 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; | 824 | int index = octeon_coreid_for_cpu(cpu) * 2 + 1; |
734 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 825 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
735 | enable_one = false; | 826 | enable_one = false; |
736 | set_bit(cd.s.bit, pen); | 827 | set_bit(cd->bit, pen); |
737 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); | 828 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); |
738 | } else { | 829 | } else { |
739 | clear_bit(cd.s.bit, pen); | 830 | clear_bit(cd->bit, pen); |
740 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); | 831 | cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); |
741 | } | 832 | } |
742 | } | 833 | } |
743 | } | 834 | } |
744 | return 0; | 835 | return 0; |
745 | } | 836 | } |
837 | |||
838 | static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, | ||
839 | const struct cpumask *dest, | ||
840 | bool force) | ||
841 | { | ||
842 | int cpu; | ||
843 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | ||
844 | u64 mask; | ||
845 | struct octeon_ciu_chip_data *cd; | ||
846 | |||
847 | if (!enable_one) | ||
848 | return 0; | ||
849 | |||
850 | cd = irq_data_get_irq_chip_data(data); | ||
851 | mask = 1ull << cd->bit; | ||
852 | |||
853 | for_each_online_cpu(cpu) { | ||
854 | int index = octeon_coreid_for_cpu(cpu); | ||
855 | |||
856 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | ||
857 | enable_one = false; | ||
858 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); | ||
859 | } else { | ||
860 | cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); | ||
861 | } | ||
862 | } | ||
863 | return 0; | ||
864 | } | ||
746 | #endif | 865 | #endif |
747 | 866 | ||
748 | /* | 867 | /* |
@@ -752,6 +871,18 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = { | |||
752 | .name = "CIU", | 871 | .name = "CIU", |
753 | .irq_enable = octeon_irq_ciu_enable_v2, | 872 | .irq_enable = octeon_irq_ciu_enable_v2, |
754 | .irq_disable = octeon_irq_ciu_disable_all_v2, | 873 | .irq_disable = octeon_irq_ciu_disable_all_v2, |
874 | .irq_mask = octeon_irq_ciu_disable_local_v2, | ||
875 | .irq_unmask = octeon_irq_ciu_enable_v2, | ||
876 | #ifdef CONFIG_SMP | ||
877 | .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, | ||
878 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
879 | #endif | ||
880 | }; | ||
881 | |||
882 | static struct irq_chip octeon_irq_chip_ciu_v2_edge = { | ||
883 | .name = "CIU", | ||
884 | .irq_enable = octeon_irq_ciu_enable_v2, | ||
885 | .irq_disable = octeon_irq_ciu_disable_all_v2, | ||
755 | .irq_ack = octeon_irq_ciu_ack, | 886 | .irq_ack = octeon_irq_ciu_ack, |
756 | .irq_mask = octeon_irq_ciu_disable_local_v2, | 887 | .irq_mask = octeon_irq_ciu_disable_local_v2, |
757 | .irq_unmask = octeon_irq_ciu_enable_v2, | 888 | .irq_unmask = octeon_irq_ciu_enable_v2, |
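The reason each controller is split into a plain and an "_edge" variant is that only edge-triggered sources need an .irq_ack callback: handle_edge_irq() acknowledges the latched bit before running the handler, while handle_level_irq() only masks and unmasks. A rough sketch of how a mapping routine pairs chip and flow handler, assuming the chips and <linux/irq.h> declarations from this file are in scope (the helper name is hypothetical; the real pairing is done by octeon_irq_ciu_map() further down):

/* Hypothetical helper, for illustration only: pick the edge chip plus
 * handle_edge_irq for edge sources, the plain chip plus handle_level_irq
 * otherwise. */
static void octeon_pick_ciu_chip(unsigned int virq, bool is_edge)
{
	if (is_edge)
		irq_set_chip_and_handler(virq, &octeon_irq_chip_ciu_v2_edge,
					 handle_edge_irq);
	else
		irq_set_chip_and_handler(virq, &octeon_irq_chip_ciu_v2,
					 handle_level_irq);
}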
@@ -761,10 +892,50 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = { | |||
761 | #endif | 892 | #endif |
762 | }; | 893 | }; |
763 | 894 | ||
895 | /* | ||
896 | * Newer octeon chips have support for lockless CIU operation. | ||
897 | */ | ||
898 | static struct irq_chip octeon_irq_chip_ciu_sum2 = { | ||
899 | .name = "CIU", | ||
900 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
901 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
902 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
903 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
904 | #ifdef CONFIG_SMP | ||
905 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
906 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
907 | #endif | ||
908 | }; | ||
909 | |||
910 | static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { | ||
911 | .name = "CIU", | ||
912 | .irq_enable = octeon_irq_ciu_enable_sum2, | ||
913 | .irq_disable = octeon_irq_ciu_disable_all_sum2, | ||
914 | .irq_ack = octeon_irq_ciu_ack_sum2, | ||
915 | .irq_mask = octeon_irq_ciu_disable_local_sum2, | ||
916 | .irq_unmask = octeon_irq_ciu_enable_sum2, | ||
917 | #ifdef CONFIG_SMP | ||
918 | .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, | ||
919 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
920 | #endif | ||
921 | }; | ||
922 | |||
764 | static struct irq_chip octeon_irq_chip_ciu = { | 923 | static struct irq_chip octeon_irq_chip_ciu = { |
765 | .name = "CIU", | 924 | .name = "CIU", |
766 | .irq_enable = octeon_irq_ciu_enable, | 925 | .irq_enable = octeon_irq_ciu_enable, |
767 | .irq_disable = octeon_irq_ciu_disable_all, | 926 | .irq_disable = octeon_irq_ciu_disable_all, |
927 | .irq_mask = octeon_irq_ciu_disable_local, | ||
928 | .irq_unmask = octeon_irq_ciu_enable, | ||
929 | #ifdef CONFIG_SMP | ||
930 | .irq_set_affinity = octeon_irq_ciu_set_affinity, | ||
931 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
932 | #endif | ||
933 | }; | ||
934 | |||
935 | static struct irq_chip octeon_irq_chip_ciu_edge = { | ||
936 | .name = "CIU", | ||
937 | .irq_enable = octeon_irq_ciu_enable, | ||
938 | .irq_disable = octeon_irq_ciu_disable_all, | ||
768 | .irq_ack = octeon_irq_ciu_ack, | 939 | .irq_ack = octeon_irq_ciu_ack, |
769 | .irq_mask = octeon_irq_ciu_disable_local, | 940 | .irq_mask = octeon_irq_ciu_disable_local, |
770 | .irq_unmask = octeon_irq_ciu_enable, | 941 | .irq_unmask = octeon_irq_ciu_enable, |
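The "lockless" note above refers to the W1S/W1C (write-1-to-set / write-1-to-clear) enable registers used by the sum2 chip: a single store changes only the bits set in the written mask, so no read-modify-write sequence, per-CPU spinlock, or cached mirror of the enable word is needed. A minimal sketch of the idiom, using the same CVMX accessors the sum2 handlers in this patch use (the wrapper names are invented for illustration):

/* Invented wrappers, for illustration: each store flips only the bits
 * set in "mask", so concurrent callers on other cores need no locking. */
static inline void ciu_sum2_enable_bit(int coreid, u64 mask)
{
	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(coreid), mask);	/* set */
}

static inline void ciu_sum2_disable_bit(int coreid, u64 mask)
{
	cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);	/* clear */
}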
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, | |||
970 | unsigned int *out_type) | 1141 | unsigned int *out_type) |
971 | { | 1142 | { |
972 | unsigned int ciu, bit; | 1143 | unsigned int ciu, bit; |
1144 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
973 | 1145 | ||
974 | ciu = intspec[0]; | 1146 | ciu = intspec[0]; |
975 | bit = intspec[1]; | 1147 | bit = intspec[1]; |
976 | 1148 | ||
977 | if (ciu > 1 || bit > 63) | 1149 | if (ciu >= dd->num_sum || bit > 63) |
978 | return -EINVAL; | 1150 | return -EINVAL; |
979 | 1151 | ||
980 | *out_hwirq = (ciu << 6) | bit; | 1152 | *out_hwirq = (ciu << 6) | bit; |
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, | |||
984 | } | 1156 | } |
985 | 1157 | ||
986 | static struct irq_chip *octeon_irq_ciu_chip; | 1158 | static struct irq_chip *octeon_irq_ciu_chip; |
1159 | static struct irq_chip *octeon_irq_ciu_chip_edge; | ||
987 | static struct irq_chip *octeon_irq_gpio_chip; | 1160 | static struct irq_chip *octeon_irq_gpio_chip; |
988 | 1161 | ||
989 | static bool octeon_irq_virq_in_range(unsigned int virq) | 1162 | static bool octeon_irq_virq_in_range(unsigned int virq) |
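The xlat callback above packs the two-cell devicetree specifier (sum line, bit) into one hwirq as (ciu << 6) | bit; the map callback later recovers the pieces with hw >> 6 and hw & 63. For instance, line 2 bit 17 becomes (2 << 6) | 17 = 145. A small sketch of the packing, with hypothetical helper names:

/* Hypothetical helpers showing the hwirq encoding used by the CIU
 * xlat/map callbacks in this file. */
static inline unsigned long ciu_hwirq_pack(unsigned int line, unsigned int bit)
{
	return (line << 6) | bit;		/* line 2, bit 17 -> 145 */
}

static inline void ciu_hwirq_unpack(unsigned long hw,
				    unsigned int *line, unsigned int *bit)
{
	*line = hw >> 6;
	*bit = hw & 63;
}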
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq) | |||
999 | static int octeon_irq_ciu_map(struct irq_domain *d, | 1172 | static int octeon_irq_ciu_map(struct irq_domain *d, |
1000 | unsigned int virq, irq_hw_number_t hw) | 1173 | unsigned int virq, irq_hw_number_t hw) |
1001 | { | 1174 | { |
1175 | int rv; | ||
1002 | unsigned int line = hw >> 6; | 1176 | unsigned int line = hw >> 6; |
1003 | unsigned int bit = hw & 63; | 1177 | unsigned int bit = hw & 63; |
1178 | struct octeon_irq_ciu_domain_data *dd = d->host_data; | ||
1004 | 1179 | ||
1005 | if (!octeon_irq_virq_in_range(virq)) | 1180 | if (!octeon_irq_virq_in_range(virq)) |
1006 | return -EINVAL; | 1181 | return -EINVAL; |
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d, | |||
1009 | if (line == 0 && bit >= 16 && bit <32) | 1184 | if (line == 0 && bit >= 16 && bit <32) |
1010 | return 0; | 1185 | return 0; |
1011 | 1186 | ||
1012 | if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) | 1187 | if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) |
1013 | return -EINVAL; | 1188 | return -EINVAL; |
1014 | 1189 | ||
1015 | if (octeon_irq_ciu_is_edge(line, bit)) | 1190 | if (line == 2) { |
1016 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1191 | if (octeon_irq_ciu_is_edge(line, bit)) |
1017 | octeon_irq_ciu_chip, | 1192 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1018 | handle_edge_irq); | 1193 | &octeon_irq_chip_ciu_sum2_edge, |
1019 | else | 1194 | handle_edge_irq); |
1020 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1195 | else |
1021 | octeon_irq_ciu_chip, | 1196 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1022 | handle_level_irq); | 1197 | &octeon_irq_chip_ciu_sum2, |
1023 | 1198 | handle_level_irq); | |
1024 | return 0; | 1199 | } else { |
1200 | if (octeon_irq_ciu_is_edge(line, bit)) | ||
1201 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
1202 | octeon_irq_ciu_chip_edge, | ||
1203 | handle_edge_irq); | ||
1204 | else | ||
1205 | rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, | ||
1206 | octeon_irq_ciu_chip, | ||
1207 | handle_level_irq); | ||
1208 | } | ||
1209 | return rv; | ||
1025 | } | 1210 | } |
1026 | 1211 | ||
1027 | static int octeon_irq_gpio_map_common(struct irq_domain *d, | 1212 | static int octeon_irq_gpio_map(struct irq_domain *d, |
1028 | unsigned int virq, irq_hw_number_t hw, | 1213 | unsigned int virq, irq_hw_number_t hw) |
1029 | int line_limit, struct irq_chip *chip) | ||
1030 | { | 1214 | { |
1031 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; | 1215 | struct octeon_irq_gpio_domain_data *gpiod = d->host_data; |
1032 | unsigned int line, bit; | 1216 | unsigned int line, bit; |
1217 | int r; | ||
1033 | 1218 | ||
1034 | if (!octeon_irq_virq_in_range(virq)) | 1219 | if (!octeon_irq_virq_in_range(virq)) |
1035 | return -EINVAL; | 1220 | return -EINVAL; |
1036 | 1221 | ||
1037 | line = (hw + gpiod->base_hwirq) >> 6; | 1222 | line = (hw + gpiod->base_hwirq) >> 6; |
1038 | bit = (hw + gpiod->base_hwirq) & 63; | 1223 | bit = (hw + gpiod->base_hwirq) & 63; |
1039 | if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) | 1224 | if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) || |
1225 | octeon_irq_ciu_to_irq[line][bit] != 0) | ||
1040 | return -EINVAL; | 1226 | return -EINVAL; |
1041 | 1227 | ||
1042 | octeon_irq_set_ciu_mapping(virq, line, bit, hw, | 1228 | r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, |
1043 | chip, octeon_irq_handle_gpio); | 1229 | octeon_irq_gpio_chip, octeon_irq_handle_trigger); |
1044 | return 0; | 1230 | return r; |
1045 | } | ||
1046 | |||
1047 | static int octeon_irq_gpio_map(struct irq_domain *d, | ||
1048 | unsigned int virq, irq_hw_number_t hw) | ||
1049 | { | ||
1050 | return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); | ||
1051 | } | 1231 | } |
1052 | 1232 | ||
1053 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { | 1233 | static struct irq_domain_ops octeon_irq_domain_ciu_ops = { |
1054 | .map = octeon_irq_ciu_map, | 1234 | .map = octeon_irq_ciu_map, |
1235 | .unmap = octeon_irq_free_cd, | ||
1055 | .xlate = octeon_irq_ciu_xlat, | 1236 | .xlate = octeon_irq_ciu_xlat, |
1056 | }; | 1237 | }; |
1057 | 1238 | ||
1058 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { | 1239 | static struct irq_domain_ops octeon_irq_domain_gpio_ops = { |
1059 | .map = octeon_irq_gpio_map, | 1240 | .map = octeon_irq_gpio_map, |
1241 | .unmap = octeon_irq_free_cd, | ||
1060 | .xlate = octeon_irq_gpio_xlat, | 1242 | .xlate = octeon_irq_gpio_xlat, |
1061 | }; | 1243 | }; |
1062 | 1244 | ||
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void) | |||
1095 | } | 1277 | } |
1096 | } | 1278 | } |
1097 | 1279 | ||
1280 | static void octeon_irq_ip4_ciu(void) | ||
1281 | { | ||
1282 | int coreid = cvmx_get_core_num(); | ||
1283 | u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); | ||
1284 | u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); | ||
1285 | |||
1286 | ciu_sum &= ciu_en; | ||
1287 | if (likely(ciu_sum)) { | ||
1288 | int bit = fls64(ciu_sum) - 1; | ||
1289 | int irq = octeon_irq_ciu_to_irq[2][bit]; | ||
1290 | |||
1291 | if (likely(irq)) | ||
1292 | do_IRQ(irq); | ||
1293 | else | ||
1294 | spurious_interrupt(); | ||
1295 | } else { | ||
1296 | spurious_interrupt(); | ||
1297 | } | ||
1298 | } | ||
1299 | |||
1098 | static bool octeon_irq_use_ip4; | 1300 | static bool octeon_irq_use_ip4; |
1099 | 1301 | ||
1100 | static void octeon_irq_local_enable_ip4(void *arg) | 1302 | static void octeon_irq_local_enable_ip4(void *arg) |
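octeon_irq_ip4_ciu() masks the SUM2 status with the per-core EN2 enables and services the highest pending source first: fls64() returns the 1-based position of the most significant set bit, so fls64(ciu_sum) - 1 is the CIU bit used to index line 2 of octeon_irq_ciu_to_irq. A worked example with made-up values, assuming it sits in this file:

/* Illustration only: with bits 2 and 8 pending and enabled,
 * fls64(0x104) == 9, so bit 8 is dispatched first and bit 2 is left
 * for the next interrupt. */
static int octeon_ip4_pick_example(void)
{
	u64 pending = 0x104;
	int bit = fls64(pending) - 1;		/* bit == 8 */

	return octeon_irq_ciu_to_irq[2][bit];	/* line-2 lookup, as above */
}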
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void) | |||
1176 | 1378 | ||
1177 | /* Enable the CIU lines */ | 1379 | /* Enable the CIU lines */ |
1178 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1380 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1179 | clear_c0_status(STATUSF_IP4); | 1381 | if (octeon_irq_use_ip4) |
1382 | set_c0_status(STATUSF_IP4); | ||
1383 | else | ||
1384 | clear_c0_status(STATUSF_IP4); | ||
1180 | } | 1385 | } |
1181 | 1386 | ||
1182 | static void octeon_irq_setup_secondary_ciu2(void) | 1387 | static void octeon_irq_setup_secondary_ciu2(void) |
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void) | |||
1192 | clear_c0_status(STATUSF_IP4); | 1397 | clear_c0_status(STATUSF_IP4); |
1193 | } | 1398 | } |
1194 | 1399 | ||
1195 | static void __init octeon_irq_init_ciu(void) | 1400 | static int __init octeon_irq_init_ciu( |
1401 | struct device_node *ciu_node, struct device_node *parent) | ||
1196 | { | 1402 | { |
1197 | unsigned int i; | 1403 | unsigned int i, r; |
1198 | struct irq_chip *chip; | 1404 | struct irq_chip *chip; |
1405 | struct irq_chip *chip_edge; | ||
1199 | struct irq_chip *chip_mbox; | 1406 | struct irq_chip *chip_mbox; |
1200 | struct irq_chip *chip_wd; | 1407 | struct irq_chip *chip_wd; |
1201 | struct device_node *gpio_node; | ||
1202 | struct device_node *ciu_node; | ||
1203 | struct irq_domain *ciu_domain = NULL; | 1408 | struct irq_domain *ciu_domain = NULL; |
1409 | struct octeon_irq_ciu_domain_data *dd; | ||
1410 | |||
1411 | dd = kzalloc(sizeof(*dd), GFP_KERNEL); | ||
1412 | if (!dd) | ||
1413 | return -ENOMEM; | ||
1204 | 1414 | ||
1205 | octeon_irq_init_ciu_percpu(); | 1415 | octeon_irq_init_ciu_percpu(); |
1206 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; | 1416 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; |
1207 | 1417 | ||
1208 | octeon_irq_ip2 = octeon_irq_ip2_ciu; | 1418 | octeon_irq_ip2 = octeon_irq_ip2_ciu; |
1209 | octeon_irq_ip3 = octeon_irq_ip3_ciu; | 1419 | octeon_irq_ip3 = octeon_irq_ip3_ciu; |
1420 | if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) | ||
1421 | && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { | ||
1422 | octeon_irq_ip4 = octeon_irq_ip4_ciu; | ||
1423 | dd->num_sum = 3; | ||
1424 | octeon_irq_use_ip4 = true; | ||
1425 | } else { | ||
1426 | octeon_irq_ip4 = octeon_irq_ip4_mask; | ||
1427 | dd->num_sum = 2; | ||
1428 | octeon_irq_use_ip4 = false; | ||
1429 | } | ||
1210 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || | 1430 | if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || |
1211 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || | 1431 | OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || |
1212 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || | 1432 | OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || |
1213 | OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | 1433 | OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { |
1214 | chip = &octeon_irq_chip_ciu_v2; | 1434 | chip = &octeon_irq_chip_ciu_v2; |
1435 | chip_edge = &octeon_irq_chip_ciu_v2_edge; | ||
1215 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; | 1436 | chip_mbox = &octeon_irq_chip_ciu_mbox_v2; |
1216 | chip_wd = &octeon_irq_chip_ciu_wd_v2; | 1437 | chip_wd = &octeon_irq_chip_ciu_wd_v2; |
1217 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; | 1438 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; |
1218 | } else { | 1439 | } else { |
1219 | chip = &octeon_irq_chip_ciu; | 1440 | chip = &octeon_irq_chip_ciu; |
1441 | chip_edge = &octeon_irq_chip_ciu_edge; | ||
1220 | chip_mbox = &octeon_irq_chip_ciu_mbox; | 1442 | chip_mbox = &octeon_irq_chip_ciu_mbox; |
1221 | chip_wd = &octeon_irq_chip_ciu_wd; | 1443 | chip_wd = &octeon_irq_chip_ciu_wd; |
1222 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; | 1444 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; |
1223 | } | 1445 | } |
1224 | octeon_irq_ciu_chip = chip; | 1446 | octeon_irq_ciu_chip = chip; |
1225 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 1447 | octeon_irq_ciu_chip_edge = chip_edge; |
1226 | 1448 | ||
1227 | /* Mips internal */ | 1449 | /* Mips internal */ |
1228 | octeon_irq_init_core(); | 1450 | octeon_irq_init_core(); |
1229 | 1451 | ||
1230 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 1452 | ciu_domain = irq_domain_add_tree( |
1231 | if (gpio_node) { | 1453 | ciu_node, &octeon_irq_domain_ciu_ops, dd); |
1232 | struct octeon_irq_gpio_domain_data *gpiod; | 1454 | irq_set_default_host(ciu_domain); |
1233 | |||
1234 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1235 | if (gpiod) { | ||
1236 | /* gpio domain host_data is the base hwirq number. */ | ||
1237 | gpiod->base_hwirq = 16; | ||
1238 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
1239 | of_node_put(gpio_node); | ||
1240 | } else | ||
1241 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1242 | } else | ||
1243 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
1244 | |||
1245 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); | ||
1246 | if (ciu_node) { | ||
1247 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); | ||
1248 | irq_set_default_host(ciu_domain); | ||
1249 | of_node_put(ciu_node); | ||
1250 | } else | ||
1251 | panic("Cannot find device node for cavium,octeon-3860-ciu."); | ||
1252 | 1455 | ||
1253 | /* CIU_0 */ | 1456 | /* CIU_0 */ |
1254 | for (i = 0; i < 16; i++) | 1457 | for (i = 0; i < 16; i++) { |
1255 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | 1458 | r = octeon_irq_force_ciu_mapping( |
1459 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); | ||
1460 | if (r) | ||
1461 | goto err; | ||
1462 | } | ||
1463 | |||
1464 | r = octeon_irq_set_ciu_mapping( | ||
1465 | OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | ||
1466 | if (r) | ||
1467 | goto err; | ||
1468 | r = octeon_irq_set_ciu_mapping( | ||
1469 | OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | ||
1470 | if (r) | ||
1471 | goto err; | ||
1472 | |||
1473 | for (i = 0; i < 4; i++) { | ||
1474 | r = octeon_irq_force_ciu_mapping( | ||
1475 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | ||
1476 | if (r) | ||
1477 | goto err; | ||
1478 | } | ||
1479 | for (i = 0; i < 4; i++) { | ||
1480 | r = octeon_irq_force_ciu_mapping( | ||
1481 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
1482 | if (r) | ||
1483 | goto err; | ||
1484 | } | ||
1256 | 1485 | ||
1257 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); | 1486 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); |
1258 | octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); | 1487 | if (r) |
1488 | goto err; | ||
1259 | 1489 | ||
1260 | for (i = 0; i < 4; i++) | 1490 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); |
1261 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); | 1491 | if (r) |
1262 | for (i = 0; i < 4; i++) | 1492 | goto err; |
1263 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); | ||
1264 | 1493 | ||
1265 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); | 1494 | for (i = 0; i < 4; i++) { |
1266 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); | 1495 | r = octeon_irq_force_ciu_mapping( |
1267 | for (i = 0; i < 4; i++) | 1496 | ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); |
1268 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); | 1497 | if (r) |
1498 | goto err; | ||
1499 | } | ||
1500 | |||
1501 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | ||
1502 | if (r) | ||
1503 | goto err; | ||
1269 | 1504 | ||
1270 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); | 1505 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); |
1271 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); | 1506 | if (r) |
1507 | goto err; | ||
1272 | 1508 | ||
1273 | /* CIU_1 */ | 1509 | /* CIU_1 */ |
1274 | for (i = 0; i < 16; i++) | 1510 | for (i = 0; i < 16; i++) { |
1275 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); | 1511 | r = octeon_irq_set_ciu_mapping( |
1512 | i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, | ||
1513 | handle_level_irq); | ||
1514 | if (r) | ||
1515 | goto err; | ||
1516 | } | ||
1276 | 1517 | ||
1277 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); | 1518 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); |
1519 | if (r) | ||
1520 | goto err; | ||
1278 | 1521 | ||
1279 | /* Enable the CIU lines */ | 1522 | /* Enable the CIU lines */ |
1280 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 1523 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1281 | clear_c0_status(STATUSF_IP4); | 1524 | if (octeon_irq_use_ip4) |
1525 | set_c0_status(STATUSF_IP4); | ||
1526 | else | ||
1527 | clear_c0_status(STATUSF_IP4); | ||
1528 | |||
1529 | return 0; | ||
1530 | err: | ||
1531 | return r; | ||
1282 | } | 1532 | } |
1283 | 1533 | ||
1534 | static int __init octeon_irq_init_gpio( | ||
1535 | struct device_node *gpio_node, struct device_node *parent) | ||
1536 | { | ||
1537 | struct octeon_irq_gpio_domain_data *gpiod; | ||
1538 | u32 interrupt_cells; | ||
1539 | unsigned int base_hwirq; | ||
1540 | int r; | ||
1541 | |||
1542 | r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); | ||
1543 | if (r) | ||
1544 | return r; | ||
1545 | |||
1546 | if (interrupt_cells == 1) { | ||
1547 | u32 v; | ||
1548 | |||
1549 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); | ||
1550 | if (r) { | ||
1551 | pr_warn("No \"interrupts\" property.\n"); | ||
1552 | return r; | ||
1553 | } | ||
1554 | base_hwirq = v; | ||
1555 | } else if (interrupt_cells == 2) { | ||
1556 | u32 v0, v1; | ||
1557 | |||
1558 | r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); | ||
1559 | if (r) { | ||
1560 | pr_warn("No \"interrupts\" property.\n"); | ||
1561 | return r; | ||
1562 | } | ||
1563 | r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); | ||
1564 | if (r) { | ||
1565 | pr_warn("No \"interrupts\" property.\n"); | ||
1566 | return r; | ||
1567 | } | ||
1568 | base_hwirq = (v0 << 6) | v1; | ||
1569 | } else { | ||
1570 | pr_warn("Bad \"#interrupt-cells\" property: %u\n", | ||
1571 | interrupt_cells); | ||
1572 | return -EINVAL; | ||
1573 | } | ||
1574 | |||
1575 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1576 | if (gpiod) { | ||
1577 | /* gpio domain host_data is the base hwirq number. */ | ||
1578 | gpiod->base_hwirq = base_hwirq; | ||
1579 | irq_domain_add_linear( | ||
1580 | gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); | ||
1581 | } else { | ||
1582 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1583 | return -ENOMEM; | ||
1584 | } | ||
1585 | |||
1586 | return 0; | ||
1587 | } | ||
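octeon_irq_init_gpio() derives the GPIO domain's base hwirq from the parent's #interrupt-cells: a single cell is taken verbatim, while two cells are packed exactly like a CIU hwirq, base_hwirq = (cell0 << 6) | cell1. A GPIO block wired to CIU line 0, bit 16 therefore yields base_hwirq = 16, matching the value the removed code hard-wired. A brief illustration (hypothetical helper, example cells):

/* Hypothetical helper: the two-cell case above, e.g. interrupts = <0 16>
 * packs to (0 << 6) | 16 == 16, so GPIO hwirq 0 maps to CIU line 0 bit 16. */
static unsigned int gpio_base_hwirq(u32 line_cell, u32 bit_cell)
{
	return (line_cell << 6) | bit_cell;
}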
1284 | /* | 1588 | /* |
1285 | * Watchdog interrupts are special. They are associated with a single | 1589 | * Watchdog interrupts are special. They are associated with a single |
1286 | * core, so we hardwire the affinity to that core. | 1590 | * core, so we hardwire the affinity to that core. |
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data) | |||
1290 | u64 mask; | 1594 | u64 mask; |
1291 | u64 en_addr; | 1595 | u64 en_addr; |
1292 | int coreid = data->irq - OCTEON_IRQ_WDOG0; | 1596 | int coreid = data->irq - OCTEON_IRQ_WDOG0; |
1293 | union octeon_ciu_chip_data cd; | 1597 | struct octeon_ciu_chip_data *cd; |
1294 | 1598 | ||
1295 | cd.p = irq_data_get_irq_chip_data(data); | 1599 | cd = irq_data_get_irq_chip_data(data); |
1296 | mask = 1ull << (cd.s.bit); | 1600 | mask = 1ull << (cd->bit); |
1297 | 1601 | ||
1298 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1602 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1603 | (0x1000ull * cd->line); | ||
1299 | cvmx_write_csr(en_addr, mask); | 1604 | cvmx_write_csr(en_addr, mask); |
1300 | 1605 | ||
1301 | } | 1606 | } |
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data) | |||
1306 | u64 en_addr; | 1611 | u64 en_addr; |
1307 | int cpu = next_cpu_for_irq(data); | 1612 | int cpu = next_cpu_for_irq(data); |
1308 | int coreid = octeon_coreid_for_cpu(cpu); | 1613 | int coreid = octeon_coreid_for_cpu(cpu); |
1309 | union octeon_ciu_chip_data cd; | 1614 | struct octeon_ciu_chip_data *cd; |
1310 | 1615 | ||
1311 | cd.p = irq_data_get_irq_chip_data(data); | 1616 | cd = irq_data_get_irq_chip_data(data); |
1312 | mask = 1ull << (cd.s.bit); | 1617 | mask = 1ull << (cd->bit); |
1313 | 1618 | ||
1314 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1619 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1620 | (0x1000ull * cd->line); | ||
1315 | cvmx_write_csr(en_addr, mask); | 1621 | cvmx_write_csr(en_addr, mask); |
1316 | } | 1622 | } |
1317 | 1623 | ||
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data) | |||
1320 | u64 mask; | 1626 | u64 mask; |
1321 | u64 en_addr; | 1627 | u64 en_addr; |
1322 | int coreid = cvmx_get_core_num(); | 1628 | int coreid = cvmx_get_core_num(); |
1323 | union octeon_ciu_chip_data cd; | 1629 | struct octeon_ciu_chip_data *cd; |
1324 | 1630 | ||
1325 | cd.p = irq_data_get_irq_chip_data(data); | 1631 | cd = irq_data_get_irq_chip_data(data); |
1326 | mask = 1ull << (cd.s.bit); | 1632 | mask = 1ull << (cd->bit); |
1327 | 1633 | ||
1328 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); | 1634 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + |
1635 | (0x1000ull * cd->line); | ||
1329 | cvmx_write_csr(en_addr, mask); | 1636 | cvmx_write_csr(en_addr, mask); |
1330 | 1637 | ||
1331 | } | 1638 | } |
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data) | |||
1335 | u64 mask; | 1642 | u64 mask; |
1336 | u64 en_addr; | 1643 | u64 en_addr; |
1337 | int coreid = cvmx_get_core_num(); | 1644 | int coreid = cvmx_get_core_num(); |
1338 | union octeon_ciu_chip_data cd; | 1645 | struct octeon_ciu_chip_data *cd; |
1339 | 1646 | ||
1340 | cd.p = irq_data_get_irq_chip_data(data); | 1647 | cd = irq_data_get_irq_chip_data(data); |
1341 | mask = 1ull << (cd.s.bit); | 1648 | mask = 1ull << (cd->bit); |
1342 | 1649 | ||
1343 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); | 1650 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + |
1651 | (0x1000ull * cd->line); | ||
1344 | cvmx_write_csr(en_addr, mask); | 1652 | cvmx_write_csr(en_addr, mask); |
1345 | 1653 | ||
1346 | } | 1654 | } |
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data) | |||
1350 | u64 mask; | 1658 | u64 mask; |
1351 | u64 en_addr; | 1659 | u64 en_addr; |
1352 | int coreid = cvmx_get_core_num(); | 1660 | int coreid = cvmx_get_core_num(); |
1353 | union octeon_ciu_chip_data cd; | 1661 | struct octeon_ciu_chip_data *cd; |
1354 | 1662 | ||
1355 | cd.p = irq_data_get_irq_chip_data(data); | 1663 | cd = irq_data_get_irq_chip_data(data); |
1356 | mask = 1ull << (cd.s.bit); | 1664 | mask = 1ull << (cd->bit); |
1357 | 1665 | ||
1358 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); | 1666 | en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); |
1359 | cvmx_write_csr(en_addr, mask); | 1667 | cvmx_write_csr(en_addr, mask); |
1360 | 1668 | ||
1361 | } | 1669 | } |
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data) | |||
1364 | { | 1672 | { |
1365 | int cpu; | 1673 | int cpu; |
1366 | u64 mask; | 1674 | u64 mask; |
1367 | union octeon_ciu_chip_data cd; | 1675 | struct octeon_ciu_chip_data *cd; |
1368 | 1676 | ||
1369 | cd.p = irq_data_get_irq_chip_data(data); | 1677 | cd = irq_data_get_irq_chip_data(data); |
1370 | mask = 1ull << (cd.s.bit); | 1678 | mask = 1ull << (cd->bit); |
1371 | 1679 | ||
1372 | for_each_online_cpu(cpu) { | 1680 | for_each_online_cpu(cpu) { |
1373 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1681 | u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1682 | octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); | ||
1374 | cvmx_write_csr(en_addr, mask); | 1683 | cvmx_write_csr(en_addr, mask); |
1375 | } | 1684 | } |
1376 | } | 1685 | } |
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) | |||
1383 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1692 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1384 | 1693 | ||
1385 | for_each_online_cpu(cpu) { | 1694 | for_each_online_cpu(cpu) { |
1386 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); | 1695 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( |
1696 | octeon_coreid_for_cpu(cpu)); | ||
1387 | cvmx_write_csr(en_addr, mask); | 1697 | cvmx_write_csr(en_addr, mask); |
1388 | } | 1698 | } |
1389 | } | 1699 | } |
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) | |||
1396 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); | 1706 | mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); |
1397 | 1707 | ||
1398 | for_each_online_cpu(cpu) { | 1708 | for_each_online_cpu(cpu) { |
1399 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); | 1709 | u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( |
1710 | octeon_coreid_for_cpu(cpu)); | ||
1400 | cvmx_write_csr(en_addr, mask); | 1711 | cvmx_write_csr(en_addr, mask); |
1401 | } | 1712 | } |
1402 | } | 1713 | } |
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data, | |||
1430 | int cpu; | 1741 | int cpu; |
1431 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); | 1742 | bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); |
1432 | u64 mask; | 1743 | u64 mask; |
1433 | union octeon_ciu_chip_data cd; | 1744 | struct octeon_ciu_chip_data *cd; |
1434 | 1745 | ||
1435 | if (!enable_one) | 1746 | if (!enable_one) |
1436 | return 0; | 1747 | return 0; |
1437 | 1748 | ||
1438 | cd.p = irq_data_get_irq_chip_data(data); | 1749 | cd = irq_data_get_irq_chip_data(data); |
1439 | mask = 1ull << cd.s.bit; | 1750 | mask = 1ull << cd->bit; |
1440 | 1751 | ||
1441 | for_each_online_cpu(cpu) { | 1752 | for_each_online_cpu(cpu) { |
1442 | u64 en_addr; | 1753 | u64 en_addr; |
1443 | if (cpumask_test_cpu(cpu, dest) && enable_one) { | 1754 | if (cpumask_test_cpu(cpu, dest) && enable_one) { |
1444 | enable_one = false; | 1755 | enable_one = false; |
1445 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1756 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( |
1757 | octeon_coreid_for_cpu(cpu)) + | ||
1758 | (0x1000ull * cd->line); | ||
1446 | } else { | 1759 | } else { |
1447 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); | 1760 | en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( |
1761 | octeon_coreid_for_cpu(cpu)) + | ||
1762 | (0x1000ull * cd->line); | ||
1448 | } | 1763 | } |
1449 | cvmx_write_csr(en_addr, mask); | 1764 | cvmx_write_csr(en_addr, mask); |
1450 | } | 1765 | } |
@@ -1461,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) | |||
1461 | 1776 | ||
1462 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) | 1777 | static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) |
1463 | { | 1778 | { |
1464 | union octeon_ciu_chip_data cd; | 1779 | struct octeon_ciu_chip_data *cd; |
1465 | cd.p = irq_data_get_irq_chip_data(data); | 1780 | |
1781 | cd = irq_data_get_irq_chip_data(data); | ||
1466 | 1782 | ||
1467 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); | 1783 | cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); |
1468 | 1784 | ||
1469 | octeon_irq_ciu2_disable_all(data); | 1785 | octeon_irq_ciu2_disable_all(data); |
1470 | } | 1786 | } |
@@ -1473,6 +1789,18 @@ static struct irq_chip octeon_irq_chip_ciu2 = { | |||
1473 | .name = "CIU2-E", | 1789 | .name = "CIU2-E", |
1474 | .irq_enable = octeon_irq_ciu2_enable, | 1790 | .irq_enable = octeon_irq_ciu2_enable, |
1475 | .irq_disable = octeon_irq_ciu2_disable_all, | 1791 | .irq_disable = octeon_irq_ciu2_disable_all, |
1792 | .irq_mask = octeon_irq_ciu2_disable_local, | ||
1793 | .irq_unmask = octeon_irq_ciu2_enable, | ||
1794 | #ifdef CONFIG_SMP | ||
1795 | .irq_set_affinity = octeon_irq_ciu2_set_affinity, | ||
1796 | .irq_cpu_offline = octeon_irq_cpu_offline_ciu, | ||
1797 | #endif | ||
1798 | }; | ||
1799 | |||
1800 | static struct irq_chip octeon_irq_chip_ciu2_edge = { | ||
1801 | .name = "CIU2-E", | ||
1802 | .irq_enable = octeon_irq_ciu2_enable, | ||
1803 | .irq_disable = octeon_irq_ciu2_disable_all, | ||
1476 | .irq_ack = octeon_irq_ciu2_ack, | 1804 | .irq_ack = octeon_irq_ciu2_ack, |
1477 | .irq_mask = octeon_irq_ciu2_disable_local, | 1805 | .irq_mask = octeon_irq_ciu2_disable_local, |
1478 | .irq_unmask = octeon_irq_ciu2_enable, | 1806 | .irq_unmask = octeon_irq_ciu2_enable, |
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
1582 | 1910 | ||
1583 | if (octeon_irq_ciu2_is_edge(line, bit)) | 1911 | if (octeon_irq_ciu2_is_edge(line, bit)) |
1584 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1912 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
1585 | &octeon_irq_chip_ciu2, | 1913 | &octeon_irq_chip_ciu2_edge, |
1586 | handle_edge_irq); | 1914 | handle_edge_irq); |
1587 | else | 1915 | else |
1588 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, | 1916 | octeon_irq_set_ciu_mapping(virq, line, bit, 0, |
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, | |||
1591 | 1919 | ||
1592 | return 0; | 1920 | return 0; |
1593 | } | 1921 | } |
1594 | static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, | ||
1595 | unsigned int virq, irq_hw_number_t hw) | ||
1596 | { | ||
1597 | return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); | ||
1598 | } | ||
1599 | 1922 | ||
1600 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { | 1923 | static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { |
1601 | .map = octeon_irq_ciu2_map, | 1924 | .map = octeon_irq_ciu2_map, |
1925 | .unmap = octeon_irq_free_cd, | ||
1602 | .xlate = octeon_irq_ciu2_xlat, | 1926 | .xlate = octeon_irq_ciu2_xlat, |
1603 | }; | 1927 | }; |
1604 | 1928 | ||
1605 | static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { | ||
1606 | .map = octeon_irq_ciu2_gpio_map, | ||
1607 | .xlate = octeon_irq_gpio_xlat, | ||
1608 | }; | ||
1609 | |||
1610 | static void octeon_irq_ciu2(void) | 1929 | static void octeon_irq_ciu2(void) |
1611 | { | 1930 | { |
1612 | int line; | 1931 | int line; |
@@ -1674,16 +1993,16 @@ out: | |||
1674 | return; | 1993 | return; |
1675 | } | 1994 | } |
1676 | 1995 | ||
1677 | static void __init octeon_irq_init_ciu2(void) | 1996 | static int __init octeon_irq_init_ciu2( |
1997 | struct device_node *ciu_node, struct device_node *parent) | ||
1678 | { | 1998 | { |
1679 | unsigned int i; | 1999 | unsigned int i, r; |
1680 | struct device_node *gpio_node; | ||
1681 | struct device_node *ciu_node; | ||
1682 | struct irq_domain *ciu_domain = NULL; | 2000 | struct irq_domain *ciu_domain = NULL; |
1683 | 2001 | ||
1684 | octeon_irq_init_ciu2_percpu(); | 2002 | octeon_irq_init_ciu2_percpu(); |
1685 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; | 2003 | octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; |
1686 | 2004 | ||
2005 | octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; | ||
1687 | octeon_irq_ip2 = octeon_irq_ciu2; | 2006 | octeon_irq_ip2 = octeon_irq_ciu2; |
1688 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; | 2007 | octeon_irq_ip3 = octeon_irq_ciu2_mbox; |
1689 | octeon_irq_ip4 = octeon_irq_ip4_mask; | 2008 | octeon_irq_ip4 = octeon_irq_ip4_mask; |
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void) | |||
1691 | /* Mips internal */ | 2010 | /* Mips internal */ |
1692 | octeon_irq_init_core(); | 2011 | octeon_irq_init_core(); |
1693 | 2012 | ||
1694 | gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); | 2013 | ciu_domain = irq_domain_add_tree( |
1695 | if (gpio_node) { | 2014 | ciu_node, &octeon_irq_domain_ciu2_ops, NULL); |
1696 | struct octeon_irq_gpio_domain_data *gpiod; | 2015 | irq_set_default_host(ciu_domain); |
1697 | |||
1698 | gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); | ||
1699 | if (gpiod) { | ||
1700 | /* gpio domain host_data is the base hwirq number. */ | ||
1701 | gpiod->base_hwirq = 7 << 6; | ||
1702 | irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); | ||
1703 | of_node_put(gpio_node); | ||
1704 | } else | ||
1705 | pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); | ||
1706 | } else | ||
1707 | pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); | ||
1708 | |||
1709 | ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); | ||
1710 | if (ciu_node) { | ||
1711 | ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); | ||
1712 | irq_set_default_host(ciu_domain); | ||
1713 | of_node_put(ciu_node); | ||
1714 | } else | ||
1715 | panic("Cannot find device node for cavium,octeon-6880-ciu2."); | ||
1716 | 2016 | ||
1717 | /* CIU2 */ | 2017 | /* CIU2 */ |
1718 | for (i = 0; i < 64; i++) | 2018 | for (i = 0; i < 64; i++) { |
1719 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | 2019 | r = octeon_irq_force_ciu_mapping( |
2020 | ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); | ||
2021 | if (r) | ||
2022 | goto err; | ||
2023 | } | ||
1720 | 2024 | ||
1721 | for (i = 0; i < 32; i++) | 2025 | for (i = 0; i < 32; i++) { |
1722 | octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, | 2026 | r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, |
1723 | &octeon_irq_chip_ciu2_wd, handle_level_irq); | 2027 | &octeon_irq_chip_ciu2_wd, handle_level_irq); |
2028 | if (r) | ||
2029 | goto err; | ||
2030 | } | ||
1724 | 2031 | ||
1725 | for (i = 0; i < 4; i++) | 2032 | for (i = 0; i < 4; i++) { |
1726 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | 2033 | r = octeon_irq_force_ciu_mapping( |
2034 | ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); | ||
2035 | if (r) | ||
2036 | goto err; | ||
2037 | } | ||
1727 | 2038 | ||
1728 | octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); | 2039 | r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); |
2040 | if (r) | ||
2041 | goto err; | ||
1729 | 2042 | ||
1730 | for (i = 0; i < 4; i++) | 2043 | for (i = 0; i < 4; i++) { |
1731 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | 2044 | r = octeon_irq_force_ciu_mapping( |
2045 | ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); | ||
2046 | if (r) | ||
2047 | goto err; | ||
2048 | } | ||
1732 | 2049 | ||
1733 | for (i = 0; i < 4; i++) | 2050 | for (i = 0; i < 4; i++) { |
1734 | octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | 2051 | r = octeon_irq_force_ciu_mapping( |
2052 | ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); | ||
2053 | if (r) | ||
2054 | goto err; | ||
2055 | } | ||
1735 | 2056 | ||
1736 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2057 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
1737 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); | 2058 | irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); |
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void) | |||
1741 | /* Enable the CIU lines */ | 2062 | /* Enable the CIU lines */ |
1742 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); | 2063 | set_c0_status(STATUSF_IP3 | STATUSF_IP2); |
1743 | clear_c0_status(STATUSF_IP4); | 2064 | clear_c0_status(STATUSF_IP4); |
2065 | return 0; | ||
2066 | err: | ||
2067 | return r; | ||
2068 | } | ||
2069 | |||
2070 | struct octeon_irq_cib_host_data { | ||
2071 | raw_spinlock_t lock; | ||
2072 | u64 raw_reg; | ||
2073 | u64 en_reg; | ||
2074 | int max_bits; | ||
2075 | }; | ||
2076 | |||
2077 | struct octeon_irq_cib_chip_data { | ||
2078 | struct octeon_irq_cib_host_data *host_data; | ||
2079 | int bit; | ||
2080 | }; | ||
2081 | |||
2082 | static void octeon_irq_cib_enable(struct irq_data *data) | ||
2083 | { | ||
2084 | unsigned long flags; | ||
2085 | u64 en; | ||
2086 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
2087 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
2088 | |||
2089 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2090 | en = cvmx_read_csr(host_data->en_reg); | ||
2091 | en |= 1ull << cd->bit; | ||
2092 | cvmx_write_csr(host_data->en_reg, en); | ||
2093 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2094 | } | ||
2095 | |||
2096 | static void octeon_irq_cib_disable(struct irq_data *data) | ||
2097 | { | ||
2098 | unsigned long flags; | ||
2099 | u64 en; | ||
2100 | struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); | ||
2101 | struct octeon_irq_cib_host_data *host_data = cd->host_data; | ||
2102 | |||
2103 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2104 | en = cvmx_read_csr(host_data->en_reg); | ||
2105 | en &= ~(1ull << cd->bit); | ||
2106 | cvmx_write_csr(host_data->en_reg, en); | ||
2107 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2108 | } | ||
2109 | |||
2110 | static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) | ||
2111 | { | ||
2112 | irqd_set_trigger_type(data, t); | ||
2113 | return IRQ_SET_MASK_OK; | ||
2114 | } | ||
2115 | |||
2116 | static struct irq_chip octeon_irq_chip_cib = { | ||
2117 | .name = "CIB", | ||
2118 | .irq_enable = octeon_irq_cib_enable, | ||
2119 | .irq_disable = octeon_irq_cib_disable, | ||
2120 | .irq_mask = octeon_irq_cib_disable, | ||
2121 | .irq_unmask = octeon_irq_cib_enable, | ||
2122 | .irq_set_type = octeon_irq_cib_set_type, | ||
2123 | }; | ||
2124 | |||
2125 | static int octeon_irq_cib_xlat(struct irq_domain *d, | ||
2126 | struct device_node *node, | ||
2127 | const u32 *intspec, | ||
2128 | unsigned int intsize, | ||
2129 | unsigned long *out_hwirq, | ||
2130 | unsigned int *out_type) | ||
2131 | { | ||
2132 | unsigned int type = 0; | ||
2133 | |||
2134 | if (intsize == 2) | ||
2135 | type = intspec[1]; | ||
2136 | |||
2137 | switch (type) { | ||
2138 | case 0: /* unofficial value, but we might as well let it work. */ | ||
2139 | case 4: /* official value for level triggering. */ | ||
2140 | *out_type = IRQ_TYPE_LEVEL_HIGH; | ||
2141 | break; | ||
2142 | case 1: /* official value for edge triggering. */ | ||
2143 | *out_type = IRQ_TYPE_EDGE_RISING; | ||
2144 | break; | ||
2145 | default: /* Nothing else is acceptable. */ | ||
2146 | return -EINVAL; | ||
2147 | } | ||
2148 | |||
2149 | *out_hwirq = intspec[0]; | ||
2150 | |||
2151 | return 0; | ||
2152 | } | ||
2153 | |||
2154 | static int octeon_irq_cib_map(struct irq_domain *d, | ||
2155 | unsigned int virq, irq_hw_number_t hw) | ||
2156 | { | ||
2157 | struct octeon_irq_cib_host_data *host_data = d->host_data; | ||
2158 | struct octeon_irq_cib_chip_data *cd; | ||
2159 | |||
2160 | if (hw >= host_data->max_bits) { | ||
2161 | pr_err("ERROR: %s mapping %u is to big!\n", | ||
2162 | d->of_node->name, (unsigned)hw); | ||
2163 | return -EINVAL; | ||
2164 | } | ||
2165 | |||
2166 | cd = kzalloc(sizeof(*cd), GFP_KERNEL); | ||
2167 | cd->host_data = host_data; | ||
2168 | cd->bit = hw; | ||
2169 | |||
2170 | irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, | ||
2171 | handle_simple_irq); | ||
2172 | irq_set_chip_data(virq, cd); | ||
2173 | return 0; | ||
1744 | } | 2174 | } |
1745 | 2175 | ||
2176 | static struct irq_domain_ops octeon_irq_domain_cib_ops = { | ||
2177 | .map = octeon_irq_cib_map, | ||
2178 | .unmap = octeon_irq_free_cd, | ||
2179 | .xlate = octeon_irq_cib_xlat, | ||
2180 | }; | ||
2181 | |||
2182 | /* Chain to real handler. */ | ||
2183 | static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) | ||
2184 | { | ||
2185 | u64 en; | ||
2186 | u64 raw; | ||
2187 | u64 bits; | ||
2188 | int i; | ||
2189 | int irq; | ||
2190 | struct irq_domain *cib_domain = data; | ||
2191 | struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; | ||
2192 | |||
2193 | en = cvmx_read_csr(host_data->en_reg); | ||
2194 | raw = cvmx_read_csr(host_data->raw_reg); | ||
2195 | |||
2196 | bits = en & raw; | ||
2197 | |||
2198 | for (i = 0; i < host_data->max_bits; i++) { | ||
2199 | if ((bits & 1ull << i) == 0) | ||
2200 | continue; | ||
2201 | irq = irq_find_mapping(cib_domain, i); | ||
2202 | if (!irq) { | ||
2203 | unsigned long flags; | ||
2204 | |||
2205 | pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", | ||
2206 | i, host_data->raw_reg); | ||
2207 | raw_spin_lock_irqsave(&host_data->lock, flags); | ||
2208 | en = cvmx_read_csr(host_data->en_reg); | ||
2209 | en &= ~(1ull << i); | ||
2210 | cvmx_write_csr(host_data->en_reg, en); | ||
2211 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
2212 | raw_spin_unlock_irqrestore(&host_data->lock, flags); | ||
2213 | } else { | ||
2214 | struct irq_desc *desc = irq_to_desc(irq); | ||
2215 | struct irq_data *irq_data = irq_desc_get_irq_data(desc); | ||
2216 | /* If edge, acknowledge the bit we will be sending. */ | ||
2217 | if (irqd_get_trigger_type(irq_data) & | ||
2218 | IRQ_TYPE_EDGE_BOTH) | ||
2219 | cvmx_write_csr(host_data->raw_reg, 1ull << i); | ||
2220 | generic_handle_irq_desc(irq, desc); | ||
2221 | } | ||
2222 | } | ||
2223 | |||
2224 | return IRQ_HANDLED; | ||
2225 | } | ||
2226 | |||
2227 | static int __init octeon_irq_init_cib(struct device_node *ciu_node, | ||
2228 | struct device_node *parent) | ||
2229 | { | ||
2230 | const __be32 *addr; | ||
2231 | u32 val; | ||
2232 | struct octeon_irq_cib_host_data *host_data; | ||
2233 | int parent_irq; | ||
2234 | int r; | ||
2235 | struct irq_domain *cib_domain; | ||
2236 | |||
2237 | parent_irq = irq_of_parse_and_map(ciu_node, 0); | ||
2238 | if (!parent_irq) { | ||
2239 | pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", | ||
2240 | ciu_node->name); | ||
2241 | return -EINVAL; | ||
2242 | } | ||
2243 | |||
2244 | host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); | ||
2245 | raw_spin_lock_init(&host_data->lock); | ||
2246 | |||
2247 | addr = of_get_address(ciu_node, 0, NULL, NULL); | ||
2248 | if (!addr) { | ||
2249 | pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); | ||
2250 | return -EINVAL; | ||
2251 | } | ||
2252 | host_data->raw_reg = (u64)phys_to_virt( | ||
2253 | of_translate_address(ciu_node, addr)); | ||
2254 | |||
2255 | addr = of_get_address(ciu_node, 1, NULL, NULL); | ||
2256 | if (!addr) { | ||
2257 | pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); | ||
2258 | return -EINVAL; | ||
2259 | } | ||
2260 | host_data->en_reg = (u64)phys_to_virt( | ||
2261 | of_translate_address(ciu_node, addr)); | ||
2262 | |||
2263 | r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); | ||
2264 | if (r) { | ||
2265 | pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", | ||
2266 | ciu_node->name); | ||
2267 | return r; | ||
2268 | } | ||
2269 | host_data->max_bits = val; | ||
2270 | |||
2271 | cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, | ||
2272 | &octeon_irq_domain_cib_ops, | ||
2273 | host_data); | ||
2274 | if (!cib_domain) { | ||
2275 | pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); | ||
2276 | return -ENOMEM; | ||
2277 | } | ||
2278 | |||
2279 | cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ | ||
2280 | cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ | ||
2281 | |||
2282 | r = request_irq(parent_irq, octeon_irq_cib_handler, | ||
2283 | IRQF_NO_THREAD, "cib", cib_domain); | ||
2284 | if (r) { | ||
2285 | pr_err("request_irq cib failed %d\n", r); | ||
2286 | return r; | ||
2287 | } | ||
2288 | pr_info("CIB interrupt controller probed: %llx %d\n", | ||
2289 | host_data->raw_reg, host_data->max_bits); | ||
2290 | return 0; | ||
2291 | } | ||
2292 | |||
2293 | static struct of_device_id ciu_types[] __initdata = { | ||
2294 | {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, | ||
2295 | {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, | ||
2296 | {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, | ||
2297 | {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, | ||
2298 | {} | ||
2299 | }; | ||
2300 | |||
1746 | void __init arch_init_irq(void) | 2301 | void __init arch_init_irq(void) |
1747 | { | 2302 | { |
1748 | #ifdef CONFIG_SMP | 2303 | #ifdef CONFIG_SMP |
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void) | |||
1750 | cpumask_clear(irq_default_affinity); | 2305 | cpumask_clear(irq_default_affinity); |
1751 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); | 2306 | cpumask_set_cpu(smp_processor_id(), irq_default_affinity); |
1752 | #endif | 2307 | #endif |
1753 | if (OCTEON_IS_MODEL(OCTEON_CN68XX)) | 2308 | of_irq_init(ciu_types); |
1754 | octeon_irq_init_ciu2(); | ||
1755 | else | ||
1756 | octeon_irq_init_ciu(); | ||
1757 | } | 2309 | } |
1758 | 2310 | ||
1759 | asmlinkage void plat_irq_dispatch(void) | 2311 | asmlinkage void plat_irq_dispatch(void) |
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void) | |||
1767 | cop0_cause &= cop0_status; | 2319 | cop0_cause &= cop0_status; |
1768 | cop0_cause &= ST0_IM; | 2320 | cop0_cause &= ST0_IM; |
1769 | 2321 | ||
1770 | if (unlikely(cop0_cause & STATUSF_IP2)) | 2322 | if (cop0_cause & STATUSF_IP2) |
1771 | octeon_irq_ip2(); | 2323 | octeon_irq_ip2(); |
1772 | else if (unlikely(cop0_cause & STATUSF_IP3)) | 2324 | else if (cop0_cause & STATUSF_IP3) |
1773 | octeon_irq_ip3(); | 2325 | octeon_irq_ip3(); |
1774 | else if (unlikely(cop0_cause & STATUSF_IP4)) | 2326 | else if (cop0_cause & STATUSF_IP4) |
1775 | octeon_irq_ip4(); | 2327 | octeon_irq_ip4(); |
1776 | else if (likely(cop0_cause)) | 2328 | else if (cop0_cause) |
1777 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); | 2329 | do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); |
1778 | else | 2330 | else |
1779 | break; | 2331 | break; |
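For reference, octeon_irq_cib_handler() above only services sources that are both enabled and raw-pending (en & raw), permanently masks any pending bit that has no Linux mapping so it cannot keep re-asserting the parent line, and acknowledges edge-triggered bits before dispatching them. A stand-alone user-space sketch of that dispatch rule follows; all names and values are illustrative and not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Pretend bit 3 is enabled by firmware but was never mapped in Linux. */
static int has_mapping(unsigned int bit)
{
	return bit != 3;
}

int main(void)
{
	uint64_t en   = 0x0bULL;	/* bits 0, 1, 3 enabled */
	uint64_t raw  = 0x0aULL;	/* bits 1, 3 raw-pending */
	uint64_t bits = en & raw;	/* only enabled AND pending bits are serviced */
	unsigned int i;

	for (i = 0; i < 64; i++) {
		if (!(bits & (1ULL << i)))
			continue;
		if (has_mapping(i))
			printf("dispatch bit %u\n", i);	/* the real handler calls generic_handle_irq_desc() */
		else
			en &= ~(1ULL << i);		/* unmapped source: mask it, as the handler does */
	}
	printf("en after masking: 0x%llx\n", (unsigned long long)en);
	return 0;
}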
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 94f888d3384e..a42110e7edbc 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/octeon/octeon.h> | 41 | #include <asm/octeon/octeon.h> |
42 | #include <asm/octeon/pci-octeon.h> | 42 | #include <asm/octeon/pci-octeon.h> |
43 | #include <asm/octeon/cvmx-mio-defs.h> | 43 | #include <asm/octeon/cvmx-mio-defs.h> |
44 | #include <asm/octeon/cvmx-rst-defs.h> | ||
44 | 45 | ||
45 | extern struct plat_smp_ops octeon_smp_ops; | 46 | extern struct plat_smp_ops octeon_smp_ops; |
46 | 47 | ||
@@ -579,12 +580,10 @@ void octeon_user_io_init(void) | |||
579 | /* R/W If set, CVMSEG is available for loads/stores in user | 580 | /* R/W If set, CVMSEG is available for loads/stores in user |
580 | * mode. */ | 581 | * mode. */ |
581 | cvmmemctl.s.cvmsegenau = 0; | 582 | cvmmemctl.s.cvmsegenau = 0; |
582 | /* R/W Size of local memory in cache blocks, 54 (6912 bytes) | ||
583 | * is max legal value. */ | ||
584 | cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; | ||
585 | 583 | ||
586 | write_c0_cvmmemctl(cvmmemctl.u64); | 584 | write_c0_cvmmemctl(cvmmemctl.u64); |
587 | 585 | ||
586 | /* Setup of CVMSEG is done in kernel-entry-init.h */ | ||
588 | if (smp_processor_id() == 0) | 587 | if (smp_processor_id() == 0) |
589 | pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", | 588 | pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", |
590 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, | 589 | CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, |
@@ -615,6 +614,7 @@ void __init prom_init(void) | |||
615 | const char *arg; | 614 | const char *arg; |
616 | char *p; | 615 | char *p; |
617 | int i; | 616 | int i; |
617 | u64 t; | ||
618 | int argc; | 618 | int argc; |
619 | #ifdef CONFIG_CAVIUM_RESERVE32 | 619 | #ifdef CONFIG_CAVIUM_RESERVE32 |
620 | int64_t addr = -1; | 620 | int64_t addr = -1; |
@@ -654,15 +654,56 @@ void __init prom_init(void) | |||
654 | sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; | 654 | sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; |
655 | sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; | 655 | sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; |
656 | 656 | ||
657 | if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { | 657 | if (OCTEON_IS_OCTEON2()) { |
658 | /* I/O clock runs at a different rate than the CPU. */ | 658 | /* I/O clock runs at a different rate than the CPU. */ |
659 | union cvmx_mio_rst_boot rst_boot; | 659 | union cvmx_mio_rst_boot rst_boot; |
660 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); | 660 | rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); |
661 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; | 661 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; |
662 | } else if (OCTEON_IS_OCTEON3()) { | ||
663 | /* I/O clock runs at a different rate than the CPU. */ | ||
664 | union cvmx_rst_boot rst_boot; | ||
665 | rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); | ||
666 | octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; | ||
662 | } else { | 667 | } else { |
663 | octeon_io_clock_rate = sysinfo->cpu_clock_hz; | 668 | octeon_io_clock_rate = sysinfo->cpu_clock_hz; |
664 | } | 669 | } |
665 | 670 | ||
671 | t = read_c0_cvmctl(); | ||
672 | if ((t & (1ull << 27)) == 0) { | ||
673 | /* | ||
674 | * Setup the multiplier save/restore code if | ||
675 | * CvmCtl[NOMUL] clear. | ||
676 | */ | ||
677 | void *save; | ||
678 | void *save_end; | ||
679 | void *restore; | ||
680 | void *restore_end; | ||
681 | int save_len; | ||
682 | int restore_len; | ||
683 | int save_max = (char *)octeon_mult_save_end - | ||
684 | (char *)octeon_mult_save; | ||
685 | int restore_max = (char *)octeon_mult_restore_end - | ||
686 | (char *)octeon_mult_restore; | ||
687 | if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { | ||
688 | save = octeon_mult_save3; | ||
689 | save_end = octeon_mult_save3_end; | ||
690 | restore = octeon_mult_restore3; | ||
691 | restore_end = octeon_mult_restore3_end; | ||
692 | } else { | ||
693 | save = octeon_mult_save2; | ||
694 | save_end = octeon_mult_save2_end; | ||
695 | restore = octeon_mult_restore2; | ||
696 | restore_end = octeon_mult_restore2_end; | ||
697 | } | ||
698 | save_len = (char *)save_end - (char *)save; | ||
699 | restore_len = (char *)restore_end - (char *)restore; | ||
700 | if (!WARN_ON(save_len > save_max || | ||
701 | restore_len > restore_max)) { | ||
702 | memcpy(octeon_mult_save, save, save_len); | ||
703 | memcpy(octeon_mult_restore, restore, restore_len); | ||
704 | } | ||
705 | } | ||
706 | |||
666 | /* | 707 | /* |
667 | * Only enable the LED controller if we're running on a CN38XX, CN58XX, | 708 | * Only enable the LED controller if we're running on a CN38XX, CN58XX, |
668 | * or CN56XX. The CN30XX and CN31XX don't have an LED controller. | 709 | * or CN56XX. The CN30XX and CN31XX don't have an LED controller. |
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar); | |||
1004 | 1045 | ||
1005 | void prom_free_prom_memory(void) | 1046 | void prom_free_prom_memory(void) |
1006 | { | 1047 | { |
1007 | if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { | 1048 | if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { |
1008 | /* Check for presence of Core-14449 fix. */ | 1049 | /* Check for presence of Core-14449 fix. */ |
1009 | u32 insn; | 1050 | u32 insn; |
1010 | u32 *foo; | 1051 | u32 *foo; |
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void) | |||
1026 | panic("No PREF instruction at Core-14449 probe point."); | 1067 | panic("No PREF instruction at Core-14449 probe point."); |
1027 | 1068 | ||
1028 | if (((insn >> 16) & 0x1f) != 28) | 1069 | if (((insn >> 16) & 0x1f) != 28) |
1029 | panic("Core-14449 WAR not in place (%04x).\n" | 1070 | panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" |
1030 | "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); | 1071 | "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", |
1072 | insn); | ||
1031 | } | 1073 | } |
1032 | } | 1074 | } |
1033 | 1075 | ||
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig new file mode 100644 index 000000000000..4bce1f8ebe98 --- /dev/null +++ b/arch/mips/configs/malta_qemu_32r6_defconfig | |||
@@ -0,0 +1,193 @@ | |||
1 | CONFIG_MIPS_MALTA=y | ||
2 | CONFIG_CPU_LITTLE_ENDIAN=y | ||
3 | CONFIG_CPU_MIPS32_R6=y | ||
4 | CONFIG_PAGE_SIZE_16KB=y | ||
5 | CONFIG_HZ_100=y | ||
6 | CONFIG_SYSVIPC=y | ||
7 | CONFIG_POSIX_MQUEUE=y | ||
8 | CONFIG_AUDIT=y | ||
9 | CONFIG_NO_HZ=y | ||
10 | CONFIG_IKCONFIG=y | ||
11 | CONFIG_IKCONFIG_PROC=y | ||
12 | CONFIG_LOG_BUF_SHIFT=15 | ||
13 | CONFIG_SYSCTL_SYSCALL=y | ||
14 | CONFIG_EMBEDDED=y | ||
15 | CONFIG_SLAB=y | ||
16 | CONFIG_MODULES=y | ||
17 | CONFIG_MODULE_UNLOAD=y | ||
18 | CONFIG_MODVERSIONS=y | ||
19 | CONFIG_MODULE_SRCVERSION_ALL=y | ||
20 | # CONFIG_BLK_DEV_BSG is not set | ||
21 | CONFIG_PCI=y | ||
22 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | ||
23 | CONFIG_NET=y | ||
24 | CONFIG_PACKET=y | ||
25 | CONFIG_UNIX=y | ||
26 | CONFIG_XFRM_USER=m | ||
27 | CONFIG_NET_KEY=y | ||
28 | CONFIG_INET=y | ||
29 | CONFIG_IP_MULTICAST=y | ||
30 | CONFIG_IP_ADVANCED_ROUTER=y | ||
31 | CONFIG_IP_MULTIPLE_TABLES=y | ||
32 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
33 | CONFIG_IP_ROUTE_VERBOSE=y | ||
34 | CONFIG_IP_PNP=y | ||
35 | CONFIG_IP_PNP_DHCP=y | ||
36 | CONFIG_IP_PNP_BOOTP=y | ||
37 | CONFIG_NET_IPIP=m | ||
38 | CONFIG_IP_MROUTE=y | ||
39 | CONFIG_IP_PIMSM_V1=y | ||
40 | CONFIG_IP_PIMSM_V2=y | ||
41 | CONFIG_SYN_COOKIES=y | ||
42 | CONFIG_INET_AH=m | ||
43 | CONFIG_INET_ESP=m | ||
44 | CONFIG_INET_IPCOMP=m | ||
45 | # CONFIG_INET_LRO is not set | ||
46 | CONFIG_INET6_AH=m | ||
47 | CONFIG_INET6_ESP=m | ||
48 | CONFIG_INET6_IPCOMP=m | ||
49 | CONFIG_IPV6_TUNNEL=m | ||
50 | CONFIG_BRIDGE=m | ||
51 | CONFIG_VLAN_8021Q=m | ||
52 | CONFIG_ATALK=m | ||
53 | CONFIG_DEV_APPLETALK=m | ||
54 | CONFIG_IPDDP=m | ||
55 | CONFIG_IPDDP_ENCAP=y | ||
56 | CONFIG_NET_SCHED=y | ||
57 | CONFIG_NET_SCH_CBQ=m | ||
58 | CONFIG_NET_SCH_HTB=m | ||
59 | CONFIG_NET_SCH_HFSC=m | ||
60 | CONFIG_NET_SCH_PRIO=m | ||
61 | CONFIG_NET_SCH_RED=m | ||
62 | CONFIG_NET_SCH_SFQ=m | ||
63 | CONFIG_NET_SCH_TEQL=m | ||
64 | CONFIG_NET_SCH_TBF=m | ||
65 | CONFIG_NET_SCH_GRED=m | ||
66 | CONFIG_NET_SCH_DSMARK=m | ||
67 | CONFIG_NET_SCH_NETEM=m | ||
68 | CONFIG_NET_SCH_INGRESS=m | ||
69 | CONFIG_NET_CLS_BASIC=m | ||
70 | CONFIG_NET_CLS_TCINDEX=m | ||
71 | CONFIG_NET_CLS_ROUTE4=m | ||
72 | CONFIG_NET_CLS_FW=m | ||
73 | CONFIG_NET_CLS_U32=m | ||
74 | CONFIG_NET_CLS_RSVP=m | ||
75 | CONFIG_NET_CLS_RSVP6=m | ||
76 | CONFIG_NET_CLS_ACT=y | ||
77 | CONFIG_NET_ACT_POLICE=y | ||
78 | CONFIG_NET_CLS_IND=y | ||
79 | # CONFIG_WIRELESS is not set | ||
80 | CONFIG_DEVTMPFS=y | ||
81 | CONFIG_BLK_DEV_LOOP=y | ||
82 | CONFIG_BLK_DEV_CRYPTOLOOP=m | ||
83 | CONFIG_IDE=y | ||
84 | # CONFIG_IDE_PROC_FS is not set | ||
85 | # CONFIG_IDEPCI_PCIBUS_ORDER is not set | ||
86 | CONFIG_BLK_DEV_GENERIC=y | ||
87 | CONFIG_BLK_DEV_PIIX=y | ||
88 | CONFIG_SCSI=y | ||
89 | CONFIG_BLK_DEV_SD=y | ||
90 | CONFIG_CHR_DEV_SG=y | ||
91 | # CONFIG_SCSI_LOWLEVEL is not set | ||
92 | CONFIG_NETDEVICES=y | ||
93 | # CONFIG_NET_VENDOR_3COM is not set | ||
94 | # CONFIG_NET_VENDOR_ADAPTEC is not set | ||
95 | # CONFIG_NET_VENDOR_ALTEON is not set | ||
96 | CONFIG_PCNET32=y | ||
97 | # CONFIG_NET_VENDOR_ATHEROS is not set | ||
98 | # CONFIG_NET_VENDOR_BROADCOM is not set | ||
99 | # CONFIG_NET_VENDOR_BROCADE is not set | ||
100 | # CONFIG_NET_VENDOR_CHELSIO is not set | ||
101 | # CONFIG_NET_VENDOR_CISCO is not set | ||
102 | # CONFIG_NET_VENDOR_DEC is not set | ||
103 | # CONFIG_NET_VENDOR_DLINK is not set | ||
104 | # CONFIG_NET_VENDOR_EMULEX is not set | ||
105 | # CONFIG_NET_VENDOR_EXAR is not set | ||
106 | # CONFIG_NET_VENDOR_HP is not set | ||
107 | # CONFIG_NET_VENDOR_INTEL is not set | ||
108 | # CONFIG_NET_VENDOR_MARVELL is not set | ||
109 | # CONFIG_NET_VENDOR_MELLANOX is not set | ||
110 | # CONFIG_NET_VENDOR_MICREL is not set | ||
111 | # CONFIG_NET_VENDOR_MYRI is not set | ||
112 | # CONFIG_NET_VENDOR_NATSEMI is not set | ||
113 | # CONFIG_NET_VENDOR_NVIDIA is not set | ||
114 | # CONFIG_NET_VENDOR_OKI is not set | ||
115 | # CONFIG_NET_PACKET_ENGINE is not set | ||
116 | # CONFIG_NET_VENDOR_QLOGIC is not set | ||
117 | # CONFIG_NET_VENDOR_REALTEK is not set | ||
118 | # CONFIG_NET_VENDOR_RDC is not set | ||
119 | # CONFIG_NET_VENDOR_SEEQ is not set | ||
120 | # CONFIG_NET_VENDOR_SILAN is not set | ||
121 | # CONFIG_NET_VENDOR_SIS is not set | ||
122 | # CONFIG_NET_VENDOR_SMSC is not set | ||
123 | # CONFIG_NET_VENDOR_STMICRO is not set | ||
124 | # CONFIG_NET_VENDOR_SUN is not set | ||
125 | # CONFIG_NET_VENDOR_TEHUTI is not set | ||
126 | # CONFIG_NET_VENDOR_TI is not set | ||
127 | # CONFIG_NET_VENDOR_TOSHIBA is not set | ||
128 | # CONFIG_NET_VENDOR_VIA is not set | ||
129 | # CONFIG_NET_VENDOR_WIZNET is not set | ||
130 | # CONFIG_WLAN is not set | ||
131 | # CONFIG_VT is not set | ||
132 | CONFIG_LEGACY_PTY_COUNT=4 | ||
133 | CONFIG_SERIAL_8250=y | ||
134 | CONFIG_SERIAL_8250_CONSOLE=y | ||
135 | CONFIG_HW_RANDOM=y | ||
136 | # CONFIG_HWMON is not set | ||
137 | CONFIG_FB=y | ||
138 | CONFIG_FIRMWARE_EDID=y | ||
139 | CONFIG_FB_MATROX=y | ||
140 | CONFIG_FB_MATROX_G=y | ||
141 | CONFIG_USB=y | ||
142 | CONFIG_USB_EHCI_HCD=y | ||
143 | # CONFIG_USB_EHCI_TT_NEWSCHED is not set | ||
144 | CONFIG_USB_UHCI_HCD=y | ||
145 | CONFIG_USB_STORAGE=y | ||
146 | CONFIG_NEW_LEDS=y | ||
147 | CONFIG_LEDS_CLASS=y | ||
148 | CONFIG_LEDS_TRIGGERS=y | ||
149 | CONFIG_LEDS_TRIGGER_TIMER=y | ||
150 | CONFIG_LEDS_TRIGGER_IDE_DISK=y | ||
151 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
152 | CONFIG_LEDS_TRIGGER_BACKLIGHT=y | ||
153 | CONFIG_LEDS_TRIGGER_DEFAULT_ON=y | ||
154 | CONFIG_RTC_CLASS=y | ||
155 | CONFIG_RTC_DRV_CMOS=y | ||
156 | CONFIG_EXT2_FS=y | ||
157 | CONFIG_EXT3_FS=y | ||
158 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
159 | CONFIG_XFS_FS=y | ||
160 | CONFIG_XFS_QUOTA=y | ||
161 | CONFIG_XFS_POSIX_ACL=y | ||
162 | CONFIG_QUOTA=y | ||
163 | CONFIG_QFMT_V2=y | ||
164 | CONFIG_MSDOS_FS=m | ||
165 | CONFIG_VFAT_FS=m | ||
166 | CONFIG_PROC_KCORE=y | ||
167 | CONFIG_TMPFS=y | ||
168 | CONFIG_NFS_FS=y | ||
169 | CONFIG_ROOT_NFS=y | ||
170 | CONFIG_CIFS=m | ||
171 | CONFIG_CIFS_WEAK_PW_HASH=y | ||
172 | CONFIG_CIFS_XATTR=y | ||
173 | CONFIG_CIFS_POSIX=y | ||
174 | CONFIG_NLS_CODEPAGE_437=m | ||
175 | CONFIG_NLS_ISO8859_1=m | ||
176 | # CONFIG_FTRACE is not set | ||
177 | CONFIG_CRYPTO_NULL=m | ||
178 | CONFIG_CRYPTO_PCBC=m | ||
179 | CONFIG_CRYPTO_HMAC=y | ||
180 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
181 | CONFIG_CRYPTO_SHA512=m | ||
182 | CONFIG_CRYPTO_TGR192=m | ||
183 | CONFIG_CRYPTO_WP512=m | ||
184 | CONFIG_CRYPTO_ANUBIS=m | ||
185 | CONFIG_CRYPTO_BLOWFISH=m | ||
186 | CONFIG_CRYPTO_CAST5=m | ||
187 | CONFIG_CRYPTO_CAST6=m | ||
188 | CONFIG_CRYPTO_KHAZAD=m | ||
189 | CONFIG_CRYPTO_SERPENT=m | ||
190 | CONFIG_CRYPTO_TEA=m | ||
191 | CONFIG_CRYPTO_TWOFISH=m | ||
192 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
193 | # CONFIG_CRYPTO_HW is not set | ||
diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index f9f5307434c2..19f710117d97 100644 --- a/arch/mips/fw/arc/misc.c +++ b/arch/mips/fw/arc/misc.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) | 9 | * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) |
10 | * Copyright (C) 1999 Silicon Graphics, Inc. | 10 | * Copyright (C) 1999 Silicon Graphics, Inc. |
11 | */ | 11 | */ |
12 | #include <linux/compiler.h> | ||
12 | #include <linux/init.h> | 13 | #include <linux/init.h> |
13 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
14 | #include <linux/irqflags.h> | 15 | #include <linux/irqflags.h> |
@@ -19,50 +20,55 @@ | |||
19 | #include <asm/sgialib.h> | 20 | #include <asm/sgialib.h> |
20 | #include <asm/bootinfo.h> | 21 | #include <asm/bootinfo.h> |
21 | 22 | ||
22 | VOID | 23 | VOID __noreturn |
23 | ArcHalt(VOID) | 24 | ArcHalt(VOID) |
24 | { | 25 | { |
25 | bc_disable(); | 26 | bc_disable(); |
26 | local_irq_disable(); | 27 | local_irq_disable(); |
27 | ARC_CALL0(halt); | 28 | ARC_CALL0(halt); |
28 | never: goto never; | 29 | |
30 | unreachable(); | ||
29 | } | 31 | } |
30 | 32 | ||
31 | VOID | 33 | VOID __noreturn |
32 | ArcPowerDown(VOID) | 34 | ArcPowerDown(VOID) |
33 | { | 35 | { |
34 | bc_disable(); | 36 | bc_disable(); |
35 | local_irq_disable(); | 37 | local_irq_disable(); |
36 | ARC_CALL0(pdown); | 38 | ARC_CALL0(pdown); |
37 | never: goto never; | 39 | |
40 | unreachable(); | ||
38 | } | 41 | } |
39 | 42 | ||
40 | /* XXX is this a soft reset basically? XXX */ | 43 | /* XXX is this a soft reset basically? XXX */ |
41 | VOID | 44 | VOID __noreturn |
42 | ArcRestart(VOID) | 45 | ArcRestart(VOID) |
43 | { | 46 | { |
44 | bc_disable(); | 47 | bc_disable(); |
45 | local_irq_disable(); | 48 | local_irq_disable(); |
46 | ARC_CALL0(restart); | 49 | ARC_CALL0(restart); |
47 | never: goto never; | 50 | |
51 | unreachable(); | ||
48 | } | 52 | } |
49 | 53 | ||
50 | VOID | 54 | VOID __noreturn |
51 | ArcReboot(VOID) | 55 | ArcReboot(VOID) |
52 | { | 56 | { |
53 | bc_disable(); | 57 | bc_disable(); |
54 | local_irq_disable(); | 58 | local_irq_disable(); |
55 | ARC_CALL0(reboot); | 59 | ARC_CALL0(reboot); |
56 | never: goto never; | 60 | |
61 | unreachable(); | ||
57 | } | 62 | } |
58 | 63 | ||
59 | VOID | 64 | VOID __noreturn |
60 | ArcEnterInteractiveMode(VOID) | 65 | ArcEnterInteractiveMode(VOID) |
61 | { | 66 | { |
62 | bc_disable(); | 67 | bc_disable(); |
63 | local_irq_disable(); | 68 | local_irq_disable(); |
64 | ARC_CALL0(imode); | 69 | ARC_CALL0(imode); |
65 | never: goto never; | 70 | |
71 | unreachable(); | ||
66 | } | 72 | } |
67 | 73 | ||
68 | LONG | 74 | LONG |
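The ArcHalt()/ArcReboot() family above is now tagged __noreturn and ends in unreachable() instead of the old "never: goto never;" spin, letting the compiler drop the dead return path. A host-runnable analogue of the same pattern, with purely illustrative names:

#include <stdlib.h>

/* __attribute__((noreturn)) stands in for the kernel's __noreturn, and
 * __builtin_unreachable() for unreachable() from <linux/compiler.h>. */
static void __attribute__((noreturn)) halt_like(void)
{
	exit(0);			/* stands in for the firmware halt call */
	__builtin_unreachable();	/* control never reaches this point */
}

int main(void)
{
	halt_like();			/* no return and no fall-through warning */
}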
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 200efeac4181..526539cbc99f 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild | |||
@@ -1,4 +1,5 @@ | |||
1 | # MIPS headers | 1 | # MIPS headers |
2 | generic-$(CONFIG_GENERIC_CSUM) += checksum.h | ||
2 | generic-y += cputime.h | 3 | generic-y += cputime.h |
3 | generic-y += current.h | 4 | generic-y += current.h |
4 | generic-y += dma-contiguous.h | 5 | generic-y += dma-contiguous.h |
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 6caf8766b80f..0cae4595e985 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <asm/asmmacro-64.h> | 19 | #include <asm/asmmacro-64.h> |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_CPU_MIPSR2 | 22 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
23 | .macro local_irq_enable reg=t0 | 23 | .macro local_irq_enable reg=t0 |
24 | ei | 24 | ei |
25 | irq_enable_hazard | 25 | irq_enable_hazard |
@@ -104,7 +104,8 @@ | |||
104 | .endm | 104 | .endm |
105 | 105 | ||
106 | .macro fpu_save_double thread status tmp | 106 | .macro fpu_save_double thread status tmp |
107 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 107 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
108 | defined(CONFIG_CPU_MIPS32_R6) | ||
108 | sll \tmp, \status, 5 | 109 | sll \tmp, \status, 5 |
109 | bgez \tmp, 10f | 110 | bgez \tmp, 10f |
110 | fpu_save_16odd \thread | 111 | fpu_save_16odd \thread |
@@ -160,7 +161,8 @@ | |||
160 | .endm | 161 | .endm |
161 | 162 | ||
162 | .macro fpu_restore_double thread status tmp | 163 | .macro fpu_restore_double thread status tmp |
163 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 164 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
165 | defined(CONFIG_CPU_MIPS32_R6) | ||
164 | sll \tmp, \status, 5 | 166 | sll \tmp, \status, 5 |
165 | bgez \tmp, 10f # 16 register mode? | 167 | bgez \tmp, 10f # 16 register mode? |
166 | 168 | ||
@@ -170,16 +172,16 @@ | |||
170 | fpu_restore_16even \thread \tmp | 172 | fpu_restore_16even \thread \tmp |
171 | .endm | 173 | .endm |
172 | 174 | ||
173 | #ifdef CONFIG_CPU_MIPSR2 | 175 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
174 | .macro _EXT rd, rs, p, s | 176 | .macro _EXT rd, rs, p, s |
175 | ext \rd, \rs, \p, \s | 177 | ext \rd, \rs, \p, \s |
176 | .endm | 178 | .endm |
177 | #else /* !CONFIG_CPU_MIPSR2 */ | 179 | #else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */ |
178 | .macro _EXT rd, rs, p, s | 180 | .macro _EXT rd, rs, p, s |
179 | srl \rd, \rs, \p | 181 | srl \rd, \rs, \p |
180 | andi \rd, \rd, (1 << \s) - 1 | 182 | andi \rd, \rd, (1 << \s) - 1 |
181 | .endm | 183 | .endm |
182 | #endif /* !CONFIG_CPU_MIPSR2 */ | 184 | #endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */ |
183 | 185 | ||
184 | /* | 186 | /* |
185 | * Temporary until all gas have MT ASE support | 187 | * Temporary until all gas have MT ASE support |
@@ -304,7 +306,7 @@ | |||
304 | .set push | 306 | .set push |
305 | .set noat | 307 | .set noat |
306 | SET_HARDFLOAT | 308 | SET_HARDFLOAT |
307 | add $1, \base, \off | 309 | addu $1, \base, \off |
308 | .word LDD_MSA_INSN | (\wd << 6) | 310 | .word LDD_MSA_INSN | (\wd << 6) |
309 | .set pop | 311 | .set pop |
310 | .endm | 312 | .endm |
@@ -313,7 +315,7 @@ | |||
313 | .set push | 315 | .set push |
314 | .set noat | 316 | .set noat |
315 | SET_HARDFLOAT | 317 | SET_HARDFLOAT |
316 | add $1, \base, \off | 318 | addu $1, \base, \off |
317 | .word STD_MSA_INSN | (\wd << 6) | 319 | .word STD_MSA_INSN | (\wd << 6) |
318 | .set pop | 320 | .set pop |
319 | .endm | 321 | .endm |
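On pre-R2 CPUs the _EXT macro above emulates the R2/R6 ext bit-field extract with a shift followed by a mask. A quick host-side equivalence check of that fallback (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* (rs >> p) & ((1 << s) - 1) is what the srl + andi pair computes. */
static uint32_t ext_emulated(uint32_t rs, unsigned int p, unsigned int s)
{
	return (rs >> p) & ((1u << s) - 1);
}

int main(void)
{
	/* extract 4 bits starting at bit 8 of 0x00000f00 -> 0xf */
	assert(ext_emulated(0x00000f00u, 8, 4) == 0xfu);
	printf("0x%x\n", ext_emulated(0xdeadbeefu, 12, 8));	/* prints 0xdb */
	return 0;
}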
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 857da84cfc92..26d436336f2e 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ | |||
54 | " sc %0, %1 \n" \ | 54 | " sc %0, %1 \n" \ |
55 | " beqzl %0, 1b \n" \ | 55 | " beqzl %0, 1b \n" \ |
56 | " .set mips0 \n" \ | 56 | " .set mips0 \n" \ |
57 | : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ | 57 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
58 | : "Ir" (i)); \ | 58 | : "Ir" (i)); \ |
59 | } else if (kernel_uses_llsc) { \ | 59 | } else if (kernel_uses_llsc) { \ |
60 | int temp; \ | 60 | int temp; \ |
61 | \ | 61 | \ |
62 | do { \ | 62 | do { \ |
63 | __asm__ __volatile__( \ | 63 | __asm__ __volatile__( \ |
64 | " .set arch=r4000 \n" \ | 64 | " .set "MIPS_ISA_LEVEL" \n" \ |
65 | " ll %0, %1 # atomic_" #op "\n" \ | 65 | " ll %0, %1 # atomic_" #op "\n" \ |
66 | " " #asm_op " %0, %2 \n" \ | 66 | " " #asm_op " %0, %2 \n" \ |
67 | " sc %0, %1 \n" \ | 67 | " sc %0, %1 \n" \ |
68 | " .set mips0 \n" \ | 68 | " .set mips0 \n" \ |
69 | : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ | 69 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
70 | : "Ir" (i)); \ | 70 | : "Ir" (i)); \ |
71 | } while (unlikely(!temp)); \ | 71 | } while (unlikely(!temp)); \ |
72 | } else { \ | 72 | } else { \ |
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ | |||
97 | " " #asm_op " %0, %1, %3 \n" \ | 97 | " " #asm_op " %0, %1, %3 \n" \ |
98 | " .set mips0 \n" \ | 98 | " .set mips0 \n" \ |
99 | : "=&r" (result), "=&r" (temp), \ | 99 | : "=&r" (result), "=&r" (temp), \ |
100 | "+" GCC_OFF12_ASM() (v->counter) \ | 100 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
101 | : "Ir" (i)); \ | 101 | : "Ir" (i)); \ |
102 | } else if (kernel_uses_llsc) { \ | 102 | } else if (kernel_uses_llsc) { \ |
103 | int temp; \ | 103 | int temp; \ |
104 | \ | 104 | \ |
105 | do { \ | 105 | do { \ |
106 | __asm__ __volatile__( \ | 106 | __asm__ __volatile__( \ |
107 | " .set arch=r4000 \n" \ | 107 | " .set "MIPS_ISA_LEVEL" \n" \ |
108 | " ll %1, %2 # atomic_" #op "_return \n" \ | 108 | " ll %1, %2 # atomic_" #op "_return \n" \ |
109 | " " #asm_op " %0, %1, %3 \n" \ | 109 | " " #asm_op " %0, %1, %3 \n" \ |
110 | " sc %0, %2 \n" \ | 110 | " sc %0, %2 \n" \ |
111 | " .set mips0 \n" \ | 111 | " .set mips0 \n" \ |
112 | : "=&r" (result), "=&r" (temp), \ | 112 | : "=&r" (result), "=&r" (temp), \ |
113 | "+" GCC_OFF12_ASM() (v->counter) \ | 113 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
114 | : "Ir" (i)); \ | 114 | : "Ir" (i)); \ |
115 | } while (unlikely(!result)); \ | 115 | } while (unlikely(!result)); \ |
116 | \ | 116 | \ |
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
171 | "1: \n" | 171 | "1: \n" |
172 | " .set mips0 \n" | 172 | " .set mips0 \n" |
173 | : "=&r" (result), "=&r" (temp), | 173 | : "=&r" (result), "=&r" (temp), |
174 | "+" GCC_OFF12_ASM() (v->counter) | 174 | "+" GCC_OFF_SMALL_ASM() (v->counter) |
175 | : "Ir" (i), GCC_OFF12_ASM() (v->counter) | 175 | : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) |
176 | : "memory"); | 176 | : "memory"); |
177 | } else if (kernel_uses_llsc) { | 177 | } else if (kernel_uses_llsc) { |
178 | int temp; | 178 | int temp; |
179 | 179 | ||
180 | __asm__ __volatile__( | 180 | __asm__ __volatile__( |
181 | " .set arch=r4000 \n" | 181 | " .set "MIPS_ISA_LEVEL" \n" |
182 | "1: ll %1, %2 # atomic_sub_if_positive\n" | 182 | "1: ll %1, %2 # atomic_sub_if_positive\n" |
183 | " subu %0, %1, %3 \n" | 183 | " subu %0, %1, %3 \n" |
184 | " bltz %0, 1f \n" | 184 | " bltz %0, 1f \n" |
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) | |||
190 | "1: \n" | 190 | "1: \n" |
191 | " .set mips0 \n" | 191 | " .set mips0 \n" |
192 | : "=&r" (result), "=&r" (temp), | 192 | : "=&r" (result), "=&r" (temp), |
193 | "+" GCC_OFF12_ASM() (v->counter) | 193 | "+" GCC_OFF_SMALL_ASM() (v->counter) |
194 | : "Ir" (i)); | 194 | : "Ir" (i)); |
195 | } else { | 195 | } else { |
196 | unsigned long flags; | 196 | unsigned long flags; |
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ | |||
333 | " scd %0, %1 \n" \ | 333 | " scd %0, %1 \n" \ |
334 | " beqzl %0, 1b \n" \ | 334 | " beqzl %0, 1b \n" \ |
335 | " .set mips0 \n" \ | 335 | " .set mips0 \n" \ |
336 | : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ | 336 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
337 | : "Ir" (i)); \ | 337 | : "Ir" (i)); \ |
338 | } else if (kernel_uses_llsc) { \ | 338 | } else if (kernel_uses_llsc) { \ |
339 | long temp; \ | 339 | long temp; \ |
340 | \ | 340 | \ |
341 | do { \ | 341 | do { \ |
342 | __asm__ __volatile__( \ | 342 | __asm__ __volatile__( \ |
343 | " .set arch=r4000 \n" \ | 343 | " .set "MIPS_ISA_LEVEL" \n" \ |
344 | " lld %0, %1 # atomic64_" #op "\n" \ | 344 | " lld %0, %1 # atomic64_" #op "\n" \ |
345 | " " #asm_op " %0, %2 \n" \ | 345 | " " #asm_op " %0, %2 \n" \ |
346 | " scd %0, %1 \n" \ | 346 | " scd %0, %1 \n" \ |
347 | " .set mips0 \n" \ | 347 | " .set mips0 \n" \ |
348 | : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ | 348 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
349 | : "Ir" (i)); \ | 349 | : "Ir" (i)); \ |
350 | } while (unlikely(!temp)); \ | 350 | } while (unlikely(!temp)); \ |
351 | } else { \ | 351 | } else { \ |
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ | |||
376 | " " #asm_op " %0, %1, %3 \n" \ | 376 | " " #asm_op " %0, %1, %3 \n" \ |
377 | " .set mips0 \n" \ | 377 | " .set mips0 \n" \ |
378 | : "=&r" (result), "=&r" (temp), \ | 378 | : "=&r" (result), "=&r" (temp), \ |
379 | "+" GCC_OFF12_ASM() (v->counter) \ | 379 | "+" GCC_OFF_SMALL_ASM() (v->counter) \ |
380 | : "Ir" (i)); \ | 380 | : "Ir" (i)); \ |
381 | } else if (kernel_uses_llsc) { \ | 381 | } else if (kernel_uses_llsc) { \ |
382 | long temp; \ | 382 | long temp; \ |
383 | \ | 383 | \ |
384 | do { \ | 384 | do { \ |
385 | __asm__ __volatile__( \ | 385 | __asm__ __volatile__( \ |
386 | " .set arch=r4000 \n" \ | 386 | " .set "MIPS_ISA_LEVEL" \n" \ |
387 | " lld %1, %2 # atomic64_" #op "_return\n" \ | 387 | " lld %1, %2 # atomic64_" #op "_return\n" \ |
388 | " " #asm_op " %0, %1, %3 \n" \ | 388 | " " #asm_op " %0, %1, %3 \n" \ |
389 | " scd %0, %2 \n" \ | 389 | " scd %0, %2 \n" \ |
390 | " .set mips0 \n" \ | 390 | " .set mips0 \n" \ |
391 | : "=&r" (result), "=&r" (temp), \ | 391 | : "=&r" (result), "=&r" (temp), \ |
392 | "=" GCC_OFF12_ASM() (v->counter) \ | 392 | "=" GCC_OFF_SMALL_ASM() (v->counter) \ |
393 | : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ | 393 | : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ |
394 | : "memory"); \ | 394 | : "memory"); \ |
395 | } while (unlikely(!result)); \ | 395 | } while (unlikely(!result)); \ |
396 | \ | 396 | \ |
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
452 | "1: \n" | 452 | "1: \n" |
453 | " .set mips0 \n" | 453 | " .set mips0 \n" |
454 | : "=&r" (result), "=&r" (temp), | 454 | : "=&r" (result), "=&r" (temp), |
455 | "=" GCC_OFF12_ASM() (v->counter) | 455 | "=" GCC_OFF_SMALL_ASM() (v->counter) |
456 | : "Ir" (i), GCC_OFF12_ASM() (v->counter) | 456 | : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) |
457 | : "memory"); | 457 | : "memory"); |
458 | } else if (kernel_uses_llsc) { | 458 | } else if (kernel_uses_llsc) { |
459 | long temp; | 459 | long temp; |
460 | 460 | ||
461 | __asm__ __volatile__( | 461 | __asm__ __volatile__( |
462 | " .set arch=r4000 \n" | 462 | " .set "MIPS_ISA_LEVEL" \n" |
463 | "1: lld %1, %2 # atomic64_sub_if_positive\n" | 463 | "1: lld %1, %2 # atomic64_sub_if_positive\n" |
464 | " dsubu %0, %1, %3 \n" | 464 | " dsubu %0, %1, %3 \n" |
465 | " bltz %0, 1f \n" | 465 | " bltz %0, 1f \n" |
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) | |||
471 | "1: \n" | 471 | "1: \n" |
472 | " .set mips0 \n" | 472 | " .set mips0 \n" |
473 | : "=&r" (result), "=&r" (temp), | 473 | : "=&r" (result), "=&r" (temp), |
474 | "+" GCC_OFF12_ASM() (v->counter) | 474 | "+" GCC_OFF_SMALL_ASM() (v->counter) |
475 | : "Ir" (i)); | 475 | : "Ir" (i)); |
476 | } else { | 476 | } else { |
477 | unsigned long flags; | 477 | unsigned long flags; |
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 6663bcca9d0c..9f935f6aa996 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h | |||
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
79 | " " __SC "%0, %1 \n" | 79 | " " __SC "%0, %1 \n" |
80 | " beqzl %0, 1b \n" | 80 | " beqzl %0, 1b \n" |
81 | " .set mips0 \n" | 81 | " .set mips0 \n" |
82 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) | 82 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) |
83 | : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); | 83 | : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); |
84 | #ifdef CONFIG_CPU_MIPSR2 | 84 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
85 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 85 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
86 | do { | 86 | do { |
87 | __asm__ __volatile__( | 87 | __asm__ __volatile__( |
88 | " " __LL "%0, %1 # set_bit \n" | 88 | " " __LL "%0, %1 # set_bit \n" |
89 | " " __INS "%0, %3, %2, 1 \n" | 89 | " " __INS "%0, %3, %2, 1 \n" |
90 | " " __SC "%0, %1 \n" | 90 | " " __SC "%0, %1 \n" |
91 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 91 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
92 | : "ir" (bit), "r" (~0)); | 92 | : "ir" (bit), "r" (~0)); |
93 | } while (unlikely(!temp)); | 93 | } while (unlikely(!temp)); |
94 | #endif /* CONFIG_CPU_MIPSR2 */ | 94 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
95 | } else if (kernel_uses_llsc) { | 95 | } else if (kernel_uses_llsc) { |
96 | do { | 96 | do { |
97 | __asm__ __volatile__( | 97 | __asm__ __volatile__( |
98 | " .set arch=r4000 \n" | 98 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
99 | " " __LL "%0, %1 # set_bit \n" | 99 | " " __LL "%0, %1 # set_bit \n" |
100 | " or %0, %2 \n" | 100 | " or %0, %2 \n" |
101 | " " __SC "%0, %1 \n" | 101 | " " __SC "%0, %1 \n" |
102 | " .set mips0 \n" | 102 | " .set mips0 \n" |
103 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 103 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
104 | : "ir" (1UL << bit)); | 104 | : "ir" (1UL << bit)); |
105 | } while (unlikely(!temp)); | 105 | } while (unlikely(!temp)); |
106 | } else | 106 | } else |
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
131 | " " __SC "%0, %1 \n" | 131 | " " __SC "%0, %1 \n" |
132 | " beqzl %0, 1b \n" | 132 | " beqzl %0, 1b \n" |
133 | " .set mips0 \n" | 133 | " .set mips0 \n" |
134 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 134 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
135 | : "ir" (~(1UL << bit))); | 135 | : "ir" (~(1UL << bit))); |
136 | #ifdef CONFIG_CPU_MIPSR2 | 136 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
137 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 137 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
138 | do { | 138 | do { |
139 | __asm__ __volatile__( | 139 | __asm__ __volatile__( |
140 | " " __LL "%0, %1 # clear_bit \n" | 140 | " " __LL "%0, %1 # clear_bit \n" |
141 | " " __INS "%0, $0, %2, 1 \n" | 141 | " " __INS "%0, $0, %2, 1 \n" |
142 | " " __SC "%0, %1 \n" | 142 | " " __SC "%0, %1 \n" |
143 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 143 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
144 | : "ir" (bit)); | 144 | : "ir" (bit)); |
145 | } while (unlikely(!temp)); | 145 | } while (unlikely(!temp)); |
146 | #endif /* CONFIG_CPU_MIPSR2 */ | 146 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
147 | } else if (kernel_uses_llsc) { | 147 | } else if (kernel_uses_llsc) { |
148 | do { | 148 | do { |
149 | __asm__ __volatile__( | 149 | __asm__ __volatile__( |
150 | " .set arch=r4000 \n" | 150 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
151 | " " __LL "%0, %1 # clear_bit \n" | 151 | " " __LL "%0, %1 # clear_bit \n" |
152 | " and %0, %2 \n" | 152 | " and %0, %2 \n" |
153 | " " __SC "%0, %1 \n" | 153 | " " __SC "%0, %1 \n" |
154 | " .set mips0 \n" | 154 | " .set mips0 \n" |
155 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 155 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
156 | : "ir" (~(1UL << bit))); | 156 | : "ir" (~(1UL << bit))); |
157 | } while (unlikely(!temp)); | 157 | } while (unlikely(!temp)); |
158 | } else | 158 | } else |
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
197 | " " __SC "%0, %1 \n" | 197 | " " __SC "%0, %1 \n" |
198 | " beqzl %0, 1b \n" | 198 | " beqzl %0, 1b \n" |
199 | " .set mips0 \n" | 199 | " .set mips0 \n" |
200 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 200 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
201 | : "ir" (1UL << bit)); | 201 | : "ir" (1UL << bit)); |
202 | } else if (kernel_uses_llsc) { | 202 | } else if (kernel_uses_llsc) { |
203 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 203 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
205 | 205 | ||
206 | do { | 206 | do { |
207 | __asm__ __volatile__( | 207 | __asm__ __volatile__( |
208 | " .set arch=r4000 \n" | 208 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
209 | " " __LL "%0, %1 # change_bit \n" | 209 | " " __LL "%0, %1 # change_bit \n" |
210 | " xor %0, %2 \n" | 210 | " xor %0, %2 \n" |
211 | " " __SC "%0, %1 \n" | 211 | " " __SC "%0, %1 \n" |
212 | " .set mips0 \n" | 212 | " .set mips0 \n" |
213 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) | 213 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) |
214 | : "ir" (1UL << bit)); | 214 | : "ir" (1UL << bit)); |
215 | } while (unlikely(!temp)); | 215 | } while (unlikely(!temp)); |
216 | } else | 216 | } else |
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr, | |||
245 | " beqzl %2, 1b \n" | 245 | " beqzl %2, 1b \n" |
246 | " and %2, %0, %3 \n" | 246 | " and %2, %0, %3 \n" |
247 | " .set mips0 \n" | 247 | " .set mips0 \n" |
248 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 248 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
249 | : "r" (1UL << bit) | 249 | : "r" (1UL << bit) |
250 | : "memory"); | 250 | : "memory"); |
251 | } else if (kernel_uses_llsc) { | 251 | } else if (kernel_uses_llsc) { |
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr, | |||
254 | 254 | ||
255 | do { | 255 | do { |
256 | __asm__ __volatile__( | 256 | __asm__ __volatile__( |
257 | " .set arch=r4000 \n" | 257 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
258 | " " __LL "%0, %1 # test_and_set_bit \n" | 258 | " " __LL "%0, %1 # test_and_set_bit \n" |
259 | " or %2, %0, %3 \n" | 259 | " or %2, %0, %3 \n" |
260 | " " __SC "%2, %1 \n" | 260 | " " __SC "%2, %1 \n" |
261 | " .set mips0 \n" | 261 | " .set mips0 \n" |
262 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 262 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
263 | : "r" (1UL << bit) | 263 | : "r" (1UL << bit) |
264 | : "memory"); | 264 | : "memory"); |
265 | } while (unlikely(!res)); | 265 | } while (unlikely(!res)); |
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr, | |||
308 | 308 | ||
309 | do { | 309 | do { |
310 | __asm__ __volatile__( | 310 | __asm__ __volatile__( |
311 | " .set arch=r4000 \n" | 311 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
312 | " " __LL "%0, %1 # test_and_set_bit \n" | 312 | " " __LL "%0, %1 # test_and_set_bit \n" |
313 | " or %2, %0, %3 \n" | 313 | " or %2, %0, %3 \n" |
314 | " " __SC "%2, %1 \n" | 314 | " " __SC "%2, %1 \n" |
315 | " .set mips0 \n" | 315 | " .set mips0 \n" |
316 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 316 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
317 | : "r" (1UL << bit) | 317 | : "r" (1UL << bit) |
318 | : "memory"); | 318 | : "memory"); |
319 | } while (unlikely(!res)); | 319 | } while (unlikely(!res)); |
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
355 | " beqzl %2, 1b \n" | 355 | " beqzl %2, 1b \n" |
356 | " and %2, %0, %3 \n" | 356 | " and %2, %0, %3 \n" |
357 | " .set mips0 \n" | 357 | " .set mips0 \n" |
358 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 358 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
359 | : "r" (1UL << bit) | 359 | : "r" (1UL << bit) |
360 | : "memory"); | 360 | : "memory"); |
361 | #ifdef CONFIG_CPU_MIPSR2 | 361 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
362 | } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { | 362 | } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { |
363 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 363 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
364 | unsigned long temp; | 364 | unsigned long temp; |
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
369 | " " __EXT "%2, %0, %3, 1 \n" | 369 | " " __EXT "%2, %0, %3, 1 \n" |
370 | " " __INS "%0, $0, %3, 1 \n" | 370 | " " __INS "%0, $0, %3, 1 \n" |
371 | " " __SC "%0, %1 \n" | 371 | " " __SC "%0, %1 \n" |
372 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 372 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
373 | : "ir" (bit) | 373 | : "ir" (bit) |
374 | : "memory"); | 374 | : "memory"); |
375 | } while (unlikely(!temp)); | 375 | } while (unlikely(!temp)); |
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
380 | 380 | ||
381 | do { | 381 | do { |
382 | __asm__ __volatile__( | 382 | __asm__ __volatile__( |
383 | " .set arch=r4000 \n" | 383 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
384 | " " __LL "%0, %1 # test_and_clear_bit \n" | 384 | " " __LL "%0, %1 # test_and_clear_bit \n" |
385 | " or %2, %0, %3 \n" | 385 | " or %2, %0, %3 \n" |
386 | " xor %2, %3 \n" | 386 | " xor %2, %3 \n" |
387 | " " __SC "%2, %1 \n" | 387 | " " __SC "%2, %1 \n" |
388 | " .set mips0 \n" | 388 | " .set mips0 \n" |
389 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 389 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
390 | : "r" (1UL << bit) | 390 | : "r" (1UL << bit) |
391 | : "memory"); | 391 | : "memory"); |
392 | } while (unlikely(!res)); | 392 | } while (unlikely(!res)); |
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr, | |||
428 | " beqzl %2, 1b \n" | 428 | " beqzl %2, 1b \n" |
429 | " and %2, %0, %3 \n" | 429 | " and %2, %0, %3 \n" |
430 | " .set mips0 \n" | 430 | " .set mips0 \n" |
431 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 431 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
432 | : "r" (1UL << bit) | 432 | : "r" (1UL << bit) |
433 | : "memory"); | 433 | : "memory"); |
434 | } else if (kernel_uses_llsc) { | 434 | } else if (kernel_uses_llsc) { |
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr, | |||
437 | 437 | ||
438 | do { | 438 | do { |
439 | __asm__ __volatile__( | 439 | __asm__ __volatile__( |
440 | " .set arch=r4000 \n" | 440 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
441 | " " __LL "%0, %1 # test_and_change_bit \n" | 441 | " " __LL "%0, %1 # test_and_change_bit \n" |
442 | " xor %2, %0, %3 \n" | 442 | " xor %2, %0, %3 \n" |
443 | " " __SC "\t%2, %1 \n" | 443 | " " __SC "\t%2, %1 \n" |
444 | " .set mips0 \n" | 444 | " .set mips0 \n" |
445 | : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) | 445 | : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) |
446 | : "r" (1UL << bit) | 446 | : "r" (1UL << bit) |
447 | : "memory"); | 447 | : "memory"); |
448 | } while (unlikely(!res)); | 448 | } while (unlikely(!res)); |
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word) | |||
485 | __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { | 485 | __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
486 | __asm__( | 486 | __asm__( |
487 | " .set push \n" | 487 | " .set push \n" |
488 | " .set mips32 \n" | 488 | " .set "MIPS_ISA_LEVEL" \n" |
489 | " clz %0, %1 \n" | 489 | " clz %0, %1 \n" |
490 | " .set pop \n" | 490 | " .set pop \n" |
491 | : "=r" (num) | 491 | : "=r" (num) |
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word) | |||
498 | __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { | 498 | __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { |
499 | __asm__( | 499 | __asm__( |
500 | " .set push \n" | 500 | " .set push \n" |
501 | " .set mips64 \n" | 501 | " .set "MIPS_ISA_LEVEL" \n" |
502 | " dclz %0, %1 \n" | 502 | " dclz %0, %1 \n" |
503 | " .set pop \n" | 503 | " .set pop \n" |
504 | : "=r" (num) | 504 | : "=r" (num) |
@@ -562,7 +562,7 @@ static inline int fls(int x) | |||
562 | if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { | 562 | if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { |
563 | __asm__( | 563 | __asm__( |
564 | " .set push \n" | 564 | " .set push \n" |
565 | " .set mips32 \n" | 565 | " .set "MIPS_ISA_LEVEL" \n" |
566 | " clz %0, %1 \n" | 566 | " clz %0, %1 \n" |
567 | " .set pop \n" | 567 | " .set pop \n" |
568 | : "=r" (x) | 568 | : "=r" (x) |
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 3418c51e1151..5c585c5c1c3e 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h | |||
@@ -12,6 +12,10 @@ | |||
12 | #ifndef _ASM_CHECKSUM_H | 12 | #ifndef _ASM_CHECKSUM_H |
13 | #define _ASM_CHECKSUM_H | 13 | #define _ASM_CHECKSUM_H |
14 | 14 | ||
15 | #ifdef CONFIG_GENERIC_CSUM | ||
16 | #include <asm-generic/checksum.h> | ||
17 | #else | ||
18 | |||
15 | #include <linux/in6.h> | 19 | #include <linux/in6.h> |
16 | 20 | ||
17 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, | |||
99 | */ | 103 | */ |
100 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, | 104 | __wsum csum_partial_copy_nocheck(const void *src, void *dst, |
101 | int len, __wsum sum); | 105 | int len, __wsum sum); |
106 | #define csum_partial_copy_nocheck csum_partial_copy_nocheck | ||
102 | 107 | ||
103 | /* | 108 | /* |
104 | * Fold a partial checksum without adding pseudo headers | 109 | * Fold a partial checksum without adding pseudo headers |
105 | */ | 110 | */ |
106 | static inline __sum16 csum_fold(__wsum sum) | 111 | static inline __sum16 csum_fold(__wsum csum) |
107 | { | 112 | { |
108 | __asm__( | 113 | u32 sum = (__force u32)csum; |
109 | " .set push # csum_fold\n" | ||
110 | " .set noat \n" | ||
111 | " sll $1, %0, 16 \n" | ||
112 | " addu %0, $1 \n" | ||
113 | " sltu $1, %0, $1 \n" | ||
114 | " srl %0, %0, 16 \n" | ||
115 | " addu %0, $1 \n" | ||
116 | " xori %0, 0xffff \n" | ||
117 | " .set pop" | ||
118 | : "=r" (sum) | ||
119 | : "0" (sum)); | ||
120 | 114 | ||
121 | return (__force __sum16)sum; | 115 | sum += (sum << 16); |
116 | csum = (sum < csum); | ||
117 | sum >>= 16; | ||
118 | sum += csum; | ||
119 | |||
120 | return (__force __sum16)~sum; | ||
122 | } | 121 | } |
122 | #define csum_fold csum_fold | ||
123 | 123 | ||
124 | /* | 124 | /* |
125 | * This is a version of ip_compute_csum() optimized for IP headers, | 125 | * This is a version of ip_compute_csum() optimized for IP headers, |
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) | |||
158 | 158 | ||
159 | return csum_fold(csum); | 159 | return csum_fold(csum); |
160 | } | 160 | } |
161 | #define ip_fast_csum ip_fast_csum | ||
161 | 162 | ||
162 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, | 163 | static inline __wsum csum_tcpudp_nofold(__be32 saddr, |
163 | __be32 daddr, unsigned short len, unsigned short proto, | 164 | __be32 daddr, unsigned short len, unsigned short proto, |
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, | |||
200 | 201 | ||
201 | return sum; | 202 | return sum; |
202 | } | 203 | } |
203 | 204 | #define csum_tcpudp_nofold csum_tcpudp_nofold | |
204 | /* | ||
205 | * computes the checksum of the TCP/UDP pseudo-header | ||
206 | * returns a 16-bit checksum, already complemented | ||
207 | */ | ||
208 | static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, | ||
209 | unsigned short len, | ||
210 | unsigned short proto, | ||
211 | __wsum sum) | ||
212 | { | ||
213 | return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); | ||
214 | } | ||
215 | 205 | ||
216 | /* | 206 | /* |
217 | * this routine is used for miscellaneous IP-like checksums, mainly | 207 | * this routine is used for miscellaneous IP-like checksums, mainly |
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, | |||
287 | return csum_fold(sum); | 277 | return csum_fold(sum); |
288 | } | 278 | } |
289 | 279 | ||
280 | #include <asm-generic/checksum.h> | ||
281 | #endif /* CONFIG_GENERIC_CSUM */ | ||
282 | |||
290 | #endif /* _ASM_CHECKSUM_H */ | 283 | #endif /* _ASM_CHECKSUM_H */ |
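The new C csum_fold() above replaces the hand-written assembly: it adds the low 16 bits into the high 16 bits, folds the carry back in, and returns the ones' complement of the top half. A small user-space restatement of that arithmetic (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t csum)
{
	uint32_t sum = csum;
	uint32_t carry;

	sum += sum << 16;	/* high half now holds low + high */
	carry = (sum < csum);	/* did the 32-bit addition wrap? */
	sum >>= 16;
	sum += carry;

	return (uint16_t)~sum;	/* ones' complement of the folded value */
}

int main(void)
{
	/* 0x0001ffff: 0x0001 + 0xffff = 0x10000, refolds to 0x0001, ~ -> 0xfffe */
	printf("0x%04x\n", fold(0x0001ffffu));
	return 0;
}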
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 28b1edf19501..d0a2a68ca600 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h | |||
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) | |||
31 | " sc %2, %1 \n" | 31 | " sc %2, %1 \n" |
32 | " beqzl %2, 1b \n" | 32 | " beqzl %2, 1b \n" |
33 | " .set mips0 \n" | 33 | " .set mips0 \n" |
34 | : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) | 34 | : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) |
35 | : GCC_OFF12_ASM() (*m), "Jr" (val) | 35 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) |
36 | : "memory"); | 36 | : "memory"); |
37 | } else if (kernel_uses_llsc) { | 37 | } else if (kernel_uses_llsc) { |
38 | unsigned long dummy; | 38 | unsigned long dummy; |
39 | 39 | ||
40 | do { | 40 | do { |
41 | __asm__ __volatile__( | 41 | __asm__ __volatile__( |
42 | " .set arch=r4000 \n" | 42 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
43 | " ll %0, %3 # xchg_u32 \n" | 43 | " ll %0, %3 # xchg_u32 \n" |
44 | " .set mips0 \n" | 44 | " .set mips0 \n" |
45 | " move %2, %z4 \n" | 45 | " move %2, %z4 \n" |
46 | " .set arch=r4000 \n" | 46 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
47 | " sc %2, %1 \n" | 47 | " sc %2, %1 \n" |
48 | " .set mips0 \n" | 48 | " .set mips0 \n" |
49 | : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), | 49 | : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), |
50 | "=&r" (dummy) | 50 | "=&r" (dummy) |
51 | : GCC_OFF12_ASM() (*m), "Jr" (val) | 51 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) |
52 | : "memory"); | 52 | : "memory"); |
53 | } while (unlikely(!dummy)); | 53 | } while (unlikely(!dummy)); |
54 | } else { | 54 | } else { |
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) | |||
82 | " scd %2, %1 \n" | 82 | " scd %2, %1 \n" |
83 | " beqzl %2, 1b \n" | 83 | " beqzl %2, 1b \n" |
84 | " .set mips0 \n" | 84 | " .set mips0 \n" |
85 | : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) | 85 | : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) |
86 | : GCC_OFF12_ASM() (*m), "Jr" (val) | 86 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) |
87 | : "memory"); | 87 | : "memory"); |
88 | } else if (kernel_uses_llsc) { | 88 | } else if (kernel_uses_llsc) { |
89 | unsigned long dummy; | 89 | unsigned long dummy; |
90 | 90 | ||
91 | do { | 91 | do { |
92 | __asm__ __volatile__( | 92 | __asm__ __volatile__( |
93 | " .set arch=r4000 \n" | 93 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
94 | " lld %0, %3 # xchg_u64 \n" | 94 | " lld %0, %3 # xchg_u64 \n" |
95 | " move %2, %z4 \n" | 95 | " move %2, %z4 \n" |
96 | " scd %2, %1 \n" | 96 | " scd %2, %1 \n" |
97 | " .set mips0 \n" | 97 | " .set mips0 \n" |
98 | : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), | 98 | : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), |
99 | "=&r" (dummy) | 99 | "=&r" (dummy) |
100 | : GCC_OFF12_ASM() (*m), "Jr" (val) | 100 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) |
101 | : "memory"); | 101 | : "memory"); |
102 | } while (unlikely(!dummy)); | 102 | } while (unlikely(!dummy)); |
103 | } else { | 103 | } else { |
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz | |||
158 | " beqzl $1, 1b \n" \ | 158 | " beqzl $1, 1b \n" \ |
159 | "2: \n" \ | 159 | "2: \n" \ |
160 | " .set pop \n" \ | 160 | " .set pop \n" \ |
161 | : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ | 161 | : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ |
162 | : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ | 162 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ |
163 | : "memory"); \ | 163 | : "memory"); \ |
164 | } else if (kernel_uses_llsc) { \ | 164 | } else if (kernel_uses_llsc) { \ |
165 | __asm__ __volatile__( \ | 165 | __asm__ __volatile__( \ |
166 | " .set push \n" \ | 166 | " .set push \n" \ |
167 | " .set noat \n" \ | 167 | " .set noat \n" \ |
168 | " .set arch=r4000 \n" \ | 168 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
169 | "1: " ld " %0, %2 # __cmpxchg_asm \n" \ | 169 | "1: " ld " %0, %2 # __cmpxchg_asm \n" \ |
170 | " bne %0, %z3, 2f \n" \ | 170 | " bne %0, %z3, 2f \n" \ |
171 | " .set mips0 \n" \ | 171 | " .set mips0 \n" \ |
172 | " move $1, %z4 \n" \ | 172 | " move $1, %z4 \n" \ |
173 | " .set arch=r4000 \n" \ | 173 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
174 | " " st " $1, %1 \n" \ | 174 | " " st " $1, %1 \n" \ |
175 | " beqz $1, 1b \n" \ | 175 | " beqz $1, 1b \n" \ |
176 | " .set pop \n" \ | 176 | " .set pop \n" \ |
177 | "2: \n" \ | 177 | "2: \n" \ |
178 | : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ | 178 | : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ |
179 | : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ | 179 | : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ |
180 | : "memory"); \ | 180 | : "memory"); \ |
181 | } else { \ | 181 | } else { \ |
182 | unsigned long __flags; \ | 182 | unsigned long __flags; \ |
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index c73815e0123a..e081a265f422 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h | |||
@@ -16,12 +16,30 @@ | |||
16 | #define GCC_REG_ACCUM "accum" | 16 | #define GCC_REG_ACCUM "accum" |
17 | #endif | 17 | #endif |
18 | 18 | ||
19 | #ifdef CONFIG_CPU_MIPSR6 | ||
20 | /* All MIPS R6 toolchains support the ZC constraint */ | ||
21 | #define GCC_OFF_SMALL_ASM() "ZC" | ||
22 | #else | ||
19 | #ifndef CONFIG_CPU_MICROMIPS | 23 | #ifndef CONFIG_CPU_MICROMIPS |
20 | #define GCC_OFF12_ASM() "R" | 24 | #define GCC_OFF_SMALL_ASM() "R" |
21 | #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) | 25 | #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) |
22 | #define GCC_OFF12_ASM() "ZC" | 26 | #define GCC_OFF_SMALL_ASM() "ZC" |
23 | #else | 27 | #else |
24 | #error "microMIPS compilation unsupported with GCC older than 4.9" | 28 | #error "microMIPS compilation unsupported with GCC older than 4.9" |
25 | #endif | 29 | #endif /* CONFIG_CPU_MICROMIPS */ |
30 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
31 | |||
32 | #ifdef CONFIG_CPU_MIPSR6 | ||
33 | #define MIPS_ISA_LEVEL "mips64r6" | ||
34 | #define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL | ||
35 | #define MIPS_ISA_LEVEL_RAW mips64r6 | ||
36 | #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW | ||
37 | #else | ||
38 | /* MIPS64 is a superset of MIPS32 */ | ||
39 | #define MIPS_ISA_LEVEL "mips64r2" | ||
40 | #define MIPS_ISA_ARCH_LEVEL "arch=r4000" | ||
41 | #define MIPS_ISA_LEVEL_RAW mips64r2 | ||
42 | #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW | ||
43 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
26 | 44 | ||
27 | #endif /* _ASM_COMPILER_H */ | 45 | #endif /* _ASM_COMPILER_H */ |
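GCC_OFF_SMALL_ASM() and MIPS_ISA_LEVEL defined above are plain string macros that the atomic.h, bitops.h and cmpxchg.h hunks in this patch splice into asm templates via C string-literal concatenation. A host-runnable illustration of that splicing, assuming the non-R6 values shown above (illustrative only, not kernel code):

#include <stdio.h>

#define MIPS_ISA_LEVEL		"mips64r2"	/* "mips64r6" when CONFIG_CPU_MIPSR6=y */
#define GCC_OFF_SMALL_ASM()	"R"		/* "ZC" on R6, or microMIPS with GCC >= 4.9 */

int main(void)
{
	/* The literal text the compiler would see inside __asm__( ... ): */
	const char *template =
		"	.set	" MIPS_ISA_LEVEL "			\n"
		"	ll	%0, %1		# atomic_add		\n";

	printf("%s", template);
	printf("memory operand constraint: \"+%s\"\n", GCC_OFF_SMALL_ASM());
	return 0;
}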
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 2897cfafcaf0..0d8208de9a3f 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h | |||
@@ -38,6 +38,9 @@ | |||
38 | #ifndef cpu_has_maar | 38 | #ifndef cpu_has_maar |
39 | #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) | 39 | #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) |
40 | #endif | 40 | #endif |
41 | #ifndef cpu_has_rw_llb | ||
42 | #define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB) | ||
43 | #endif | ||
41 | 44 | ||
42 | /* | 45 | /* |
43 | * For the moment we don't consider R6000 and R8000 so we can assume that | 46 | * For the moment we don't consider R6000 and R8000 so we can assume that |
@@ -171,6 +174,9 @@ | |||
171 | #endif | 174 | #endif |
172 | #endif | 175 | #endif |
173 | 176 | ||
177 | #ifndef cpu_has_mips_1 | ||
178 | # define cpu_has_mips_1 (!cpu_has_mips_r6) | ||
179 | #endif | ||
174 | #ifndef cpu_has_mips_2 | 180 | #ifndef cpu_has_mips_2 |
175 | # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) | 181 | # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) |
176 | #endif | 182 | #endif |
@@ -189,12 +195,18 @@ | |||
189 | #ifndef cpu_has_mips32r2 | 195 | #ifndef cpu_has_mips32r2 |
190 | # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) | 196 | # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) |
191 | #endif | 197 | #endif |
198 | #ifndef cpu_has_mips32r6 | ||
199 | # define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) | ||
200 | #endif | ||
192 | #ifndef cpu_has_mips64r1 | 201 | #ifndef cpu_has_mips64r1 |
193 | # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) | 202 | # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) |
194 | #endif | 203 | #endif |
195 | #ifndef cpu_has_mips64r2 | 204 | #ifndef cpu_has_mips64r2 |
196 | # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) | 205 | # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) |
197 | #endif | 206 | #endif |
207 | #ifndef cpu_has_mips64r6 | ||
208 | # define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) | ||
209 | #endif | ||
198 | 210 | ||
199 | /* | 211 | /* |
200 | * Shortcuts ... | 212 | * Shortcuts ... |
@@ -208,17 +220,23 @@ | |||
208 | #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) | 220 | #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) |
209 | #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) | 221 | #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) |
210 | 222 | ||
211 | #define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) | 223 | #define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \ |
224 | cpu_has_mips_r6) | ||
212 | 225 | ||
213 | #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) | 226 | #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) |
214 | #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) | 227 | #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) |
215 | #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) | 228 | #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) |
216 | #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) | 229 | #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) |
230 | #define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) | ||
217 | #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ | 231 | #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ |
218 | cpu_has_mips64r1 | cpu_has_mips64r2) | 232 | cpu_has_mips32r6 | cpu_has_mips64r1 | \ |
233 | cpu_has_mips64r2 | cpu_has_mips64r6) | ||
234 | |||
235 | /* MIPSR2 and MIPSR6 have a lot of similarities */ | ||
236 | #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6) | ||
219 | 237 | ||
220 | #ifndef cpu_has_mips_r2_exec_hazard | 238 | #ifndef cpu_has_mips_r2_exec_hazard |
221 | #define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 | 239 | #define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6) |
222 | #endif | 240 | #endif |
223 | 241 | ||
224 | /* | 242 | /* |
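Note: the combined cpu_has_mips_r2_r6 predicate above exists so that code treating R2 and R6 alike does not have to test both ISA revisions separately. A minimal sketch of that kind of guard, assuming <asm/cpu-features.h> is included; the helper name is illustrative and not part of the patch:

	/*
	 * Illustrative only: any R2 or R6 core can use the 'ehb' hazard
	 * barrier, so test the combined predicate instead of both levels.
	 */
	static inline int example_can_use_ehb(void)
	{
		return cpu_has_mips_r2_r6 ? 1 : 0;
	}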
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a6c9ccb33c5c..c3f4f2d2e108 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h | |||
@@ -84,6 +84,11 @@ struct cpuinfo_mips { | |||
84 | * (shifted by _CACHE_SHIFT) | 84 | * (shifted by _CACHE_SHIFT) |
85 | */ | 85 | */ |
86 | unsigned int writecombine; | 86 | unsigned int writecombine; |
87 | /* | ||
88 | * Simple counter to prevent enabling HTW in nested | ||
89 | * htw_start/htw_stop calls | ||
90 | */ | ||
91 | unsigned int htw_seq; | ||
87 | } __attribute__((aligned(SMP_CACHE_BYTES))); | 92 | } __attribute__((aligned(SMP_CACHE_BYTES))); |
88 | 93 | ||
89 | extern struct cpuinfo_mips cpu_data[]; | 94 | extern struct cpuinfo_mips cpu_data[]; |
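The new htw_seq field is a per-CPU nesting counter for the hardware page table walker: only the outermost htw_stop()/htw_start() pair actually toggles the walker. A minimal sketch of that counting pattern, with an illustrative flag standing in for the real PWCtl.PWEn control bit (the example_ names are not from the patch):

	static unsigned int example_htw_enabled;	/* stand-in for PWCtl.PWEn */

	static inline void example_htw_stop(struct cpuinfo_mips *c)
	{
		if (c->htw_seq++ == 0)
			example_htw_enabled = 0;	/* outermost stop disables the walker */
	}

	static inline void example_htw_start(struct cpuinfo_mips *c)
	{
		if (--c->htw_seq == 0)
			example_htw_enabled = 1;	/* matching outermost start re-enables it */
	}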
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index b4e2bd87df50..8245875f8b33 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h | |||
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type) | |||
54 | case CPU_M5150: | 54 | case CPU_M5150: |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | #if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ | ||
58 | defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ | ||
59 | defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ | ||
60 | defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) | ||
61 | case CPU_QEMU_GENERIC: | ||
62 | #endif | ||
63 | |||
57 | #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 | 64 | #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 |
58 | case CPU_5KC: | 65 | case CPU_5KC: |
59 | case CPU_5KE: | 66 | case CPU_5KE: |
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 33866fce4d63..15687234d70a 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h | |||
@@ -93,6 +93,7 @@ | |||
93 | * These are the PRID's for when 23:16 == PRID_COMP_MIPS | 93 | * These are the PRID's for when 23:16 == PRID_COMP_MIPS |
94 | */ | 94 | */ |
95 | 95 | ||
96 | #define PRID_IMP_QEMU_GENERIC 0x0000 | ||
96 | #define PRID_IMP_4KC 0x8000 | 97 | #define PRID_IMP_4KC 0x8000 |
97 | #define PRID_IMP_5KC 0x8100 | 98 | #define PRID_IMP_5KC 0x8100 |
98 | #define PRID_IMP_20KC 0x8200 | 99 | #define PRID_IMP_20KC 0x8200 |
@@ -312,6 +313,8 @@ enum cpu_type_enum { | |||
312 | CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, | 313 | CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, |
313 | CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, | 314 | CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, |
314 | 315 | ||
316 | CPU_QEMU_GENERIC, | ||
317 | |||
315 | CPU_LAST | 318 | CPU_LAST |
316 | }; | 319 | }; |
317 | 320 | ||
@@ -329,11 +332,14 @@ enum cpu_type_enum { | |||
329 | #define MIPS_CPU_ISA_M32R2 0x00000020 | 332 | #define MIPS_CPU_ISA_M32R2 0x00000020 |
330 | #define MIPS_CPU_ISA_M64R1 0x00000040 | 333 | #define MIPS_CPU_ISA_M64R1 0x00000040 |
331 | #define MIPS_CPU_ISA_M64R2 0x00000080 | 334 | #define MIPS_CPU_ISA_M64R2 0x00000080 |
335 | #define MIPS_CPU_ISA_M32R6 0x00000100 | ||
336 | #define MIPS_CPU_ISA_M64R6 0x00000200 | ||
332 | 337 | ||
333 | #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ | 338 | #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ |
334 | MIPS_CPU_ISA_M32R2) | 339 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) |
335 | #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ | 340 | #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ |
336 | MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) | 341 | MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ |
342 | MIPS_CPU_ISA_M64R6) | ||
337 | 343 | ||
338 | /* | 344 | /* |
339 | * CPU Option encodings | 345 | * CPU Option encodings |
@@ -370,6 +376,7 @@ enum cpu_type_enum { | |||
370 | #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ | 376 | #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ |
371 | #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ | 377 | #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ |
372 | #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ | 378 | #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ |
379 | #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ | ||
373 | 380 | ||
374 | /* | 381 | /* |
375 | * CPU ASE encodings | 382 | * CPU ASE encodings |
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h index ae6fedcb0060..94105d3f58f4 100644 --- a/arch/mips/include/asm/edac.h +++ b/arch/mips/include/asm/edac.h | |||
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size) | |||
26 | " sc %0, %1 \n" | 26 | " sc %0, %1 \n" |
27 | " beqz %0, 1b \n" | 27 | " beqz %0, 1b \n" |
28 | " .set mips0 \n" | 28 | " .set mips0 \n" |
29 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) | 29 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) |
30 | : GCC_OFF12_ASM() (*virt_addr)); | 30 | : GCC_OFF_SMALL_ASM() (*virt_addr)); |
31 | 31 | ||
32 | virt_addr++; | 32 | virt_addr++; |
33 | } | 33 | } |
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h index eb4d95de619c..535f196ffe02 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h | |||
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm); | |||
417 | struct arch_elf_state { | 417 | struct arch_elf_state { |
418 | int fp_abi; | 418 | int fp_abi; |
419 | int interp_fp_abi; | 419 | int interp_fp_abi; |
420 | int overall_abi; | 420 | int overall_fp_mode; |
421 | }; | 421 | }; |
422 | 422 | ||
423 | #define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */ | ||
424 | |||
423 | #define INIT_ARCH_ELF_STATE { \ | 425 | #define INIT_ARCH_ELF_STATE { \ |
424 | .fp_abi = -1, \ | 426 | .fp_abi = MIPS_ABI_FP_UNKNOWN, \ |
425 | .interp_fp_abi = -1, \ | 427 | .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \ |
426 | .overall_abi = -1, \ | 428 | .overall_fp_mode = -1, \ |
427 | } | 429 | } |
428 | 430 | ||
429 | extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, | 431 | extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, |
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index affebb78f5d6..dd083e999b08 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h | |||
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode) | |||
68 | goto fr_common; | 68 | goto fr_common; |
69 | 69 | ||
70 | case FPU_64BIT: | 70 | case FPU_64BIT: |
71 | #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) | 71 | #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ |
72 | || defined(CONFIG_64BIT)) | ||
72 | /* we only have a 32-bit FPU */ | 73 | /* we only have a 32-bit FPU */ |
73 | return SIGFPE; | 74 | return SIGFPE; |
74 | #endif | 75 | #endif |
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index ef9987a61d88..1de190bdfb9c 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h | |||
@@ -45,19 +45,19 @@ | |||
45 | " "__UA_ADDR "\t2b, 4b \n" \ | 45 | " "__UA_ADDR "\t2b, 4b \n" \ |
46 | " .previous \n" \ | 46 | " .previous \n" \ |
47 | : "=r" (ret), "=&r" (oldval), \ | 47 | : "=r" (ret), "=&r" (oldval), \ |
48 | "=" GCC_OFF12_ASM() (*uaddr) \ | 48 | "=" GCC_OFF_SMALL_ASM() (*uaddr) \ |
49 | : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ | 49 | : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ |
50 | "i" (-EFAULT) \ | 50 | "i" (-EFAULT) \ |
51 | : "memory"); \ | 51 | : "memory"); \ |
52 | } else if (cpu_has_llsc) { \ | 52 | } else if (cpu_has_llsc) { \ |
53 | __asm__ __volatile__( \ | 53 | __asm__ __volatile__( \ |
54 | " .set push \n" \ | 54 | " .set push \n" \ |
55 | " .set noat \n" \ | 55 | " .set noat \n" \ |
56 | " .set arch=r4000 \n" \ | 56 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
57 | "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ | 57 | "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ |
58 | " .set mips0 \n" \ | 58 | " .set mips0 \n" \ |
59 | " " insn " \n" \ | 59 | " " insn " \n" \ |
60 | " .set arch=r4000 \n" \ | 60 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
61 | "2: "user_sc("$1", "%2")" \n" \ | 61 | "2: "user_sc("$1", "%2")" \n" \ |
62 | " beqz $1, 1b \n" \ | 62 | " beqz $1, 1b \n" \ |
63 | __WEAK_LLSC_MB \ | 63 | __WEAK_LLSC_MB \ |
@@ -74,8 +74,8 @@ | |||
74 | " "__UA_ADDR "\t2b, 4b \n" \ | 74 | " "__UA_ADDR "\t2b, 4b \n" \ |
75 | " .previous \n" \ | 75 | " .previous \n" \ |
76 | : "=r" (ret), "=&r" (oldval), \ | 76 | : "=r" (ret), "=&r" (oldval), \ |
77 | "=" GCC_OFF12_ASM() (*uaddr) \ | 77 | "=" GCC_OFF_SMALL_ASM() (*uaddr) \ |
78 | : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ | 78 | : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ |
79 | "i" (-EFAULT) \ | 79 | "i" (-EFAULT) \ |
80 | : "memory"); \ | 80 | : "memory"); \ |
81 | } else \ | 81 | } else \ |
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
174 | " "__UA_ADDR "\t1b, 4b \n" | 174 | " "__UA_ADDR "\t1b, 4b \n" |
175 | " "__UA_ADDR "\t2b, 4b \n" | 175 | " "__UA_ADDR "\t2b, 4b \n" |
176 | " .previous \n" | 176 | " .previous \n" |
177 | : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) | 177 | : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) |
178 | : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), | 178 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), |
179 | "i" (-EFAULT) | 179 | "i" (-EFAULT) |
180 | : "memory"); | 180 | : "memory"); |
181 | } else if (cpu_has_llsc) { | 181 | } else if (cpu_has_llsc) { |
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
183 | "# futex_atomic_cmpxchg_inatomic \n" | 183 | "# futex_atomic_cmpxchg_inatomic \n" |
184 | " .set push \n" | 184 | " .set push \n" |
185 | " .set noat \n" | 185 | " .set noat \n" |
186 | " .set arch=r4000 \n" | 186 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
187 | "1: "user_ll("%1", "%3")" \n" | 187 | "1: "user_ll("%1", "%3")" \n" |
188 | " bne %1, %z4, 3f \n" | 188 | " bne %1, %z4, 3f \n" |
189 | " .set mips0 \n" | 189 | " .set mips0 \n" |
190 | " move $1, %z5 \n" | 190 | " move $1, %z5 \n" |
191 | " .set arch=r4000 \n" | 191 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
192 | "2: "user_sc("$1", "%2")" \n" | 192 | "2: "user_sc("$1", "%2")" \n" |
193 | " beqz $1, 1b \n" | 193 | " beqz $1, 1b \n" |
194 | __WEAK_LLSC_MB | 194 | __WEAK_LLSC_MB |
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
203 | " "__UA_ADDR "\t1b, 4b \n" | 203 | " "__UA_ADDR "\t1b, 4b \n" |
204 | " "__UA_ADDR "\t2b, 4b \n" | 204 | " "__UA_ADDR "\t2b, 4b \n" |
205 | " .previous \n" | 205 | " .previous \n" |
206 | : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) | 206 | : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) |
207 | : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), | 207 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), |
208 | "i" (-EFAULT) | 208 | "i" (-EFAULT) |
209 | : "memory"); | 209 | : "memory"); |
210 | } else | 210 | } else |
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h index 4be1a57cdbb0..71a986e9b694 100644 --- a/arch/mips/include/asm/gio_device.h +++ b/arch/mips/include/asm/gio_device.h | |||
@@ -25,8 +25,6 @@ struct gio_driver { | |||
25 | 25 | ||
26 | int (*probe)(struct gio_device *, const struct gio_device_id *); | 26 | int (*probe)(struct gio_device *, const struct gio_device_id *); |
27 | void (*remove)(struct gio_device *); | 27 | void (*remove)(struct gio_device *); |
28 | int (*suspend)(struct gio_device *, pm_message_t); | ||
29 | int (*resume)(struct gio_device *); | ||
30 | void (*shutdown)(struct gio_device *); | 28 | void (*shutdown)(struct gio_device *); |
31 | 29 | ||
32 | struct device_driver driver; | 30 | struct device_driver driver; |
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index e3ee92d4dbe7..4087b47ad1cb 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define _ASM_HAZARDS_H | 11 | #define _ASM_HAZARDS_H |
12 | 12 | ||
13 | #include <linux/stringify.h> | 13 | #include <linux/stringify.h> |
14 | #include <asm/compiler.h> | ||
14 | 15 | ||
15 | #define ___ssnop \ | 16 | #define ___ssnop \ |
16 | sll $0, $0, 1 | 17 | sll $0, $0, 1 |
@@ -21,7 +22,7 @@ | |||
21 | /* | 22 | /* |
22 | * TLB hazards | 23 | * TLB hazards |
23 | */ | 24 | */ |
24 | #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) | 25 | #if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON) |
25 | 26 | ||
26 | /* | 27 | /* |
27 | * MIPSR2 defines ehb for hazard avoidance | 28 | * MIPSR2 defines ehb for hazard avoidance |
@@ -58,7 +59,7 @@ do { \ | |||
58 | unsigned long tmp; \ | 59 | unsigned long tmp; \ |
59 | \ | 60 | \ |
60 | __asm__ __volatile__( \ | 61 | __asm__ __volatile__( \ |
61 | " .set mips64r2 \n" \ | 62 | " .set "MIPS_ISA_LEVEL" \n" \ |
62 | " dla %0, 1f \n" \ | 63 | " dla %0, 1f \n" \ |
63 | " jr.hb %0 \n" \ | 64 | " jr.hb %0 \n" \ |
64 | " .set mips0 \n" \ | 65 | " .set mips0 \n" \ |
@@ -132,7 +133,7 @@ do { \ | |||
132 | 133 | ||
133 | #define instruction_hazard() \ | 134 | #define instruction_hazard() \ |
134 | do { \ | 135 | do { \ |
135 | if (cpu_has_mips_r2) \ | 136 | if (cpu_has_mips_r2_r6) \ |
136 | __instruction_hazard(); \ | 137 | __instruction_hazard(); \ |
137 | } while (0) | 138 | } while (0) |
138 | 139 | ||
@@ -240,7 +241,7 @@ do { \ | |||
240 | 241 | ||
241 | #define __disable_fpu_hazard | 242 | #define __disable_fpu_hazard |
242 | 243 | ||
243 | #elif defined(CONFIG_CPU_MIPSR2) | 244 | #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
244 | 245 | ||
245 | #define __enable_fpu_hazard \ | 246 | #define __enable_fpu_hazard \ |
246 | ___ehb | 247 | ___ehb |
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 0fa5fdcd1f01..d60cc68fa31e 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h | |||
@@ -15,9 +15,10 @@ | |||
15 | 15 | ||
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/stringify.h> | 17 | #include <linux/stringify.h> |
18 | #include <asm/compiler.h> | ||
18 | #include <asm/hazards.h> | 19 | #include <asm/hazards.h> |
19 | 20 | ||
20 | #ifdef CONFIG_CPU_MIPSR2 | 21 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
21 | 22 | ||
22 | static inline void arch_local_irq_disable(void) | 23 | static inline void arch_local_irq_disable(void) |
23 | { | 24 | { |
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void); | |||
118 | unsigned long arch_local_irq_save(void); | 119 | unsigned long arch_local_irq_save(void); |
119 | void arch_local_irq_restore(unsigned long flags); | 120 | void arch_local_irq_restore(unsigned long flags); |
120 | void __arch_local_irq_restore(unsigned long flags); | 121 | void __arch_local_irq_restore(unsigned long flags); |
121 | #endif /* CONFIG_CPU_MIPSR2 */ | 122 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
122 | 123 | ||
123 | static inline void arch_local_irq_enable(void) | 124 | static inline void arch_local_irq_enable(void) |
124 | { | 125 | { |
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void) | |||
126 | " .set push \n" | 127 | " .set push \n" |
127 | " .set reorder \n" | 128 | " .set reorder \n" |
128 | " .set noat \n" | 129 | " .set noat \n" |
129 | #if defined(CONFIG_CPU_MIPSR2) | 130 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
130 | " ei \n" | 131 | " ei \n" |
131 | #else | 132 | #else |
132 | " mfc0 $1,$12 \n" | 133 | " mfc0 $1,$12 \n" |
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index 46dfc3c1fd49..8feaed62a2ab 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/bitops.h> | 5 | #include <linux/bitops.h> |
6 | #include <linux/atomic.h> | 6 | #include <linux/atomic.h> |
7 | #include <asm/cmpxchg.h> | 7 | #include <asm/cmpxchg.h> |
8 | #include <asm/compiler.h> | ||
8 | #include <asm/war.h> | 9 | #include <asm/war.h> |
9 | 10 | ||
10 | typedef struct | 11 | typedef struct |
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l) | |||
47 | unsigned long temp; | 48 | unsigned long temp; |
48 | 49 | ||
49 | __asm__ __volatile__( | 50 | __asm__ __volatile__( |
50 | " .set arch=r4000 \n" | 51 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
51 | "1:" __LL "%1, %2 # local_add_return \n" | 52 | "1:" __LL "%1, %2 # local_add_return \n" |
52 | " addu %0, %1, %3 \n" | 53 | " addu %0, %1, %3 \n" |
53 | __SC "%0, %2 \n" | 54 | __SC "%0, %2 \n" |
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l) | |||
92 | unsigned long temp; | 93 | unsigned long temp; |
93 | 94 | ||
94 | __asm__ __volatile__( | 95 | __asm__ __volatile__( |
95 | " .set arch=r4000 \n" | 96 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
96 | "1:" __LL "%1, %2 # local_sub_return \n" | 97 | "1:" __LL "%1, %2 # local_sub_return \n" |
97 | " subu %0, %1, %3 \n" | 98 | " subu %0, %1, %3 \n" |
98 | __SC "%0, %2 \n" | 99 | __SC "%0, %2 \n" |
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 1668ee57acb9..cf92fe733995 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h | |||
@@ -8,11 +8,10 @@ | |||
8 | #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H | 8 | #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H |
9 | #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H | 9 | #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H |
10 | 10 | ||
11 | |||
12 | #define CP0_CYCLE_COUNTER $9, 6 | ||
13 | #define CP0_CVMCTL_REG $9, 7 | 11 | #define CP0_CVMCTL_REG $9, 7 |
14 | #define CP0_CVMMEMCTL_REG $11,7 | 12 | #define CP0_CVMMEMCTL_REG $11,7 |
15 | #define CP0_PRID_REG $15, 0 | 13 | #define CP0_PRID_REG $15, 0 |
14 | #define CP0_DCACHE_ERR_REG $27, 1 | ||
16 | #define CP0_PRID_OCTEON_PASS1 0x000d0000 | 15 | #define CP0_PRID_OCTEON_PASS1 0x000d0000 |
17 | #define CP0_PRID_OCTEON_CN30XX 0x000d0200 | 16 | #define CP0_PRID_OCTEON_CN30XX 0x000d0200 |
18 | 17 | ||
@@ -38,36 +37,55 @@ | |||
38 | # Needed for octeon specific memcpy | 37 | # Needed for octeon specific memcpy |
39 | or v0, v0, 0x5001 | 38 | or v0, v0, 0x5001 |
40 | xor v0, v0, 0x1001 | 39 | xor v0, v0, 0x1001 |
41 | # Read the processor ID register | ||
42 | mfc0 v1, CP0_PRID_REG | ||
43 | # Disable instruction prefetching (Octeon Pass1 errata) | ||
44 | or v0, v0, 0x2000 | ||
45 | # Skip reenable of prefetching for Octeon Pass1 | ||
46 | beq v1, CP0_PRID_OCTEON_PASS1, skip | ||
47 | nop | ||
48 | # Reenable instruction prefetching, not on Pass1 | ||
49 | xor v0, v0, 0x2000 | ||
50 | # Strip off pass number off of processor id | ||
51 | srl v1, 8 | ||
52 | sll v1, 8 | ||
53 | # CN30XX needs some extra stuff turned off for better performance | ||
54 | bne v1, CP0_PRID_OCTEON_CN30XX, skip | ||
55 | nop | ||
56 | # CN30XX Use random Icache replacement | ||
57 | or v0, v0, 0x400 | ||
58 | # CN30XX Disable instruction prefetching | ||
59 | or v0, v0, 0x2000 | ||
60 | skip: | ||
61 | # First clear off CvmCtl[IPPCI] bit and move the performance | 40 | # First clear off CvmCtl[IPPCI] bit and move the performance |
62 | # counters interrupt to IRQ 6 | 41 | # counters interrupt to IRQ 6 |
63 | li v1, ~(7 << 7) | 42 | dli v1, ~(7 << 7) |
64 | and v0, v0, v1 | 43 | and v0, v0, v1 |
65 | ori v0, v0, (6 << 7) | 44 | ori v0, v0, (6 << 7) |
45 | |||
46 | mfc0 v1, CP0_PRID_REG | ||
47 | and t1, v1, 0xfff8 | ||
48 | xor t1, t1, 0x9000 # 63-P1 | ||
49 | beqz t1, 4f | ||
50 | and t1, v1, 0xfff8 | ||
51 | xor t1, t1, 0x9008 # 63-P2 | ||
52 | beqz t1, 4f | ||
53 | and t1, v1, 0xfff8 | ||
54 | xor t1, t1, 0x9100 # 68-P1 | ||
55 | beqz t1, 4f | ||
56 | and t1, v1, 0xff00 | ||
57 | xor t1, t1, 0x9200 # 66-PX | ||
58 | bnez t1, 5f # Skip WAR for others. | ||
59 | and t1, v1, 0x00ff | ||
60 | slti t1, t1, 2 # 66-P1.2 and later good. | ||
61 | beqz t1, 5f | ||
62 | |||
63 | 4: # core-16057 work around | ||
64 | or v0, v0, 0x2000 # Set IPREF bit. | ||
65 | |||
66 | 5: # No core-16057 work around | ||
66 | # Write the cavium control register | 67 | # Write the cavium control register |
67 | dmtc0 v0, CP0_CVMCTL_REG | 68 | dmtc0 v0, CP0_CVMCTL_REG |
68 | sync | 69 | sync |
69 | # Flush dcache after config change | 70 | # Flush dcache after config change |
70 | cache 9, 0($0) | 71 | cache 9, 0($0) |
72 | # Zero all of CVMSEG to make sure parity is correct | ||
73 | dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE | ||
74 | dsll v0, 7 | ||
75 | beqz v0, 2f | ||
76 | 1: dsubu v0, 8 | ||
77 | sd $0, -32768(v0) | ||
78 | bnez v0, 1b | ||
79 | 2: | ||
80 | mfc0 v0, CP0_PRID_REG | ||
81 | bbit0 v0, 15, 1f | ||
82 | # OCTEON II or better have bit 15 set. Clear the error bits. | ||
83 | and t1, v0, 0xff00 | ||
84 | dli v0, 0x9500 | ||
85 | bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0 | ||
86 | dli v0, 0x27 | ||
87 | dmtc0 v0, CP0_DCACHE_ERR_REG | ||
88 | 1: | ||
71 | # Get my core id | 89 | # Get my core id |
72 | rdhwr v0, $0 | 90 | rdhwr v0, $0 |
73 | # Jump the master to kernel_entry | 91 | # Jump the master to kernel_entry |
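For reference, the PRID tests added above select the core-16057 instruction-prefetch workaround by model and pass. A hedged C rendering of the same decision (illustrative only; the real code stays in assembly because it runs before the caches are usable):

	/* Hypothetical C equivalent of the core-16057 WAR selection above. */
	static int example_needs_core16057_war(unsigned int prid)
	{
		unsigned int model = prid & 0xfff8;	/* drop minor revision bits */

		if (model == 0x9000 || model == 0x9008 || model == 0x9100)
			return 1;			/* 63-P1, 63-P2, 68-P1 */
		if ((prid & 0xff00) == 0x9200)		/* 66-PX */
			return (prid & 0x00ff) < 2;	/* only 66 before pass 1.2 */
		return 0;				/* other models: no workaround */
	}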
diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h index eb72b35cf04b..35c80be92207 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/war.h +++ b/arch/mips/include/asm/mach-cavium-octeon/war.h | |||
@@ -22,4 +22,7 @@ | |||
22 | #define R10000_LLSC_WAR 0 | 22 | #define R10000_LLSC_WAR 0 |
23 | #define MIPS34K_MISSED_ITLB_WAR 0 | 23 | #define MIPS34K_MISSED_ITLB_WAR 0 |
24 | 24 | ||
25 | #define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ | ||
26 | OCTEON_IS_MODEL(OCTEON_CN6XXX) | ||
27 | |||
25 | #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ | 28 | #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ |
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h index 986982db7c38..79cff26d8b36 100644 --- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h +++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h | |||
@@ -27,8 +27,6 @@ struct jz_nand_platform_data { | |||
27 | 27 | ||
28 | struct nand_ecclayout *ecc_layout; | 28 | struct nand_ecclayout *ecc_layout; |
29 | 29 | ||
30 | unsigned int busy_gpio; | ||
31 | |||
32 | unsigned char banks[JZ_NAND_NUM_BANKS]; | 30 | unsigned char banks[JZ_NAND_NUM_BANKS]; |
33 | 31 | ||
34 | void (*ident_callback)(struct platform_device *, struct nand_chip *, | 32 | void (*ident_callback)(struct platform_device *, struct nand_chip *, |
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h index 2e54b4bff5cf..90dbe43c8d27 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h | |||
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr, | |||
85 | " "__beqz"%0, 1b \n" | 85 | " "__beqz"%0, 1b \n" |
86 | " nop \n" | 86 | " nop \n" |
87 | " .set pop \n" | 87 | " .set pop \n" |
88 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) | 88 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) |
89 | : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); | 89 | : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr)); |
90 | } | 90 | } |
91 | 91 | ||
92 | /* | 92 | /* |
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr, | |||
106 | " "__beqz"%0, 1b \n" | 106 | " "__beqz"%0, 1b \n" |
107 | " nop \n" | 107 | " nop \n" |
108 | " .set pop \n" | 108 | " .set pop \n" |
109 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) | 109 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) |
110 | : "ir" (mask), GCC_OFF12_ASM() (*addr)); | 110 | : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); |
111 | } | 111 | } |
112 | 112 | ||
113 | /* | 113 | /* |
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr, | |||
127 | " "__beqz"%0, 1b \n" | 127 | " "__beqz"%0, 1b \n" |
128 | " nop \n" | 128 | " nop \n" |
129 | " .set pop \n" | 129 | " .set pop \n" |
130 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) | 130 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) |
131 | : "ir" (~mask), GCC_OFF12_ASM() (*addr)); | 131 | : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr)); |
132 | } | 132 | } |
133 | 133 | ||
134 | /* | 134 | /* |
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr, | |||
148 | " "__beqz"%0, 1b \n" | 148 | " "__beqz"%0, 1b \n" |
149 | " nop \n" | 149 | " nop \n" |
150 | " .set pop \n" | 150 | " .set pop \n" |
151 | : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) | 151 | : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) |
152 | : "ir" (mask), GCC_OFF12_ASM() (*addr)); | 152 | : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | 155 | /* |
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) | |||
220 | " .set arch=r4000 \n" \ | 220 | " .set arch=r4000 \n" \ |
221 | "1: ll %0, %1 #custom_read_reg32 \n" \ | 221 | "1: ll %0, %1 #custom_read_reg32 \n" \ |
222 | " .set pop \n" \ | 222 | " .set pop \n" \ |
223 | : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ | 223 | : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ |
224 | : GCC_OFF12_ASM() (*address)) | 224 | : GCC_OFF_SMALL_ASM() (*address)) |
225 | 225 | ||
226 | #define custom_write_reg32(address, tmp) \ | 226 | #define custom_write_reg32(address, tmp) \ |
227 | __asm__ __volatile__( \ | 227 | __asm__ __volatile__( \ |
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) | |||
231 | " "__beqz"%0, 1b \n" \ | 231 | " "__beqz"%0, 1b \n" \ |
232 | " nop \n" \ | 232 | " nop \n" \ |
233 | " .set pop \n" \ | 233 | " .set pop \n" \ |
234 | : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ | 234 | : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ |
235 | : "0" (tmp), GCC_OFF12_ASM() (*address)) | 235 | : "0" (tmp), GCC_OFF_SMALL_ASM() (*address)) |
236 | 236 | ||
237 | #endif /* __ASM_REGOPS_H__ */ | 237 | #endif /* __ASM_REGOPS_H__ */ |
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h new file mode 100644 index 000000000000..60570f2c3ba2 --- /dev/null +++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2014 Imagination Technologies Ltd. | ||
7 | * Author: Markos Chandras <markos.chandras@imgtec.com> | ||
8 | */ | ||
9 | |||
10 | #ifndef __ASM_MIPS_R2_TO_R6_EMUL_H | ||
11 | #define __ASM_MIPS_R2_TO_R6_EMUL_H | ||
12 | |||
13 | struct mips_r2_emulator_stats { | ||
14 | u64 movs; | ||
15 | u64 hilo; | ||
16 | u64 muls; | ||
17 | u64 divs; | ||
18 | u64 dsps; | ||
19 | u64 bops; | ||
20 | u64 traps; | ||
21 | u64 fpus; | ||
22 | u64 loads; | ||
23 | u64 stores; | ||
24 | u64 llsc; | ||
25 | u64 dsemul; | ||
26 | }; | ||
27 | |||
28 | struct mips_r2br_emulator_stats { | ||
29 | u64 jrs; | ||
30 | u64 bltzl; | ||
31 | u64 bgezl; | ||
32 | u64 bltzll; | ||
33 | u64 bgezll; | ||
34 | u64 bltzall; | ||
35 | u64 bgezall; | ||
36 | u64 bltzal; | ||
37 | u64 bgezal; | ||
38 | u64 beql; | ||
39 | u64 bnel; | ||
40 | u64 blezl; | ||
41 | u64 bgtzl; | ||
42 | }; | ||
43 | |||
44 | #ifdef CONFIG_DEBUG_FS | ||
45 | |||
46 | #define MIPS_R2_STATS(M) \ | ||
47 | do { \ | ||
48 | u32 nir; \ | ||
49 | int err; \ | ||
50 | \ | ||
51 | preempt_disable(); \ | ||
52 | __this_cpu_inc(mipsr2emustats.M); \ | ||
53 | err = __get_user(nir, (u32 __user *)regs->cp0_epc); \ | ||
54 | if (!err) { \ | ||
55 | if (nir == BREAK_MATH) \ | ||
56 | __this_cpu_inc(mipsr2bdemustats.M); \ | ||
57 | } \ | ||
58 | preempt_enable(); \ | ||
59 | } while (0) | ||
60 | |||
61 | #define MIPS_R2BR_STATS(M) \ | ||
62 | do { \ | ||
63 | preempt_disable(); \ | ||
64 | __this_cpu_inc(mipsr2bremustats.M); \ | ||
65 | preempt_enable(); \ | ||
66 | } while (0) | ||
67 | |||
68 | #else | ||
69 | |||
70 | #define MIPS_R2_STATS(M) do { } while (0) | ||
71 | #define MIPS_R2BR_STATS(M) do { } while (0) | ||
72 | |||
73 | #endif /* CONFIG_DEBUG_FS */ | ||
74 | |||
75 | struct r2_decoder_table { | ||
76 | u32 mask; | ||
77 | u32 code; | ||
78 | int (*func)(struct pt_regs *regs, u32 inst); | ||
79 | }; | ||
80 | |||
81 | |||
82 | extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, | ||
83 | const char *str); | ||
84 | |||
85 | #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR | ||
86 | static int mipsr2_emulation; | ||
87 | static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }; | ||
88 | #else | ||
89 | /* MIPS R2 Emulator ON/OFF */ | ||
90 | extern int mipsr2_emulation; | ||
91 | extern int mipsr2_decoder(struct pt_regs *regs, u32 inst); | ||
92 | #endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */ | ||
93 | |||
94 | #define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) | ||
95 | |||
96 | #endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */ | ||
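The r2_decoder_table entries pair an opcode mask/value with an emulation handler. A hypothetical sketch of how such a table could be walked for a trapping instruction word (the function and its fallback policy are illustrative, not the emulator's actual dispatch):

	/* Return the handler's result for the first entry that claims the opcode. */
	static int example_run_decoder(const struct r2_decoder_table *tbl,
				       int entries, struct pt_regs *regs, u32 inst)
	{
		int i;

		for (i = 0; i < entries; i++) {
			if ((inst & tbl[i].mask) == tbl[i].code)
				return tbl[i].func(regs, inst);
		}
		return SIGILL;	/* nothing matched; no emulation available */
	}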
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5b720d8c2745..fef004434096 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -653,6 +653,7 @@ | |||
653 | #define MIPS_CONF5_NF (_ULCAST_(1) << 0) | 653 | #define MIPS_CONF5_NF (_ULCAST_(1) << 0) |
654 | #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) | 654 | #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) |
655 | #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) | 655 | #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) |
656 | #define MIPS_CONF5_LLB (_ULCAST_(1) << 4) | ||
656 | #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) | 657 | #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) |
657 | #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) | 658 | #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) |
658 | #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) | 659 | #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) |
@@ -1127,6 +1128,8 @@ do { \ | |||
1127 | #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) | 1128 | #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) |
1128 | #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) | 1129 | #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) |
1129 | 1130 | ||
1131 | #define read_c0_lladdr() __read_ulong_c0_register($17, 0) | ||
1132 | #define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) | ||
1130 | #define read_c0_maar() __read_ulong_c0_register($17, 1) | 1133 | #define read_c0_maar() __read_ulong_c0_register($17, 1) |
1131 | #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) | 1134 | #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) |
1132 | #define read_c0_maari() __read_32bit_c0_register($17, 2) | 1135 | #define read_c0_maari() __read_32bit_c0_register($17, 2) |
@@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5) | |||
1909 | __BUILD_SET_C0(intcontrol) | 1912 | __BUILD_SET_C0(intcontrol) |
1910 | __BUILD_SET_C0(intctl) | 1913 | __BUILD_SET_C0(intctl) |
1911 | __BUILD_SET_C0(srsmap) | 1914 | __BUILD_SET_C0(srsmap) |
1915 | __BUILD_SET_C0(pagegrain) | ||
1912 | __BUILD_SET_C0(brcm_config_0) | 1916 | __BUILD_SET_C0(brcm_config_0) |
1913 | __BUILD_SET_C0(brcm_bus_pll) | 1917 | __BUILD_SET_C0(brcm_bus_pll) |
1914 | __BUILD_SET_C0(brcm_reset) | 1918 | __BUILD_SET_C0(brcm_reset) |
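The new LLAddr accessors pair with the cpu_has_rw_llb feature bit added earlier in this patch. A hedged sketch of the intended use, clearing a live LL bit on cores whose LLAddr/LLB register is writable (helper name is illustrative, and the clearing semantics are an assumption):

	/* Illustrative only: drop any outstanding LL bit before switching context. */
	static inline void example_clear_llbit(void)
	{
		if (cpu_has_rw_llb)
			write_c0_lladdr(0);	/* assumed to clear LLAddr and the LLB flag */
	}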
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index c436138945a8..1afa1f986df8 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h | |||
@@ -1,9 +1,12 @@ | |||
1 | #ifndef __ASM_MMU_H | 1 | #ifndef __ASM_MMU_H |
2 | #define __ASM_MMU_H | 2 | #define __ASM_MMU_H |
3 | 3 | ||
4 | #include <linux/atomic.h> | ||
5 | |||
4 | typedef struct { | 6 | typedef struct { |
5 | unsigned long asid[NR_CPUS]; | 7 | unsigned long asid[NR_CPUS]; |
6 | void *vdso; | 8 | void *vdso; |
9 | atomic_t fp_mode_switching; | ||
7 | } mm_context_t; | 10 | } mm_context_t; |
8 | 11 | ||
9 | #endif /* __ASM_MMU_H */ | 12 | #endif /* __ASM_MMU_H */ |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2f82568a3ee4..45914b59824c 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
@@ -25,7 +25,6 @@ do { \ | |||
25 | if (cpu_has_htw) { \ | 25 | if (cpu_has_htw) { \ |
26 | write_c0_pwbase(pgd); \ | 26 | write_c0_pwbase(pgd); \ |
27 | back_to_back_c0_hazard(); \ | 27 | back_to_back_c0_hazard(); \ |
28 | htw_reset(); \ | ||
29 | } \ | 28 | } \ |
30 | } while (0) | 29 | } while (0) |
31 | 30 | ||
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
132 | for_each_possible_cpu(i) | 131 | for_each_possible_cpu(i) |
133 | cpu_context(i, mm) = 0; | 132 | cpu_context(i, mm) = 0; |
134 | 133 | ||
134 | atomic_set(&mm->context.fp_mode_switching, 0); | ||
135 | |||
135 | return 0; | 136 | return 0; |
136 | } | 137 | } |
137 | 138 | ||
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
142 | unsigned long flags; | 143 | unsigned long flags; |
143 | local_irq_save(flags); | 144 | local_irq_save(flags); |
144 | 145 | ||
146 | htw_stop(); | ||
145 | /* Check if our ASID is of an older version and thus invalid */ | 147 | /* Check if our ASID is of an older version and thus invalid */ |
146 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) | 148 | if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) |
147 | get_new_mmu_context(next, cpu); | 149 | get_new_mmu_context(next, cpu); |
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
154 | */ | 156 | */ |
155 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | 157 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); |
156 | cpumask_set_cpu(cpu, mm_cpumask(next)); | 158 | cpumask_set_cpu(cpu, mm_cpumask(next)); |
159 | htw_start(); | ||
157 | 160 | ||
158 | local_irq_restore(flags); | 161 | local_irq_restore(flags); |
159 | } | 162 | } |
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
180 | 183 | ||
181 | local_irq_save(flags); | 184 | local_irq_save(flags); |
182 | 185 | ||
186 | htw_stop(); | ||
183 | /* Unconditionally get a new ASID. */ | 187 | /* Unconditionally get a new ASID. */ |
184 | get_new_mmu_context(next, cpu); | 188 | get_new_mmu_context(next, cpu); |
185 | 189 | ||
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) | |||
189 | /* mark mmu ownership change */ | 193 | /* mark mmu ownership change */ |
190 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | 194 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); |
191 | cpumask_set_cpu(cpu, mm_cpumask(next)); | 195 | cpumask_set_cpu(cpu, mm_cpumask(next)); |
196 | htw_start(); | ||
192 | 197 | ||
193 | local_irq_restore(flags); | 198 | local_irq_restore(flags); |
194 | } | 199 | } |
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) | |||
203 | unsigned long flags; | 208 | unsigned long flags; |
204 | 209 | ||
205 | local_irq_save(flags); | 210 | local_irq_save(flags); |
211 | htw_stop(); | ||
206 | 212 | ||
207 | if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { | 213 | if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { |
208 | get_new_mmu_context(mm, cpu); | 214 | get_new_mmu_context(mm, cpu); |
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) | |||
211 | /* will get a new context next time */ | 217 | /* will get a new context next time */ |
212 | cpu_context(cpu, mm) = 0; | 218 | cpu_context(cpu, mm) = 0; |
213 | } | 219 | } |
220 | htw_start(); | ||
214 | local_irq_restore(flags); | 221 | local_irq_restore(flags); |
215 | } | 222 | } |
216 | 223 | ||
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 800fe578dc99..0aaf9a01ea50 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h | |||
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr) | |||
88 | #define MODULE_PROC_FAMILY "MIPS32_R1 " | 88 | #define MODULE_PROC_FAMILY "MIPS32_R1 " |
89 | #elif defined CONFIG_CPU_MIPS32_R2 | 89 | #elif defined CONFIG_CPU_MIPS32_R2 |
90 | #define MODULE_PROC_FAMILY "MIPS32_R2 " | 90 | #define MODULE_PROC_FAMILY "MIPS32_R2 " |
91 | #elif defined CONFIG_CPU_MIPS32_R6 | ||
92 | #define MODULE_PROC_FAMILY "MIPS32_R6 " | ||
91 | #elif defined CONFIG_CPU_MIPS64_R1 | 93 | #elif defined CONFIG_CPU_MIPS64_R1 |
92 | #define MODULE_PROC_FAMILY "MIPS64_R1 " | 94 | #define MODULE_PROC_FAMILY "MIPS64_R1 " |
93 | #elif defined CONFIG_CPU_MIPS64_R2 | 95 | #elif defined CONFIG_CPU_MIPS64_R2 |
94 | #define MODULE_PROC_FAMILY "MIPS64_R2 " | 96 | #define MODULE_PROC_FAMILY "MIPS64_R2 " |
97 | #elif defined CONFIG_CPU_MIPS64_R6 | ||
98 | #define MODULE_PROC_FAMILY "MIPS64_R6 " | ||
95 | #elif defined CONFIG_CPU_R3000 | 99 | #elif defined CONFIG_CPU_R3000 |
96 | #define MODULE_PROC_FAMILY "R3000 " | 100 | #define MODULE_PROC_FAMILY "R3000 " |
97 | #elif defined CONFIG_CPU_TX39XX | 101 | #elif defined CONFIG_CPU_TX39XX |
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h index 75739c83f07e..8d05d9069823 100644 --- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h +++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h | |||
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, | |||
275 | " lbu %[ticket], %[now_serving]\n" | 275 | " lbu %[ticket], %[now_serving]\n" |
276 | "4:\n" | 276 | "4:\n" |
277 | ".set pop\n" : | 277 | ".set pop\n" : |
278 | [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), | 278 | [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), |
279 | [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), | 279 | [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), |
280 | [my_ticket] "=r"(my_ticket) | 280 | [my_ticket] "=r"(my_ticket) |
281 | ); | 281 | ); |
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h new file mode 100644 index 000000000000..0c9c3e74d4ae --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h | |||
@@ -0,0 +1,306 @@ | |||
1 | /***********************license start*************** | ||
2 | * Author: Cavium Inc. | ||
3 | * | ||
4 | * Contact: support@cavium.com | ||
5 | * This file is part of the OCTEON SDK | ||
6 | * | ||
7 | * Copyright (c) 2003-2014 Cavium Inc. | ||
8 | * | ||
9 | * This file is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License, Version 2, as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This file is distributed in the hope that it will be useful, but | ||
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | ||
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | ||
16 | * NONINFRINGEMENT. See the GNU General Public License for more | ||
17 | * details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this file; if not, write to the Free Software | ||
21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
22 | * or visit http://www.gnu.org/licenses/. | ||
23 | * | ||
24 | * This file may also be available under a different license from Cavium. | ||
25 | * Contact Cavium Inc. for more information | ||
26 | ***********************license end**************************************/ | ||
27 | |||
28 | #ifndef __CVMX_RST_DEFS_H__ | ||
29 | #define __CVMX_RST_DEFS_H__ | ||
30 | |||
31 | #define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull)) | ||
32 | #define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull)) | ||
33 | #define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull)) | ||
34 | #define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8) | ||
35 | #define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull)) | ||
36 | #define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull)) | ||
37 | #define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull)) | ||
38 | #define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull)) | ||
39 | #define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull)) | ||
40 | #define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull)) | ||
41 | #define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8) | ||
42 | #define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull)) | ||
43 | |||
44 | union cvmx_rst_boot { | ||
45 | uint64_t u64; | ||
46 | struct cvmx_rst_boot_s { | ||
47 | #ifdef __BIG_ENDIAN_BITFIELD | ||
48 | uint64_t chipkill:1; | ||
49 | uint64_t jtcsrdis:1; | ||
50 | uint64_t ejtagdis:1; | ||
51 | uint64_t romen:1; | ||
52 | uint64_t ckill_ppdis:1; | ||
53 | uint64_t jt_tstmode:1; | ||
54 | uint64_t vrm_err:1; | ||
55 | uint64_t reserved_37_56:20; | ||
56 | uint64_t c_mul:7; | ||
57 | uint64_t pnr_mul:6; | ||
58 | uint64_t reserved_21_23:3; | ||
59 | uint64_t lboot_oci:3; | ||
60 | uint64_t lboot_ext:6; | ||
61 | uint64_t lboot:10; | ||
62 | uint64_t rboot:1; | ||
63 | uint64_t rboot_pin:1; | ||
64 | #else | ||
65 | uint64_t rboot_pin:1; | ||
66 | uint64_t rboot:1; | ||
67 | uint64_t lboot:10; | ||
68 | uint64_t lboot_ext:6; | ||
69 | uint64_t lboot_oci:3; | ||
70 | uint64_t reserved_21_23:3; | ||
71 | uint64_t pnr_mul:6; | ||
72 | uint64_t c_mul:7; | ||
73 | uint64_t reserved_37_56:20; | ||
74 | uint64_t vrm_err:1; | ||
75 | uint64_t jt_tstmode:1; | ||
76 | uint64_t ckill_ppdis:1; | ||
77 | uint64_t romen:1; | ||
78 | uint64_t ejtagdis:1; | ||
79 | uint64_t jtcsrdis:1; | ||
80 | uint64_t chipkill:1; | ||
81 | #endif | ||
82 | } s; | ||
83 | struct cvmx_rst_boot_s cn70xx; | ||
84 | struct cvmx_rst_boot_s cn70xxp1; | ||
85 | struct cvmx_rst_boot_s cn78xx; | ||
86 | }; | ||
87 | |||
88 | union cvmx_rst_cfg { | ||
89 | uint64_t u64; | ||
90 | struct cvmx_rst_cfg_s { | ||
91 | #ifdef __BIG_ENDIAN_BITFIELD | ||
92 | uint64_t bist_delay:58; | ||
93 | uint64_t reserved_3_5:3; | ||
94 | uint64_t cntl_clr_bist:1; | ||
95 | uint64_t warm_clr_bist:1; | ||
96 | uint64_t soft_clr_bist:1; | ||
97 | #else | ||
98 | uint64_t soft_clr_bist:1; | ||
99 | uint64_t warm_clr_bist:1; | ||
100 | uint64_t cntl_clr_bist:1; | ||
101 | uint64_t reserved_3_5:3; | ||
102 | uint64_t bist_delay:58; | ||
103 | #endif | ||
104 | } s; | ||
105 | struct cvmx_rst_cfg_s cn70xx; | ||
106 | struct cvmx_rst_cfg_s cn70xxp1; | ||
107 | struct cvmx_rst_cfg_s cn78xx; | ||
108 | }; | ||
109 | |||
110 | union cvmx_rst_ckill { | ||
111 | uint64_t u64; | ||
112 | struct cvmx_rst_ckill_s { | ||
113 | #ifdef __BIG_ENDIAN_BITFIELD | ||
114 | uint64_t reserved_47_63:17; | ||
115 | uint64_t timer:47; | ||
116 | #else | ||
117 | uint64_t timer:47; | ||
118 | uint64_t reserved_47_63:17; | ||
119 | #endif | ||
120 | } s; | ||
121 | struct cvmx_rst_ckill_s cn70xx; | ||
122 | struct cvmx_rst_ckill_s cn70xxp1; | ||
123 | struct cvmx_rst_ckill_s cn78xx; | ||
124 | }; | ||
125 | |||
126 | union cvmx_rst_ctlx { | ||
127 | uint64_t u64; | ||
128 | struct cvmx_rst_ctlx_s { | ||
129 | #ifdef __BIG_ENDIAN_BITFIELD | ||
130 | uint64_t reserved_10_63:54; | ||
131 | uint64_t prst_link:1; | ||
132 | uint64_t rst_done:1; | ||
133 | uint64_t rst_link:1; | ||
134 | uint64_t host_mode:1; | ||
135 | uint64_t reserved_4_5:2; | ||
136 | uint64_t rst_drv:1; | ||
137 | uint64_t rst_rcv:1; | ||
138 | uint64_t rst_chip:1; | ||
139 | uint64_t rst_val:1; | ||
140 | #else | ||
141 | uint64_t rst_val:1; | ||
142 | uint64_t rst_chip:1; | ||
143 | uint64_t rst_rcv:1; | ||
144 | uint64_t rst_drv:1; | ||
145 | uint64_t reserved_4_5:2; | ||
146 | uint64_t host_mode:1; | ||
147 | uint64_t rst_link:1; | ||
148 | uint64_t rst_done:1; | ||
149 | uint64_t prst_link:1; | ||
150 | uint64_t reserved_10_63:54; | ||
151 | #endif | ||
152 | } s; | ||
153 | struct cvmx_rst_ctlx_s cn70xx; | ||
154 | struct cvmx_rst_ctlx_s cn70xxp1; | ||
155 | struct cvmx_rst_ctlx_s cn78xx; | ||
156 | }; | ||
157 | |||
158 | union cvmx_rst_delay { | ||
159 | uint64_t u64; | ||
160 | struct cvmx_rst_delay_s { | ||
161 | #ifdef __BIG_ENDIAN_BITFIELD | ||
162 | uint64_t reserved_32_63:32; | ||
163 | uint64_t warm_rst_dly:16; | ||
164 | uint64_t soft_rst_dly:16; | ||
165 | #else | ||
166 | uint64_t soft_rst_dly:16; | ||
167 | uint64_t warm_rst_dly:16; | ||
168 | uint64_t reserved_32_63:32; | ||
169 | #endif | ||
170 | } s; | ||
171 | struct cvmx_rst_delay_s cn70xx; | ||
172 | struct cvmx_rst_delay_s cn70xxp1; | ||
173 | struct cvmx_rst_delay_s cn78xx; | ||
174 | }; | ||
175 | |||
176 | union cvmx_rst_eco { | ||
177 | uint64_t u64; | ||
178 | struct cvmx_rst_eco_s { | ||
179 | #ifdef __BIG_ENDIAN_BITFIELD | ||
180 | uint64_t reserved_32_63:32; | ||
181 | uint64_t eco_rw:32; | ||
182 | #else | ||
183 | uint64_t eco_rw:32; | ||
184 | uint64_t reserved_32_63:32; | ||
185 | #endif | ||
186 | } s; | ||
187 | struct cvmx_rst_eco_s cn78xx; | ||
188 | }; | ||
189 | |||
190 | union cvmx_rst_int { | ||
191 | uint64_t u64; | ||
192 | struct cvmx_rst_int_s { | ||
193 | #ifdef __BIG_ENDIAN_BITFIELD | ||
194 | uint64_t reserved_12_63:52; | ||
195 | uint64_t perst:4; | ||
196 | uint64_t reserved_4_7:4; | ||
197 | uint64_t rst_link:4; | ||
198 | #else | ||
199 | uint64_t rst_link:4; | ||
200 | uint64_t reserved_4_7:4; | ||
201 | uint64_t perst:4; | ||
202 | uint64_t reserved_12_63:52; | ||
203 | #endif | ||
204 | } s; | ||
205 | struct cvmx_rst_int_cn70xx { | ||
206 | #ifdef __BIG_ENDIAN_BITFIELD | ||
207 | uint64_t reserved_11_63:53; | ||
208 | uint64_t perst:3; | ||
209 | uint64_t reserved_3_7:5; | ||
210 | uint64_t rst_link:3; | ||
211 | #else | ||
212 | uint64_t rst_link:3; | ||
213 | uint64_t reserved_3_7:5; | ||
214 | uint64_t perst:3; | ||
215 | uint64_t reserved_11_63:53; | ||
216 | #endif | ||
217 | } cn70xx; | ||
218 | struct cvmx_rst_int_cn70xx cn70xxp1; | ||
219 | struct cvmx_rst_int_s cn78xx; | ||
220 | }; | ||
221 | |||
222 | union cvmx_rst_ocx { | ||
223 | uint64_t u64; | ||
224 | struct cvmx_rst_ocx_s { | ||
225 | #ifdef __BIG_ENDIAN_BITFIELD | ||
226 | uint64_t reserved_3_63:61; | ||
227 | uint64_t rst_link:3; | ||
228 | #else | ||
229 | uint64_t rst_link:3; | ||
230 | uint64_t reserved_3_63:61; | ||
231 | #endif | ||
232 | } s; | ||
233 | struct cvmx_rst_ocx_s cn78xx; | ||
234 | }; | ||
235 | |||
236 | union cvmx_rst_power_dbg { | ||
237 | uint64_t u64; | ||
238 | struct cvmx_rst_power_dbg_s { | ||
239 | #ifdef __BIG_ENDIAN_BITFIELD | ||
240 | uint64_t reserved_3_63:61; | ||
241 | uint64_t str:3; | ||
242 | #else | ||
243 | uint64_t str:3; | ||
244 | uint64_t reserved_3_63:61; | ||
245 | #endif | ||
246 | } s; | ||
247 | struct cvmx_rst_power_dbg_s cn78xx; | ||
248 | }; | ||
249 | |||
250 | union cvmx_rst_pp_power { | ||
251 | uint64_t u64; | ||
252 | struct cvmx_rst_pp_power_s { | ||
253 | #ifdef __BIG_ENDIAN_BITFIELD | ||
254 | uint64_t reserved_48_63:16; | ||
255 | uint64_t gate:48; | ||
256 | #else | ||
257 | uint64_t gate:48; | ||
258 | uint64_t reserved_48_63:16; | ||
259 | #endif | ||
260 | } s; | ||
261 | struct cvmx_rst_pp_power_cn70xx { | ||
262 | #ifdef __BIG_ENDIAN_BITFIELD | ||
263 | uint64_t reserved_4_63:60; | ||
264 | uint64_t gate:4; | ||
265 | #else | ||
266 | uint64_t gate:4; | ||
267 | uint64_t reserved_4_63:60; | ||
268 | #endif | ||
269 | } cn70xx; | ||
270 | struct cvmx_rst_pp_power_cn70xx cn70xxp1; | ||
271 | struct cvmx_rst_pp_power_s cn78xx; | ||
272 | }; | ||
273 | |||
274 | union cvmx_rst_soft_prstx { | ||
275 | uint64_t u64; | ||
276 | struct cvmx_rst_soft_prstx_s { | ||
277 | #ifdef __BIG_ENDIAN_BITFIELD | ||
278 | uint64_t reserved_1_63:63; | ||
279 | uint64_t soft_prst:1; | ||
280 | #else | ||
281 | uint64_t soft_prst:1; | ||
282 | uint64_t reserved_1_63:63; | ||
283 | #endif | ||
284 | } s; | ||
285 | struct cvmx_rst_soft_prstx_s cn70xx; | ||
286 | struct cvmx_rst_soft_prstx_s cn70xxp1; | ||
287 | struct cvmx_rst_soft_prstx_s cn78xx; | ||
288 | }; | ||
289 | |||
290 | union cvmx_rst_soft_rst { | ||
291 | uint64_t u64; | ||
292 | struct cvmx_rst_soft_rst_s { | ||
293 | #ifdef __BIG_ENDIAN_BITFIELD | ||
294 | uint64_t reserved_1_63:63; | ||
295 | uint64_t soft_rst:1; | ||
296 | #else | ||
297 | uint64_t soft_rst:1; | ||
298 | uint64_t reserved_1_63:63; | ||
299 | #endif | ||
300 | } s; | ||
301 | struct cvmx_rst_soft_rst_s cn70xx; | ||
302 | struct cvmx_rst_soft_rst_s cn70xxp1; | ||
303 | struct cvmx_rst_soft_rst_s cn78xx; | ||
304 | }; | ||
305 | |||
306 | #endif | ||
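The RST unions above follow the usual Octeon CSR pattern: a raw u64 overlaid with an endian-aware bitfield view. A short sketch of reading RST_BOOT and decoding the clock multipliers, assuming cvmx_read_csr() as declared in the existing Octeon headers (the reporting function itself is illustrative):

	/* Illustrative decode of the boot-time clock multiplier fields. */
	static void example_report_rst_boot(void)
	{
		union cvmx_rst_boot boot;

		boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
		pr_info("core mul %u, I/O mul %u, remote boot %s\n",
			(unsigned int)boot.s.c_mul,
			(unsigned int)boot.s.pnr_mul,
			boot.s.rboot ? "yes" : "no");
	}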
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h index e8a1c2fd52cd..92b377e36dac 100644 --- a/arch/mips/include/asm/octeon/octeon-model.h +++ b/arch/mips/include/asm/octeon/octeon-model.h | |||
@@ -45,6 +45,7 @@ | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | #define OCTEON_FAMILY_MASK 0x00ffff00 | 47 | #define OCTEON_FAMILY_MASK 0x00ffff00 |
48 | #define OCTEON_PRID_MASK 0x00ffffff | ||
48 | 49 | ||
49 | /* Flag bits in top byte */ | 50 | /* Flag bits in top byte */ |
50 | /* Ignores revision in model checks */ | 51 | /* Ignores revision in model checks */ |
@@ -63,11 +64,52 @@ | |||
63 | #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 | 64 | #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 |
64 | /* Match all cnf7XXX Octeon models. */ | 65 | /* Match all cnf7XXX Octeon models. */ |
65 | #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 | 66 | #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 |
67 | /* Match all cn7XXX Octeon models. */ | ||
68 | #define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000 | ||
69 | #define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \ | ||
70 | OM_MATCH_6XXX_FAMILY_MODELS | \ | ||
71 | OM_MATCH_F7XXX_FAMILY_MODELS | \ | ||
72 | OM_MATCH_7XXX_FAMILY_MODELS) | ||
73 | /* | ||
74 | * CN7XXX models with new revision encoding | ||
75 | */ | ||
76 | |||
77 | #define OCTEON_CN73XX_PASS1_0 0x000d9700 | ||
78 | #define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION) | ||
79 | #define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \ | ||
80 | OM_IGNORE_MINOR_REVISION) | ||
81 | |||
82 | #define OCTEON_CN70XX_PASS1_0 0x000d9600 | ||
83 | #define OCTEON_CN70XX_PASS1_1 0x000d9601 | ||
84 | #define OCTEON_CN70XX_PASS1_2 0x000d9602 | ||
85 | |||
86 | #define OCTEON_CN70XX_PASS2_0 0x000d9608 | ||
87 | |||
88 | #define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION) | ||
89 | #define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \ | ||
90 | OM_IGNORE_MINOR_REVISION) | ||
91 | #define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \ | ||
92 | OM_IGNORE_MINOR_REVISION) | ||
93 | |||
94 | #define OCTEON_CN71XX OCTEON_CN70XX | ||
95 | |||
96 | #define OCTEON_CN78XX_PASS1_0 0x000d9500 | ||
97 | #define OCTEON_CN78XX_PASS1_1 0x000d9501 | ||
98 | #define OCTEON_CN78XX_PASS2_0 0x000d9508 | ||
99 | |||
100 | #define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION) | ||
101 | #define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \ | ||
102 | OM_IGNORE_MINOR_REVISION) | ||
103 | #define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \ | ||
104 | OM_IGNORE_MINOR_REVISION) | ||
105 | |||
106 | #define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL) | ||
66 | 107 | ||
67 | /* | 108 | /* |
68 | * CNF7XXX models with new revision encoding | 109 | * CNF7XXX models with new revision encoding |
69 | */ | 110 | */ |
70 | #define OCTEON_CNF71XX_PASS1_0 0x000d9400 | 111 | #define OCTEON_CNF71XX_PASS1_0 0x000d9400 |
112 | #define OCTEON_CNF71XX_PASS1_1 0x000d9401 | ||
71 | 113 | ||
72 | #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) | 114 | #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) |
73 | #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) | 115 | #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) |
@@ -79,6 +121,8 @@ | |||
79 | #define OCTEON_CN68XX_PASS1_1 0x000d9101 | 121 | #define OCTEON_CN68XX_PASS1_1 0x000d9101 |
80 | #define OCTEON_CN68XX_PASS1_2 0x000d9102 | 122 | #define OCTEON_CN68XX_PASS1_2 0x000d9102 |
81 | #define OCTEON_CN68XX_PASS2_0 0x000d9108 | 123 | #define OCTEON_CN68XX_PASS2_0 0x000d9108 |
124 | #define OCTEON_CN68XX_PASS2_1 0x000d9109 | ||
125 | #define OCTEON_CN68XX_PASS2_2 0x000d910a | ||
82 | 126 | ||
83 | #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) | 127 | #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) |
84 | #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) | 128 | #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) |
@@ -104,11 +148,18 @@ | |||
104 | #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) | 148 | #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) |
105 | #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) | 149 | #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) |
106 | 150 | ||
151 | /* CN62XX is same as CN63XX with 1 MB cache */ | ||
152 | #define OCTEON_CN62XX OCTEON_CN63XX | ||
153 | |||
107 | #define OCTEON_CN61XX_PASS1_0 0x000d9300 | 154 | #define OCTEON_CN61XX_PASS1_0 0x000d9300 |
155 | #define OCTEON_CN61XX_PASS1_1 0x000d9301 | ||
108 | 156 | ||
109 | #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) | 157 | #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) |
110 | #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) | 158 | #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) |
111 | 159 | ||
160 | /* CN60XX is same as CN61XX with 512 KB cache */ | ||
161 | #define OCTEON_CN60XX OCTEON_CN61XX | ||
162 | |||
112 | /* | 163 | /* |
113 | * CN5XXX models with new revision encoding | 164 | * CN5XXX models with new revision encoding |
114 | */ | 165 | */ |
@@ -120,7 +171,7 @@ | |||
120 | #define OCTEON_CN58XX_PASS2_2 0x000d030a | 171 | #define OCTEON_CN58XX_PASS2_2 0x000d030a |
121 | #define OCTEON_CN58XX_PASS2_3 0x000d030b | 172 | #define OCTEON_CN58XX_PASS2_3 0x000d030b |
122 | 173 | ||
123 | #define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION) | 174 | #define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION) |
124 | #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) | 175 | #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) |
125 | #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) | 176 | #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) |
126 | #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X | 177 | #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X |
@@ -217,12 +268,10 @@ | |||
217 | #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) | 268 | #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) |
218 | #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) | 269 | #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) |
219 | #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) | 270 | #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) |
220 | 271 | #define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \ | |
221 | /* These are used to cover entire families of OCTEON processors */ | 272 | OM_MATCH_F7XXX_FAMILY_MODELS) |
222 | #define OCTEON_FAM_1 (OCTEON_CN3XXX) | 273 | #define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \ |
223 | #define OCTEON_FAM_PLUS (OCTEON_CN5XXX) | 274 | OM_MATCH_7XXX_FAMILY_MODELS) |
224 | #define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS) | ||
225 | #define OCTEON_FAM_2 (OCTEON_CN6XXX) | ||
226 | 275 | ||
227 | /* The revision byte (low byte) has two different encodings. | 276 | /* The revision byte (low byte) has two different encodings. |
228 | * CN3XXX: | 277 | * CN3XXX: |
@@ -232,7 +281,7 @@ | |||
232 | * <4>: alternate package | 281 | * <4>: alternate package |
233 | * <3:0>: revision | 282 | * <3:0>: revision |
234 | * | 283 | * |
235 | * CN5XXX: | 284 | * CN5XXX and older models: |
236 | * | 285 | * |
237 | * bits | 286 | * bits |
238 | * <7>: reserved (0) | 287 | * <7>: reserved (0) |
@@ -251,17 +300,21 @@ | |||
251 | /* CN5XXX and later use different layout of bits in the revision ID field */ | 300 | /* CN5XXX and later use different layout of bits in the revision ID field */ |
252 | #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK | 301 | #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK |
253 | #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f | 302 | #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f |
254 | #define OCTEON_58XX_MODEL_MASK 0x00ffffc0 | 303 | #define OCTEON_58XX_MODEL_MASK 0x00ffff40 |
255 | #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) | 304 | #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) |
256 | #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8) | 305 | #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38) |
257 | #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 | 306 | #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 |
258 | 307 | ||
259 | /* forward declarations */ | ||
260 | static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); | 308 | static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); |
261 | static inline uint64_t cvmx_read_csr(uint64_t csr_addr); | 309 | static inline uint64_t cvmx_read_csr(uint64_t csr_addr); |
262 | 310 | ||
263 | #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) | 311 | #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) |
264 | 312 | ||
313 | /* | ||
314 | * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) | ||
315 | * returns true if chip_model is identical or belongs to the OCTEON | ||
316 | * model group specified in arg_model. | ||
317 | */ | ||
265 | /* NOTE: This is for internal use only! */ | 318 | /* NOTE: This is for internal use only! */ |
266 | #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ | 319 | #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ |
267 | ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ | 320 | ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ |
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr); | |||
286 | ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ | 339 | ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ |
287 | && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ | 340 | && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ |
288 | ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ | 341 | ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ |
289 | && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \ | 342 | && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \ |
290 | ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ | 343 | ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ |
291 | && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \ | 344 | && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \ |
345 | && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \ | ||
292 | ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ | 346 | ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ |
293 | && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \ | 347 | && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \ |
348 | && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \ | ||
349 | ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \ | ||
350 | && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \ | ||
351 | && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \ | ||
352 | ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \ | ||
353 | && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \ | ||
294 | ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ | 354 | ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ |
295 | && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ | 355 | && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ |
296 | ))) | 356 | ))) |
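The model-test macro above boils down to masked comparisons of the CPU PrID against a reference model, with the OM_* flag bits selecting how much of the ID is significant. A minimal C sketch of that idea, using the mask names from this header (simplified and illustrative only -- the real macro also covers the 38XX encoding and the whole-family range checks):

	/* Sketch: masked OCTEON model comparison (illustrative, not the kernel macro). */
	static inline int octeon_model_matches(uint32_t arg_model, uint32_t chip_model)
	{
		if ((arg_model & OM_FLAG_MASK) == OM_IGNORE_REVISION)
			/* match the family, ignore all revision bits */
			return (chip_model & OCTEON_58XX_FAMILY_MASK) ==
			       (arg_model & OCTEON_58XX_FAMILY_MASK);
		/* OM_CHECK_SUBMODEL also compares the sub-model bits */
		return (chip_model & OCTEON_58XX_MODEL_MASK) ==
		       (arg_model & OCTEON_58XX_MODEL_MASK);
	}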
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model) | |||
300 | { | 360 | { |
301 | uint32_t cpuid = cvmx_get_proc_id(); | 361 | uint32_t cpuid = cvmx_get_proc_id(); |
302 | 362 | ||
303 | /* | ||
304 | * Check for special case of mismarked 3005 samples. We only | ||
305 | * need to check if the sub model isn't being ignored | ||
306 | */ | ||
307 | if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) { | ||
308 | if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34))) | ||
309 | cpuid |= 0x10; | ||
310 | } | ||
311 | return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); | 363 | return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); |
312 | } | 364 | } |
313 | 365 | ||
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model) | |||
326 | #define OCTEON_IS_COMMON_BINARY() 1 | 378 | #define OCTEON_IS_COMMON_BINARY() 1 |
327 | #undef OCTEON_MODEL | 379 | #undef OCTEON_MODEL |
328 | 380 | ||
381 | #define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX) | ||
382 | #define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX) | ||
383 | #define OCTEON_IS_OCTEON2() \ | ||
384 | (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) | ||
385 | |||
386 | #define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX) | ||
387 | |||
388 | #define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS()) | ||
389 | |||
329 | const char *__init octeon_model_get_string(uint32_t chip_id); | 390 | const char *__init octeon_model_get_string(uint32_t chip_id); |
330 | 391 | ||
331 | /* | 392 | /* |
332 | * Return the octeon family, i.e., ProcessorID of the PrID register. | 393 | * Return the octeon family, i.e., ProcessorID of the PrID register. |
394 | * | ||
395 | * @return the octeon family on success, ((uint32_t)-1) on error. | ||
333 | */ | 396 | */ |
334 | static inline uint32_t cvmx_get_octeon_family(void) | 397 | static inline uint32_t cvmx_get_octeon_family(void) |
335 | { | 398 | { |
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 6dfefd2d5cdf..041596570856 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #define __ASM_OCTEON_OCTEON_H | 9 | #define __ASM_OCTEON_OCTEON_H |
10 | 10 | ||
11 | #include <asm/octeon/cvmx.h> | 11 | #include <asm/octeon/cvmx.h> |
12 | #include <asm/bitfield.h> | ||
12 | 13 | ||
13 | extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, | 14 | extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, |
14 | uint64_t alignment, | 15 | uint64_t alignment, |
@@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long); | |||
53 | #define OCTOEN_SERIAL_LEN 20 | 54 | #define OCTOEN_SERIAL_LEN 20 |
54 | 55 | ||
55 | struct octeon_boot_descriptor { | 56 | struct octeon_boot_descriptor { |
57 | #ifdef __BIG_ENDIAN_BITFIELD | ||
56 | /* Start of block referenced by assembly code - do not change! */ | 58 | /* Start of block referenced by assembly code - do not change! */ |
57 | uint32_t desc_version; | 59 | uint32_t desc_version; |
58 | uint32_t desc_size; | 60 | uint32_t desc_size; |
@@ -104,77 +106,149 @@ struct octeon_boot_descriptor { | |||
104 | uint8_t mac_addr_base[6]; | 106 | uint8_t mac_addr_base[6]; |
105 | uint8_t mac_addr_count; | 107 | uint8_t mac_addr_count; |
106 | uint64_t cvmx_desc_vaddr; | 108 | uint64_t cvmx_desc_vaddr; |
109 | #else | ||
110 | uint32_t desc_size; | ||
111 | uint32_t desc_version; | ||
112 | uint64_t stack_top; | ||
113 | uint64_t heap_base; | ||
114 | uint64_t heap_end; | ||
115 | /* Only used by bootloader */ | ||
116 | uint64_t entry_point; | ||
117 | uint64_t desc_vaddr; | ||
118 | /* End of This block referenced by assembly code - do not change! */ | ||
119 | uint32_t stack_size; | ||
120 | uint32_t exception_base_addr; | ||
121 | uint32_t argc; | ||
122 | uint32_t heap_size; | ||
123 | /* | ||
124 | * Argc count for application. | ||
125 | * Warning low bit scrambled in little-endian. | ||
126 | */ | ||
127 | uint32_t argv[OCTEON_ARGV_MAX_ARGS]; | ||
128 | |||
129 | #define BOOT_FLAG_INIT_CORE (1 << 0) | ||
130 | #define OCTEON_BL_FLAG_DEBUG (1 << 1) | ||
131 | #define OCTEON_BL_FLAG_NO_MAGIC (1 << 2) | ||
132 | /* If set, use uart1 for console */ | ||
133 | #define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3) | ||
134 | /* If set, use PCI console */ | ||
135 | #define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4) | ||
136 | /* Call exit on break on serial port */ | ||
137 | #define OCTEON_BL_FLAG_BREAK (1 << 5) | ||
138 | |||
139 | uint32_t core_mask; | ||
140 | uint32_t flags; | ||
141 | /* physical address of free memory descriptor block. */ | ||
142 | uint32_t phy_mem_desc_addr; | ||
143 | /* DRAM size in megabytes. */ | ||
144 | uint32_t dram_size; | ||
145 | /* CPU clock speed, in hz. */ | ||
146 | uint32_t eclock_hz; | ||
147 | /* used to pass flags from app to debugger. */ | ||
148 | uint32_t debugger_flags_base_addr; | ||
149 | /* SPI4 clock in hz. */ | ||
150 | uint32_t spi_clock_hz; | ||
151 | /* DRAM clock speed, in hz. */ | ||
152 | uint32_t dclock_hz; | ||
153 | uint8_t chip_rev_minor; | ||
154 | uint8_t chip_rev_major; | ||
155 | uint16_t chip_type; | ||
156 | uint8_t board_rev_minor; | ||
157 | uint8_t board_rev_major; | ||
158 | uint16_t board_type; | ||
159 | |||
160 | uint64_t unused1[4]; /* Not even filled in by bootloader. */ | ||
161 | |||
162 | uint64_t cvmx_desc_vaddr; | ||
163 | #endif | ||
107 | }; | 164 | }; |
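The OCTEON_BL_FLAG_* constants defined in the little-endian branch above are ordinary flag bits carried in the descriptor's flags word. A hedged usage sketch (field and flag names are taken from this header; the helper itself is hypothetical):

	/* Hypothetical helper: did the bootloader ask for the uart1 console? */
	static inline int octeon_console_on_uart1(const struct octeon_boot_descriptor *desc)
	{
		return (desc->flags & OCTEON_BL_FLAG_CONSOLE_UART1) != 0;
	}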
108 | 165 | ||
109 | union octeon_cvmemctl { | 166 | union octeon_cvmemctl { |
110 | uint64_t u64; | 167 | uint64_t u64; |
111 | struct { | 168 | struct { |
112 | /* RO 1 = BIST fail, 0 = BIST pass */ | 169 | /* RO 1 = BIST fail, 0 = BIST pass */ |
113 | uint64_t tlbbist:1; | 170 | __BITFIELD_FIELD(uint64_t tlbbist:1, |
114 | /* RO 1 = BIST fail, 0 = BIST pass */ | 171 | /* RO 1 = BIST fail, 0 = BIST pass */ |
115 | uint64_t l1cbist:1; | 172 | __BITFIELD_FIELD(uint64_t l1cbist:1, |
116 | /* RO 1 = BIST fail, 0 = BIST pass */ | 173 | /* RO 1 = BIST fail, 0 = BIST pass */ |
117 | uint64_t l1dbist:1; | 174 | __BITFIELD_FIELD(uint64_t l1dbist:1, |
118 | /* RO 1 = BIST fail, 0 = BIST pass */ | 175 | /* RO 1 = BIST fail, 0 = BIST pass */ |
119 | uint64_t dcmbist:1; | 176 | __BITFIELD_FIELD(uint64_t dcmbist:1, |
120 | /* RO 1 = BIST fail, 0 = BIST pass */ | 177 | /* RO 1 = BIST fail, 0 = BIST pass */ |
121 | uint64_t ptgbist:1; | 178 | __BITFIELD_FIELD(uint64_t ptgbist:1, |
122 | /* RO 1 = BIST fail, 0 = BIST pass */ | 179 | /* RO 1 = BIST fail, 0 = BIST pass */ |
123 | uint64_t wbfbist:1; | 180 | __BITFIELD_FIELD(uint64_t wbfbist:1, |
124 | /* Reserved */ | 181 | /* Reserved */ |
125 | uint64_t reserved:22; | 182 | __BITFIELD_FIELD(uint64_t reserved:17, |
183 | /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU. | ||
184 | * This field selects between the TLB replacement policies: | ||
185 | * bitmask LRU or NLU. Bitmask LRU maintains a mask of | ||
186 | * recently used TLB entries and avoids them as new entries | ||
187 | * are allocated. NLU simply guarantees that the next | ||
188 | * allocation is not the last used TLB entry. */ | ||
189 | __BITFIELD_FIELD(uint64_t tlbnlu:1, | ||
190 | /* OCTEON II - Selects the bit in the counter used for | ||
191 | * releasing a PAUSE. This counter trips every 2(8+PAUSETIME) | ||
192 | * cycles. If not already released, the cnMIPS II core will | ||
193 | * always release a given PAUSE instruction within | ||
194 | * 2(8+PAUSETIME). If the counter trip happens to line up, | ||
195 | * the cnMIPS II core may release the PAUSE instantly. */ | ||
196 | __BITFIELD_FIELD(uint64_t pausetime:3, | ||
197 | /* OCTEON II - This field is an extension of | ||
198 | * CvmMemCtl[DIDTTO] */ | ||
199 | __BITFIELD_FIELD(uint64_t didtto2:1, | ||
126 | /* R/W If set, marked write-buffer entries time out | 200 | /* R/W If set, marked write-buffer entries time out |
127 | * the same as other entries; if clear, marked | 201 | * the same as other entries; if clear, marked |
128 | * write-buffer entries use the maximum timeout. */ | 202 | * write-buffer entries use the maximum timeout. */ |
129 | uint64_t dismarkwblongto:1; | 203 | __BITFIELD_FIELD(uint64_t dismarkwblongto:1, |
130 | /* R/W If set, a merged store does not clear the | 204 | /* R/W If set, a merged store does not clear the |
131 | * write-buffer entry timeout state. */ | 205 | * write-buffer entry timeout state. */ |
132 | uint64_t dismrgclrwbto:1; | 206 | __BITFIELD_FIELD(uint64_t dismrgclrwbto:1, |
133 | /* R/W Two bits that are the MSBs of the resultant | 207 | /* R/W Two bits that are the MSBs of the resultant |
134 | * CVMSEG LM word location for an IOBDMA. The other 8 | 208 | * CVMSEG LM word location for an IOBDMA. The other 8 |
135 | * bits come from the SCRADDR field of the IOBDMA. */ | 209 | * bits come from the SCRADDR field of the IOBDMA. */ |
136 | uint64_t iobdmascrmsb:2; | 210 | __BITFIELD_FIELD(uint64_t iobdmascrmsb:2, |
137 | /* R/W If set, SYNCWS and SYNCS only order marked | 211 | /* R/W If set, SYNCWS and SYNCS only order marked |
138 | * stores; if clear, SYNCWS and SYNCS only order | 212 | * stores; if clear, SYNCWS and SYNCS only order |
139 | * unmarked stores. SYNCWSMARKED has no effect when | 213 | * unmarked stores. SYNCWSMARKED has no effect when |
140 | * DISSYNCWS is set. */ | 214 | * DISSYNCWS is set. */ |
141 | uint64_t syncwsmarked:1; | 215 | __BITFIELD_FIELD(uint64_t syncwsmarked:1, |
142 | /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as | 216 | /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as |
143 | * SYNC. */ | 217 | * SYNC. */ |
144 | uint64_t dissyncws:1; | 218 | __BITFIELD_FIELD(uint64_t dissyncws:1, |
145 | /* R/W If set, no stall happens on write buffer | 219 | /* R/W If set, no stall happens on write buffer |
146 | * full. */ | 220 | * full. */ |
147 | uint64_t diswbfst:1; | 221 | __BITFIELD_FIELD(uint64_t diswbfst:1, |
148 | /* R/W If set (and SX set), supervisor-level | 222 | /* R/W If set (and SX set), supervisor-level |
149 | * loads/stores can use XKPHYS addresses with | 223 | * loads/stores can use XKPHYS addresses with |
150 | * VA<48>==0 */ | 224 | * VA<48>==0 */ |
151 | uint64_t xkmemenas:1; | 225 | __BITFIELD_FIELD(uint64_t xkmemenas:1, |
152 | /* R/W If set (and UX set), user-level loads/stores | 226 | /* R/W If set (and UX set), user-level loads/stores |
153 | * can use XKPHYS addresses with VA<48>==0 */ | 227 | * can use XKPHYS addresses with VA<48>==0 */ |
154 | uint64_t xkmemenau:1; | 228 | __BITFIELD_FIELD(uint64_t xkmemenau:1, |
155 | /* R/W If set (and SX set), supervisor-level | 229 | /* R/W If set (and SX set), supervisor-level |
156 | * loads/stores can use XKPHYS addresses with | 230 | * loads/stores can use XKPHYS addresses with |
157 | * VA<48>==1 */ | 231 | * VA<48>==1 */ |
158 | uint64_t xkioenas:1; | 232 | __BITFIELD_FIELD(uint64_t xkioenas:1, |
159 | /* R/W If set (and UX set), user-level loads/stores | 233 | /* R/W If set (and UX set), user-level loads/stores |
160 | * can use XKPHYS addresses with VA<48>==1 */ | 234 | * can use XKPHYS addresses with VA<48>==1 */ |
161 | uint64_t xkioenau:1; | 235 | __BITFIELD_FIELD(uint64_t xkioenau:1, |
162 | /* R/W If set, all stores act as SYNCW (NOMERGE must | 236 | /* R/W If set, all stores act as SYNCW (NOMERGE must |
163 | * be set when this is set) RW, reset to 0. */ | 237 | * be set when this is set) RW, reset to 0. */ |
164 | uint64_t allsyncw:1; | 238 | __BITFIELD_FIELD(uint64_t allsyncw:1, |
165 | /* R/W If set, no stores merge, and all stores reach | 239 | /* R/W If set, no stores merge, and all stores reach |
166 | * the coherent bus in order. */ | 240 | * the coherent bus in order. */ |
167 | uint64_t nomerge:1; | 241 | __BITFIELD_FIELD(uint64_t nomerge:1, |
168 | /* R/W Selects the bit in the counter used for DID | 242 | /* R/W Selects the bit in the counter used for DID |
169 | * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = | 243 | * time-outs: 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 = |
170 | * 2^14. Actual time-out is between 1x and 2x this | 244 | * 2^14. Actual time-out is between 1x and 2x this |
171 | * interval. For example, with DIDTTO=3, expiration | 245 | * interval. For example, with DIDTTO=3, expiration |
172 | * interval is between 16K and 32K. */ | 246 | * interval is between 16K and 32K. */ |
173 | uint64_t didtto:2; | 247 | __BITFIELD_FIELD(uint64_t didtto:2, |
174 | /* R/W If set, the (mem) CSR clock never turns off. */ | 248 | /* R/W If set, the (mem) CSR clock never turns off. */ |
175 | uint64_t csrckalwys:1; | 249 | __BITFIELD_FIELD(uint64_t csrckalwys:1, |
176 | /* R/W If set, mclk never turns off. */ | 250 | /* R/W If set, mclk never turns off. */ |
177 | uint64_t mclkalwys:1; | 251 | __BITFIELD_FIELD(uint64_t mclkalwys:1, |
178 | /* R/W Selects the bit in the counter used for write | 252 | /* R/W Selects the bit in the counter used for write |
179 | * buffer flush time-outs (WBFLT+11) is the bit | 253 | * buffer flush time-outs (WBFLT+11) is the bit |
180 | * position in an internal counter used to determine | 254 | * position in an internal counter used to determine |
@@ -182,25 +256,26 @@ union octeon_cvmemctl { | |||
182 | * 2x this interval. For example, with WBFLT = 0, a | 256 | * 2x this interval. For example, with WBFLT = 0, a |
183 | * write buffer expires between 2K and 4K cycles after | 257 | * write buffer expires between 2K and 4K cycles after |
184 | * the write buffer entry is allocated. */ | 258 | * the write buffer entry is allocated. */ |
185 | uint64_t wbfltime:3; | 259 | __BITFIELD_FIELD(uint64_t wbfltime:3, |
186 | /* R/W If set, do not put Istream in the L2 cache. */ | 260 | /* R/W If set, do not put Istream in the L2 cache. */ |
187 | uint64_t istrnol2:1; | 261 | __BITFIELD_FIELD(uint64_t istrnol2:1, |
188 | /* R/W The write buffer threshold. */ | 262 | /* R/W The write buffer threshold. */ |
189 | uint64_t wbthresh:4; | 263 | __BITFIELD_FIELD(uint64_t wbthresh:4, |
190 | /* Reserved */ | 264 | /* Reserved */ |
191 | uint64_t reserved2:2; | 265 | __BITFIELD_FIELD(uint64_t reserved2:2, |
192 | /* R/W If set, CVMSEG is available for loads/stores in | 266 | /* R/W If set, CVMSEG is available for loads/stores in |
193 | * kernel/debug mode. */ | 267 | * kernel/debug mode. */ |
194 | uint64_t cvmsegenak:1; | 268 | __BITFIELD_FIELD(uint64_t cvmsegenak:1, |
195 | /* R/W If set, CVMSEG is available for loads/stores in | 269 | /* R/W If set, CVMSEG is available for loads/stores in |
196 | * supervisor mode. */ | 270 | * supervisor mode. */ |
197 | uint64_t cvmsegenas:1; | 271 | __BITFIELD_FIELD(uint64_t cvmsegenas:1, |
198 | /* R/W If set, CVMSEG is available for loads/stores in | 272 | /* R/W If set, CVMSEG is available for loads/stores in |
199 | * user mode. */ | 273 | * user mode. */ |
200 | uint64_t cvmsegenau:1; | 274 | __BITFIELD_FIELD(uint64_t cvmsegenau:1, |
201 | /* R/W Size of local memory in cache blocks, 54 (6912 | 275 | /* R/W Size of local memory in cache blocks, 54 (6912 |
202 | * bytes) is max legal value. */ | 276 | * bytes) is max legal value. */ |
203 | uint64_t lmemsz:6; | 277 | __BITFIELD_FIELD(uint64_t lmemsz:6, |
278 | ;))))))))))))))))))))))))))))))))) | ||
204 | } s; | 279 | } s; |
205 | }; | 280 | }; |
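Every field of the union above is now wrapped in __BITFIELD_FIELD(), provided by the newly included <asm/bitfield.h>: it emits the bit-fields in declaration order on big-endian builds and in reverse order on little-endian builds, which is why the struct ends with the long run of closing parentheses. The helper is expected to look roughly like the sketch below (an assumption here; see the real asm/bitfield.h for the authoritative definition):

	/* Assumed shape of the endian-flipping bit-field helper. */
	#if defined(__BIG_ENDIAN_BITFIELD)
	#define __BITFIELD_FIELD(field, more)	field; more
	#elif defined(__LITTLE_ENDIAN_BITFIELD)
	#define __BITFIELD_FIELD(field, more)	more field;
	#endif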
206 | 281 | ||
@@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val) | |||
224 | cvmx_read64_uint32(address ^ 4); | 299 | cvmx_read64_uint32(address ^ 4); |
225 | } | 300 | } |
226 | 301 | ||
302 | /* Octeon multiplier save/restore routines from octeon_switch.S */ | ||
303 | void octeon_mult_save(void); | ||
304 | void octeon_mult_restore(void); | ||
305 | void octeon_mult_save_end(void); | ||
306 | void octeon_mult_restore_end(void); | ||
307 | void octeon_mult_save3(void); | ||
308 | void octeon_mult_save3_end(void); | ||
309 | void octeon_mult_save2(void); | ||
310 | void octeon_mult_save2_end(void); | ||
311 | void octeon_mult_restore3(void); | ||
312 | void octeon_mult_restore3_end(void); | ||
313 | void octeon_mult_restore2(void); | ||
314 | void octeon_mult_restore2_end(void); | ||
227 | 315 | ||
228 | /** | 316 | /** |
229 | * Read a 32bit value from the Octeon NPI register space | 317 | * Read a 32bit value from the Octeon NPI register space |
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 69529624a005..193b4c6b7541 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h | |||
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, | |||
121 | } | 121 | } |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | #ifdef CONFIG_PCI_DOMAINS | ||
124 | #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index | 125 | #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index |
125 | 126 | ||
126 | static inline int pci_proc_domain(struct pci_bus *bus) | 127 | static inline int pci_proc_domain(struct pci_bus *bus) |
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) | |||
128 | struct pci_controller *hose = bus->sysdata; | 129 | struct pci_controller *hose = bus->sysdata; |
129 | return hose->need_domain_info; | 130 | return hose->need_domain_info; |
130 | } | 131 | } |
132 | #endif /* CONFIG_PCI_DOMAINS */ | ||
131 | 133 | ||
132 | #endif /* __KERNEL__ */ | 134 | #endif /* __KERNEL__ */ |
133 | 135 | ||
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index fc807aa5ec8d..91747c282bb3 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) | 35 | #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * The following bits are directly used by the TLB hardware | 38 | * The following bits are implemented by the TLB hardware |
39 | */ | 39 | */ |
40 | #define _PAGE_GLOBAL_SHIFT 0 | 40 | #define _PAGE_GLOBAL_SHIFT 0 |
41 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) | 41 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) |
@@ -60,43 +60,40 @@ | |||
60 | #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) | 60 | #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) |
61 | #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) | 61 | #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) |
62 | 62 | ||
63 | #define _PAGE_SILENT_READ _PAGE_VALID | ||
64 | #define _PAGE_SILENT_WRITE _PAGE_DIRTY | ||
65 | |||
66 | #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) | 63 | #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) |
67 | 64 | ||
68 | #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | 65 | #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) |
69 | 66 | ||
70 | /* | 67 | /* |
71 | * The following are implemented by software | 68 | * The following bits are implemented in software |
72 | */ | 69 | */ |
73 | #define _PAGE_PRESENT_SHIFT 0 | 70 | #define _PAGE_PRESENT_SHIFT (0) |
74 | #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) | 71 | #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) |
75 | #define _PAGE_READ_SHIFT 1 | 72 | #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) |
76 | #define _PAGE_READ (1 << _PAGE_READ_SHIFT) | 73 | #define _PAGE_READ (1 << _PAGE_READ_SHIFT) |
77 | #define _PAGE_WRITE_SHIFT 2 | 74 | #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) |
78 | #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) | 75 | #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) |
79 | #define _PAGE_ACCESSED_SHIFT 3 | 76 | #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) |
80 | #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) | 77 | #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) |
81 | #define _PAGE_MODIFIED_SHIFT 4 | 78 | #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) |
82 | #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) | 79 | #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) |
83 | 80 | ||
84 | /* | 81 | /* |
85 | * And these are the hardware TLB bits | 82 | * The following bits are implemented by the TLB hardware |
86 | */ | 83 | */ |
87 | #define _PAGE_GLOBAL_SHIFT 8 | 84 | #define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4) |
88 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) | 85 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) |
89 | #define _PAGE_VALID_SHIFT 9 | 86 | #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) |
90 | #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) | 87 | #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) |
91 | #define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ | 88 | #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) |
92 | #define _PAGE_DIRTY_SHIFT 10 | ||
93 | #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) | 89 | #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) |
94 | #define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) | 90 | #define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1) |
95 | #define _CACHE_UNCACHED_SHIFT 11 | ||
96 | #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) | 91 | #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) |
97 | #define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) | 92 | #define _CACHE_MASK _CACHE_UNCACHED |
98 | 93 | ||
99 | #else /* 'Normal' r4K case */ | 94 | #define _PFN_SHIFT PAGE_SHIFT |
95 | |||
96 | #else | ||
100 | /* | 97 | /* |
101 | * When using the RI/XI bit support, we have 13 bits of flags below | 98 | * When using the RI/XI bit support, we have 13 bits of flags below |
102 | * the physical address. The RI/XI bits are placed such that a SRL 5 | 99 | * the physical address. The RI/XI bits are placed such that a SRL 5 |
@@ -107,10 +104,8 @@ | |||
107 | 104 | ||
108 | /* | 105 | /* |
109 | * The following bits are implemented in software | 106 | * The following bits are implemented in software |
110 | * | ||
111 | * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi. | ||
112 | */ | 107 | */ |
113 | #define _PAGE_PRESENT_SHIFT (0) | 108 | #define _PAGE_PRESENT_SHIFT 0 |
114 | #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) | 109 | #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) |
115 | #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) | 110 | #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) |
116 | #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) | 111 | #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) |
@@ -125,16 +120,11 @@ | |||
125 | /* huge tlb page */ | 120 | /* huge tlb page */ |
126 | #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) | 121 | #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) |
127 | #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) | 122 | #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) |
128 | #else | ||
129 | #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) | ||
130 | #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ | ||
131 | #endif | ||
132 | |||
133 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT | ||
134 | /* huge tlb page */ | ||
135 | #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) | 123 | #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) |
136 | #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) | 124 | #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) |
137 | #else | 125 | #else |
126 | #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) | ||
127 | #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ | ||
138 | #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) | 128 | #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) |
139 | #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ | 129 | #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ |
140 | #endif | 130 | #endif |
@@ -149,17 +139,10 @@ | |||
149 | 139 | ||
150 | #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) | 140 | #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) |
151 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) | 141 | #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) |
152 | |||
153 | #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) | 142 | #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) |
154 | #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) | 143 | #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) |
155 | /* synonym */ | ||
156 | #define _PAGE_SILENT_READ (_PAGE_VALID) | ||
157 | |||
158 | /* The MIPS dirty bit */ | ||
159 | #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) | 144 | #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) |
160 | #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) | 145 | #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) |
161 | #define _PAGE_SILENT_WRITE (_PAGE_DIRTY) | ||
162 | |||
163 | #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) | 146 | #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) |
164 | #define _CACHE_MASK (7 << _CACHE_SHIFT) | 147 | #define _CACHE_MASK (7 << _CACHE_SHIFT) |
165 | 148 | ||
@@ -167,9 +150,9 @@ | |||
167 | 150 | ||
168 | #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ | 151 | #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ |
169 | 152 | ||
170 | #ifndef _PFN_SHIFT | 153 | #define _PAGE_SILENT_READ _PAGE_VALID |
171 | #define _PFN_SHIFT PAGE_SHIFT | 154 | #define _PAGE_SILENT_WRITE _PAGE_DIRTY |
172 | #endif | 155 | |
173 | #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) | 156 | #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) |
174 | 157 | ||
175 | #ifndef _PAGE_NO_READ | 158 | #ifndef _PAGE_NO_READ |
@@ -179,9 +162,6 @@ | |||
179 | #ifndef _PAGE_NO_EXEC | 162 | #ifndef _PAGE_NO_EXEC |
180 | #define _PAGE_NO_EXEC ({BUG(); 0; }) | 163 | #define _PAGE_NO_EXEC ({BUG(); 0; }) |
181 | #endif | 164 | #endif |
182 | #ifndef _PAGE_GLOBAL_SHIFT | ||
183 | #define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL) | ||
184 | #endif | ||
185 | 165 | ||
186 | 166 | ||
187 | #ifndef __ASSEMBLY__ | 167 | #ifndef __ASSEMBLY__ |
@@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) | |||
266 | #endif | 246 | #endif |
267 | 247 | ||
268 | #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ)) | 248 | #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ)) |
269 | #define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) | 249 | #define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) |
270 | 250 | ||
271 | #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) | 251 | #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ |
252 | _PFN_MASK | _CACHE_MASK) | ||
272 | 253 | ||
273 | #endif /* _ASM_PGTABLE_BITS_H */ | 254 | #endif /* _ASM_PGTABLE_BITS_H */ |
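Because the reworked definitions above derive each shift from the previous one, the resulting bit positions can be checked mechanically. A user-space style compile-time check for the R3000/TX39 branch (a sketch under that configuration; kernel code would use BUILD_BUG_ON() rather than _Static_assert):

	/* Sketch: expected R3000/TX39 PTE bit positions after the rework. */
	_Static_assert(_PAGE_PRESENT_SHIFT == 0, "present is bit 0");
	_Static_assert(_PAGE_MODIFIED_SHIFT == 4, "software bits occupy bits 0..4");
	_Static_assert(_PAGE_GLOBAL_SHIFT == 8, "hardware bits start at bit 8");
	_Static_assert(_CACHE_UNCACHED_SHIFT == 11, "uncached attribute is bit 11");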
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 583ff4215479..bef782c4a44b 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h | |||
@@ -99,29 +99,35 @@ extern void paging_init(void); | |||
99 | 99 | ||
100 | #define htw_stop() \ | 100 | #define htw_stop() \ |
101 | do { \ | 101 | do { \ |
102 | if (cpu_has_htw) \ | 102 | unsigned long flags; \ |
103 | write_c0_pwctl(read_c0_pwctl() & \ | 103 | \ |
104 | ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ | 104 | if (cpu_has_htw) { \ |
105 | local_irq_save(flags); \ | ||
106 | if (!raw_current_cpu_data.htw_seq++) { | ||
107 | write_c0_pwctl(read_c0_pwctl() & \ | ||
108 | ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ | ||
109 | back_to_back_c0_hazard(); \ | ||
110 | } \ | ||
111 | local_irq_restore(flags); \ | ||
112 | } \ | ||
105 | } while(0) | 113 | } while(0) |
106 | 114 | ||
107 | #define htw_start() \ | 115 | #define htw_start() \ |
108 | do { \ | 116 | do { \ |
109 | if (cpu_has_htw) \ | 117 | unsigned long flags; \ |
110 | write_c0_pwctl(read_c0_pwctl() | \ | 118 | \ |
111 | (1 << MIPS_PWCTL_PWEN_SHIFT)); \ | ||
112 | } while(0) | ||
113 | |||
114 | |||
115 | #define htw_reset() \ | ||
116 | do { \ | ||
117 | if (cpu_has_htw) { \ | 119 | if (cpu_has_htw) { \ |
118 | htw_stop(); \ | 120 | local_irq_save(flags); \ |
119 | back_to_back_c0_hazard(); \ | 121 | if (!--raw_current_cpu_data.htw_seq) { \ |
120 | htw_start(); \ | 122 | write_c0_pwctl(read_c0_pwctl() | \ |
121 | back_to_back_c0_hazard(); \ | 123 | (1 << MIPS_PWCTL_PWEN_SHIFT)); \ |
124 | back_to_back_c0_hazard(); \ | ||
125 | } \ | ||
126 | local_irq_restore(flags); \ | ||
122 | } \ | 127 | } \ |
123 | } while(0) | 128 | } while(0) |
124 | 129 | ||
130 | |||
125 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, | 131 | extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, |
126 | pte_t pteval); | 132 | pte_t pteval); |
127 | 133 | ||
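The reworked htw_stop()/htw_start() pair above makes stopping the hardware page table walker nestable: a per-CPU htw_seq counter is updated with interrupts disabled, the walker is switched off only on the outermost stop, and switched back on only when the matching outermost start brings the counter back to zero. Stripped of the MIPS specifics, the pattern is roughly the following (illustrative sketch; hw_disable_walker()/hw_enable_walker() are hypothetical stand-ins for the pwctl writes):

	/* Sketch of the nest-counted disable/enable pattern. */
	static unsigned int walker_stop_depth;	/* per-CPU htw_seq in the real code */

	static void walker_stop(void)
	{
		if (walker_stop_depth++ == 0)
			hw_disable_walker();	/* clear the PWEn bit */
	}

	static void walker_start(void)
	{
		if (--walker_stop_depth == 0)
			hw_enable_walker();	/* set the PWEn bit again */
	}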
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt | |||
153 | { | 159 | { |
154 | pte_t null = __pte(0); | 160 | pte_t null = __pte(0); |
155 | 161 | ||
162 | htw_stop(); | ||
156 | /* Preserve global status for the pair */ | 163 | /* Preserve global status for the pair */ |
157 | if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) | 164 | if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) |
158 | null.pte_low = null.pte_high = _PAGE_GLOBAL; | 165 | null.pte_low = null.pte_high = _PAGE_GLOBAL; |
159 | 166 | ||
160 | set_pte_at(mm, addr, ptep, null); | 167 | set_pte_at(mm, addr, ptep, null); |
161 | htw_reset(); | 168 | htw_start(); |
162 | } | 169 | } |
163 | #else | 170 | #else |
164 | 171 | ||
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
188 | 195 | ||
189 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 196 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
190 | { | 197 | { |
198 | htw_stop(); | ||
191 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) | 199 | #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) |
192 | /* Preserve global status for the pair */ | 200 | /* Preserve global status for the pair */ |
193 | if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) | 201 | if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) |
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt | |||
195 | else | 203 | else |
196 | #endif | 204 | #endif |
197 | set_pte_at(mm, addr, ptep, __pte(0)); | 205 | set_pte_at(mm, addr, ptep, __pte(0)); |
198 | htw_reset(); | 206 | htw_start(); |
199 | } | 207 | } |
200 | #endif | 208 | #endif |
201 | 209 | ||
@@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte) | |||
334 | return pte; | 342 | return pte; |
335 | } | 343 | } |
336 | 344 | ||
337 | #ifdef _PAGE_HUGE | 345 | #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT |
338 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } | 346 | static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } |
339 | 347 | ||
340 | static inline pte_t pte_mkhuge(pte_t pte) | 348 | static inline pte_t pte_mkhuge(pte_t pte) |
@@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte) | |||
342 | pte_val(pte) |= _PAGE_HUGE; | 350 | pte_val(pte) |= _PAGE_HUGE; |
343 | return pte; | 351 | return pte; |
344 | } | 352 | } |
345 | #endif /* _PAGE_HUGE */ | 353 | #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ |
346 | #endif | 354 | #endif |
347 | static inline int pte_special(pte_t pte) { return 0; } | 355 | static inline int pte_special(pte_t pte) { return 0; } |
348 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } | 356 | static inline pte_t pte_mkspecial(pte_t pte) { return pte; } |
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index f1df4cb4a286..b5dcbee01fd7 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count; | |||
54 | #define TASK_SIZE 0x7fff8000UL | 54 | #define TASK_SIZE 0x7fff8000UL |
55 | #endif | 55 | #endif |
56 | 56 | ||
57 | #ifdef __KERNEL__ | ||
58 | #define STACK_TOP_MAX TASK_SIZE | 57 | #define STACK_TOP_MAX TASK_SIZE |
59 | #endif | ||
60 | 58 | ||
61 | #define TASK_IS_32BIT_ADDR 1 | 59 | #define TASK_IS_32BIT_ADDR 1 |
62 | 60 | ||
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count; | |||
73 | #define TASK_SIZE32 0x7fff8000UL | 71 | #define TASK_SIZE32 0x7fff8000UL |
74 | #define TASK_SIZE64 0x10000000000UL | 72 | #define TASK_SIZE64 0x10000000000UL |
75 | #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) | 73 | #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) |
76 | |||
77 | #ifdef __KERNEL__ | ||
78 | #define STACK_TOP_MAX TASK_SIZE64 | 74 | #define STACK_TOP_MAX TASK_SIZE64 |
79 | #endif | ||
80 | |||
81 | 75 | ||
82 | #define TASK_SIZE_OF(tsk) \ | 76 | #define TASK_SIZE_OF(tsk) \ |
83 | (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) | 77 | (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) |
@@ -211,6 +205,8 @@ struct octeon_cop2_state { | |||
211 | unsigned long cop2_gfm_poly; | 205 | unsigned long cop2_gfm_poly; |
212 | /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ | 206 | /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ |
213 | unsigned long cop2_gfm_result[2]; | 207 | unsigned long cop2_gfm_result[2]; |
208 | /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */ | ||
209 | unsigned long cop2_sha3[2]; | ||
214 | }; | 210 | }; |
215 | #define COP2_INIT \ | 211 | #define COP2_INIT \ |
216 | .cp2 = {0,}, | 212 | .cp2 = {0,}, |
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p); | |||
399 | 395 | ||
400 | #endif | 396 | #endif |
401 | 397 | ||
398 | /* | ||
399 | * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options | ||
400 | * to the prctl syscall. | ||
401 | */ | ||
402 | extern int mips_get_process_fp_mode(struct task_struct *task); | ||
403 | extern int mips_set_process_fp_mode(struct task_struct *task, | ||
404 | unsigned int value); | ||
405 | |||
406 | #define GET_FP_MODE(task) mips_get_process_fp_mode(task) | ||
407 | #define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) | ||
408 | |||
402 | #endif /* _ASM_PROCESSOR_H */ | 409 | #endif /* _ASM_PROCESSOR_H */ |
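mips_get_process_fp_mode() and mips_set_process_fp_mode() back the PR_GET_FP_MODE/PR_SET_FP_MODE prctl(2) options referenced in the comment above. From user space the interface is expected to look roughly like the sketch below (assuming <sys/prctl.h> on a kernel with this support exposes the PR_GET_FP_MODE constant):

	#include <stdio.h>
	#include <sys/prctl.h>

	int main(void)
	{
	#ifdef PR_GET_FP_MODE
		int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

		if (mode < 0)
			perror("PR_GET_FP_MODE");
		else
			printf("current MIPS FP mode bits: %#x\n", mode);
	#endif
		return 0;
	}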
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index eaa26270a5e5..8ebc2aa5f3e1 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h | |||
@@ -24,13 +24,6 @@ struct boot_param_header; | |||
24 | extern void __dt_setup_arch(void *bph); | 24 | extern void __dt_setup_arch(void *bph); |
25 | extern int __dt_register_buses(const char *bus0, const char *bus1); | 25 | extern int __dt_register_buses(const char *bus0, const char *bus1); |
26 | 26 | ||
27 | #define dt_setup_arch(sym) \ | ||
28 | ({ \ | ||
29 | extern char __dtb_##sym##_begin[]; \ | ||
30 | \ | ||
31 | __dt_setup_arch(__dtb_##sym##_begin); \ | ||
32 | }) | ||
33 | |||
34 | #else /* CONFIG_OF */ | 27 | #else /* CONFIG_OF */ |
35 | static inline void device_tree_init(void) { } | 28 | static inline void device_tree_init(void) { } |
36 | #endif /* CONFIG_OF */ | 29 | #endif /* CONFIG_OF */ |
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index fc783f843bdc..ffc320389f40 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h | |||
@@ -40,8 +40,8 @@ struct pt_regs { | |||
40 | unsigned long cp0_cause; | 40 | unsigned long cp0_cause; |
41 | unsigned long cp0_epc; | 41 | unsigned long cp0_epc; |
42 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 42 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
43 | unsigned long long mpl[3]; /* MTM{0,1,2} */ | 43 | unsigned long long mpl[6]; /* MTM{0-5} */ |
44 | unsigned long long mtp[3]; /* MTP{0,1,2} */ | 44 | unsigned long long mtp[6]; /* MTP{0-5} */ |
45 | #endif | 45 | #endif |
46 | } __aligned(8); | 46 | } __aligned(8); |
47 | 47 | ||
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index e293a8d89a6d..1b22d2da88a1 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <asm/asm.h> | 15 | #include <asm/asm.h> |
16 | #include <asm/cacheops.h> | 16 | #include <asm/cacheops.h> |
17 | #include <asm/compiler.h> | ||
17 | #include <asm/cpu-features.h> | 18 | #include <asm/cpu-features.h> |
18 | #include <asm/cpu-type.h> | 19 | #include <asm/cpu-type.h> |
19 | #include <asm/mipsmtregs.h> | 20 | #include <asm/mipsmtregs.h> |
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void); | |||
39 | __asm__ __volatile__( \ | 40 | __asm__ __volatile__( \ |
40 | " .set push \n" \ | 41 | " .set push \n" \ |
41 | " .set noreorder \n" \ | 42 | " .set noreorder \n" \ |
42 | " .set arch=r4000 \n" \ | 43 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
43 | " cache %0, %1 \n" \ | 44 | " cache %0, %1 \n" \ |
44 | " .set pop \n" \ | 45 | " .set pop \n" \ |
45 | : \ | 46 | : \ |
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr) | |||
147 | __asm__ __volatile__( \ | 148 | __asm__ __volatile__( \ |
148 | " .set push \n" \ | 149 | " .set push \n" \ |
149 | " .set noreorder \n" \ | 150 | " .set noreorder \n" \ |
150 | " .set arch=r4000 \n" \ | 151 | " .set "MIPS_ISA_ARCH_LEVEL" \n" \ |
151 | "1: cache %0, (%1) \n" \ | 152 | "1: cache %0, (%1) \n" \ |
152 | "2: .set pop \n" \ | 153 | "2: .set pop \n" \ |
153 | " .section __ex_table,\"a\" \n" \ | 154 | " .section __ex_table,\"a\" \n" \ |
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr) | |||
218 | cache_op(Page_Invalidate_T, addr); | 219 | cache_op(Page_Invalidate_T, addr); |
219 | } | 220 | } |
220 | 221 | ||
222 | #ifndef CONFIG_CPU_MIPSR6 | ||
221 | #define cache16_unroll32(base,op) \ | 223 | #define cache16_unroll32(base,op) \ |
222 | __asm__ __volatile__( \ | 224 | __asm__ __volatile__( \ |
223 | " .set push \n" \ | 225 | " .set push \n" \ |
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr) | |||
322 | : "r" (base), \ | 324 | : "r" (base), \ |
323 | "i" (op)); | 325 | "i" (op)); |
324 | 326 | ||
327 | #else | ||
328 | /* | ||
329 | * MIPS R6 changed the cache opcode and moved to an 8-bit offset field. | ||
330 | * This means we now need to increment the base register before we flush | ||
331 | * more cache lines. | ||
332 | */ | ||
333 | #define cache16_unroll32(base,op) \ | ||
334 | __asm__ __volatile__( \ | ||
335 | " .set push\n" \ | ||
336 | " .set noreorder\n" \ | ||
337 | " .set mips64r6\n" \ | ||
338 | " .set noat\n" \ | ||
339 | " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ | ||
340 | " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ | ||
341 | " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ | ||
342 | " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ | ||
343 | " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ | ||
344 | " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ | ||
345 | " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ | ||
346 | " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ | ||
347 | " addiu $1, %0, 0x100 \n" \ | ||
348 | " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ | ||
349 | " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ | ||
350 | " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ | ||
351 | " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ | ||
352 | " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ | ||
353 | " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ | ||
354 | " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ | ||
355 | " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ | ||
356 | " .set pop\n" \ | ||
357 | : \ | ||
358 | : "r" (base), \ | ||
359 | "i" (op)); | ||
360 | |||
361 | #define cache32_unroll32(base,op) \ | ||
362 | __asm__ __volatile__( \ | ||
363 | " .set push\n" \ | ||
364 | " .set noreorder\n" \ | ||
365 | " .set mips64r6\n" \ | ||
366 | " .set noat\n" \ | ||
367 | " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ | ||
368 | " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ | ||
369 | " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ | ||
370 | " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ | ||
371 | " addiu $1, %0, 0x100\n" \ | ||
372 | " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ | ||
373 | " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ | ||
374 | " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ | ||
375 | " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ | ||
376 | " addiu $1, $1, 0x100\n" \ | ||
377 | " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ | ||
378 | " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ | ||
379 | " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ | ||
380 | " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ | ||
381 | " addiu $1, $1, 0x100\n" \ | ||
382 | " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ | ||
383 | " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ | ||
384 | " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ | ||
385 | " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ | ||
386 | " .set pop\n" \ | ||
387 | : \ | ||
388 | : "r" (base), \ | ||
389 | "i" (op)); | ||
390 | |||
391 | #define cache64_unroll32(base,op) \ | ||
392 | __asm__ __volatile__( \ | ||
393 | " .set push\n" \ | ||
394 | " .set noreorder\n" \ | ||
395 | " .set mips64r6\n" \ | ||
396 | " .set noat\n" \ | ||
397 | " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ | ||
398 | " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ | ||
399 | " addiu $1, %0, 0x100\n" \ | ||
400 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
401 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
402 | " addiu $1, %0, 0x100\n" \ | ||
403 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
404 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
405 | " addiu $1, %0, 0x100\n" \ | ||
406 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
407 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
408 | " addiu $1, %0, 0x100\n" \ | ||
409 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
410 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
411 | " addiu $1, %0, 0x100\n" \ | ||
412 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
413 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
414 | " addiu $1, %0, 0x100\n" \ | ||
415 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
416 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
417 | " addiu $1, %0, 0x100\n" \ | ||
418 | " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ | ||
419 | " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ | ||
420 | " .set pop\n" \ | ||
421 | : \ | ||
422 | : "r" (base), \ | ||
423 | "i" (op)); | ||
424 | |||
425 | #define cache128_unroll32(base,op) \ | ||
426 | __asm__ __volatile__( \ | ||
427 | " .set push\n" \ | ||
428 | " .set noreorder\n" \ | ||
429 | " .set mips64r6\n" \ | ||
430 | " .set noat\n" \ | ||
431 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
432 | " addiu $1, %0, 0x100\n" \ | ||
433 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
434 | " addiu $1, %0, 0x100\n" \ | ||
435 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
436 | " addiu $1, %0, 0x100\n" \ | ||
437 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
438 | " addiu $1, %0, 0x100\n" \ | ||
439 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
440 | " addiu $1, %0, 0x100\n" \ | ||
441 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
442 | " addiu $1, %0, 0x100\n" \ | ||
443 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
444 | " addiu $1, %0, 0x100\n" \ | ||
445 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
446 | " addiu $1, %0, 0x100\n" \ | ||
447 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
448 | " addiu $1, %0, 0x100\n" \ | ||
449 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
450 | " addiu $1, %0, 0x100\n" \ | ||
451 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
452 | " addiu $1, %0, 0x100\n" \ | ||
453 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
454 | " addiu $1, %0, 0x100\n" \ | ||
455 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
456 | " addiu $1, %0, 0x100\n" \ | ||
457 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
458 | " addiu $1, %0, 0x100\n" \ | ||
459 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
460 | " addiu $1, %0, 0x100\n" \ | ||
461 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
462 | " addiu $1, %0, 0x100\n" \ | ||
463 | " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ | ||
464 | " addiu $1, %0, 0x100\n" \ | ||
465 | " .set pop\n" \ | ||
466 | : \ | ||
467 | : "r" (base), \ | ||
468 | "i" (op)); | ||
469 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
470 | |||
325 | /* | 471 | /* |
326 | * Perform the cache operation specified by op using a user mode virtual | 472 | * Perform the cache operation specified by op using a user mode virtual |
327 | * address while in kernel mode. | 473 | * address while in kernel mode. |
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h index 753275accd18..195db5045ae5 100644 --- a/arch/mips/include/asm/sgialib.h +++ b/arch/mips/include/asm/sgialib.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #ifndef _ASM_SGIALIB_H | 11 | #ifndef _ASM_SGIALIB_H |
12 | #define _ASM_SGIALIB_H | 12 | #define _ASM_SGIALIB_H |
13 | 13 | ||
14 | #include <linux/compiler.h> | ||
14 | #include <asm/sgiarcs.h> | 15 | #include <asm/sgiarcs.h> |
15 | 16 | ||
16 | extern struct linux_romvec *romvec; | 17 | extern struct linux_romvec *romvec; |
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt); | |||
70 | extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); | 71 | extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); |
71 | 72 | ||
72 | /* Misc. routines. */ | 73 | /* Misc. routines. */ |
73 | extern VOID ArcReboot(VOID) __attribute__((noreturn)); | 74 | extern VOID ArcHalt(VOID) __noreturn; |
74 | extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); | 75 | extern VOID ArcPowerDown(VOID) __noreturn; |
76 | extern VOID ArcRestart(VOID) __noreturn; | ||
77 | extern VOID ArcReboot(VOID) __noreturn; | ||
78 | extern VOID ArcEnterInteractiveMode(VOID) __noreturn; | ||
75 | extern VOID ArcFlushAllCaches(VOID); | 79 | extern VOID ArcFlushAllCaches(VOID); |
76 | extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); | 80 | extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); |
77 | 81 | ||
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h deleted file mode 100644 index dd9a762646fc..000000000000 --- a/arch/mips/include/asm/siginfo.h +++ /dev/null | |||
@@ -1,29 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle | ||
7 | * Copyright (C) 2000, 2001 Silicon Graphics, Inc. | ||
8 | */ | ||
9 | #ifndef _ASM_SIGINFO_H | ||
10 | #define _ASM_SIGINFO_H | ||
11 | |||
12 | #include <uapi/asm/siginfo.h> | ||
13 | |||
14 | |||
15 | /* | ||
16 | * Duplicated here because of <asm-generic/siginfo.h> braindamage ... | ||
17 | */ | ||
18 | #include <linux/string.h> | ||
19 | |||
20 | static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) | ||
21 | { | ||
22 | if (from->si_code < 0) | ||
23 | memcpy(to, from, sizeof(*to)); | ||
24 | else | ||
25 | /* _sigchld is currently the largest know union member */ | ||
26 | memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); | ||
27 | } | ||
28 | |||
29 | #endif /* _ASM_SIGINFO_H */ | ||
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index c6d06d383ef9..b4548690ade9 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h | |||
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
89 | " subu %[ticket], %[ticket], 1 \n" | 89 | " subu %[ticket], %[ticket], 1 \n" |
90 | " .previous \n" | 90 | " .previous \n" |
91 | " .set pop \n" | 91 | " .set pop \n" |
92 | : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), | 92 | : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), |
93 | [serving_now_ptr] "+m" (lock->h.serving_now), | 93 | [serving_now_ptr] "+m" (lock->h.serving_now), |
94 | [ticket] "=&r" (tmp), | 94 | [ticket] "=&r" (tmp), |
95 | [my_ticket] "=&r" (my_ticket) | 95 | [my_ticket] "=&r" (my_ticket) |
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) | |||
122 | " subu %[ticket], %[ticket], 1 \n" | 122 | " subu %[ticket], %[ticket], 1 \n" |
123 | " .previous \n" | 123 | " .previous \n" |
124 | " .set pop \n" | 124 | " .set pop \n" |
125 | : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), | 125 | : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), |
126 | [serving_now_ptr] "+m" (lock->h.serving_now), | 126 | [serving_now_ptr] "+m" (lock->h.serving_now), |
127 | [ticket] "=&r" (tmp), | 127 | [ticket] "=&r" (tmp), |
128 | [my_ticket] "=&r" (my_ticket) | 128 | [my_ticket] "=&r" (my_ticket) |
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) | |||
164 | " li %[ticket], 0 \n" | 164 | " li %[ticket], 0 \n" |
165 | " .previous \n" | 165 | " .previous \n" |
166 | " .set pop \n" | 166 | " .set pop \n" |
167 | : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), | 167 | : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), |
168 | [ticket] "=&r" (tmp), | 168 | [ticket] "=&r" (tmp), |
169 | [my_ticket] "=&r" (tmp2), | 169 | [my_ticket] "=&r" (tmp2), |
170 | [now_serving] "=&r" (tmp3) | 170 | [now_serving] "=&r" (tmp3) |
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) | |||
188 | " li %[ticket], 0 \n" | 188 | " li %[ticket], 0 \n" |
189 | " .previous \n" | 189 | " .previous \n" |
190 | " .set pop \n" | 190 | " .set pop \n" |
191 | : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), | 191 | : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), |
192 | [ticket] "=&r" (tmp), | 192 | [ticket] "=&r" (tmp), |
193 | [my_ticket] "=&r" (tmp2), | 193 | [my_ticket] "=&r" (tmp2), |
194 | [now_serving] "=&r" (tmp3) | 194 | [now_serving] "=&r" (tmp3) |
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
235 | " beqzl %1, 1b \n" | 235 | " beqzl %1, 1b \n" |
236 | " nop \n" | 236 | " nop \n" |
237 | " .set reorder \n" | 237 | " .set reorder \n" |
238 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 238 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
239 | : GCC_OFF12_ASM() (rw->lock) | 239 | : GCC_OFF_SMALL_ASM() (rw->lock) |
240 | : "memory"); | 240 | : "memory"); |
241 | } else { | 241 | } else { |
242 | do { | 242 | do { |
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
245 | " bltz %1, 1b \n" | 245 | " bltz %1, 1b \n" |
246 | " addu %1, 1 \n" | 246 | " addu %1, 1 \n" |
247 | "2: sc %1, %0 \n" | 247 | "2: sc %1, %0 \n" |
248 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 248 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
249 | : GCC_OFF12_ASM() (rw->lock) | 249 | : GCC_OFF_SMALL_ASM() (rw->lock) |
250 | : "memory"); | 250 | : "memory"); |
251 | } while (unlikely(!tmp)); | 251 | } while (unlikely(!tmp)); |
252 | } | 252 | } |
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw) | |||
254 | smp_llsc_mb(); | 254 | smp_llsc_mb(); |
255 | } | 255 | } |
256 | 256 | ||
257 | /* Note the use of sub, not subu which will make the kernel die with an | ||
258 | overflow exception if we ever try to unlock an rwlock that is already | ||
259 | unlocked or is being held by a writer. */ | ||
260 | static inline void arch_read_unlock(arch_rwlock_t *rw) | 257 | static inline void arch_read_unlock(arch_rwlock_t *rw) |
261 | { | 258 | { |
262 | unsigned int tmp; | 259 | unsigned int tmp; |
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) | |||
266 | if (R10000_LLSC_WAR) { | 263 | if (R10000_LLSC_WAR) { |
267 | __asm__ __volatile__( | 264 | __asm__ __volatile__( |
268 | "1: ll %1, %2 # arch_read_unlock \n" | 265 | "1: ll %1, %2 # arch_read_unlock \n" |
269 | " sub %1, 1 \n" | 266 | " addiu %1, 1 \n" |
270 | " sc %1, %0 \n" | 267 | " sc %1, %0 \n" |
271 | " beqzl %1, 1b \n" | 268 | " beqzl %1, 1b \n" |
272 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 269 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
273 | : GCC_OFF12_ASM() (rw->lock) | 270 | : GCC_OFF_SMALL_ASM() (rw->lock) |
274 | : "memory"); | 271 | : "memory"); |
275 | } else { | 272 | } else { |
276 | do { | 273 | do { |
277 | __asm__ __volatile__( | 274 | __asm__ __volatile__( |
278 | "1: ll %1, %2 # arch_read_unlock \n" | 275 | "1: ll %1, %2 # arch_read_unlock \n" |
279 | " sub %1, 1 \n" | 276 | " addiu %1, -1 \n" |
280 | " sc %1, %0 \n" | 277 | " sc %1, %0 \n" |
281 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 278 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
282 | : GCC_OFF12_ASM() (rw->lock) | 279 | : GCC_OFF_SMALL_ASM() (rw->lock) |
283 | : "memory"); | 280 | : "memory"); |
284 | } while (unlikely(!tmp)); | 281 | } while (unlikely(!tmp)); |
285 | } | 282 | } |
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
299 | " beqzl %1, 1b \n" | 296 | " beqzl %1, 1b \n" |
300 | " nop \n" | 297 | " nop \n" |
301 | " .set reorder \n" | 298 | " .set reorder \n" |
302 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 299 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
303 | : GCC_OFF12_ASM() (rw->lock) | 300 | : GCC_OFF_SMALL_ASM() (rw->lock) |
304 | : "memory"); | 301 | : "memory"); |
305 | } else { | 302 | } else { |
306 | do { | 303 | do { |
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) | |||
309 | " bnez %1, 1b \n" | 306 | " bnez %1, 1b \n" |
310 | " lui %1, 0x8000 \n" | 307 | " lui %1, 0x8000 \n" |
311 | "2: sc %1, %0 \n" | 308 | "2: sc %1, %0 \n" |
312 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) | 309 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) |
313 | : GCC_OFF12_ASM() (rw->lock) | 310 | : GCC_OFF_SMALL_ASM() (rw->lock) |
314 | : "memory"); | 311 | : "memory"); |
315 | } while (unlikely(!tmp)); | 312 | } while (unlikely(!tmp)); |
316 | } | 313 | } |
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
349 | __WEAK_LLSC_MB | 346 | __WEAK_LLSC_MB |
350 | " li %2, 1 \n" | 347 | " li %2, 1 \n" |
351 | "2: \n" | 348 | "2: \n" |
352 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) | 349 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) |
353 | : GCC_OFF12_ASM() (rw->lock) | 350 | : GCC_OFF_SMALL_ASM() (rw->lock) |
354 | : "memory"); | 351 | : "memory"); |
355 | } else { | 352 | } else { |
356 | __asm__ __volatile__( | 353 | __asm__ __volatile__( |
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) | |||
366 | __WEAK_LLSC_MB | 363 | __WEAK_LLSC_MB |
367 | " li %2, 1 \n" | 364 | " li %2, 1 \n" |
368 | "2: \n" | 365 | "2: \n" |
369 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) | 366 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) |
370 | : GCC_OFF12_ASM() (rw->lock) | 367 | : GCC_OFF_SMALL_ASM() (rw->lock) |
371 | : "memory"); | 368 | : "memory"); |
372 | } | 369 | } |
373 | 370 | ||
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
393 | " li %2, 1 \n" | 390 | " li %2, 1 \n" |
394 | " .set reorder \n" | 391 | " .set reorder \n" |
395 | "2: \n" | 392 | "2: \n" |
396 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) | 393 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) |
397 | : GCC_OFF12_ASM() (rw->lock) | 394 | : GCC_OFF_SMALL_ASM() (rw->lock) |
398 | : "memory"); | 395 | : "memory"); |
399 | } else { | 396 | } else { |
400 | do { | 397 | do { |
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) | |||
406 | " sc %1, %0 \n" | 403 | " sc %1, %0 \n" |
407 | " li %2, 1 \n" | 404 | " li %2, 1 \n" |
408 | "2: \n" | 405 | "2: \n" |
409 | : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), | 406 | : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), |
410 | "=&r" (ret) | 407 | "=&r" (ret) |
411 | : GCC_OFF12_ASM() (rw->lock) | 408 | : GCC_OFF_SMALL_ASM() (rw->lock) |
412 | : "memory"); | 409 | : "memory"); |
413 | } while (unlikely(!tmp)); | 410 | } while (unlikely(!tmp)); |
414 | 411 | ||
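The GCC_OFF12_ASM() to GCC_OFF_SMALL_ASM() rename throughout these spinlock and rwlock routines exists because MIPS R6 shrinks the ll/sc offset field from 16 bits to 9 bits, so the inline-asm memory constraint has to be chosen per ISA instead of assuming the old wide offset. A minimal sketch of the idea outside the kernel follows; the constraint letters ("ZC" for an ll/sc-compatible address, "R" for the classic 16-bit offset) come from GCC's MIPS constraint documentation, and the #if test is an assumption rather than the kernel's asm/compiler.h:

    /*
     * Minimal sketch, not the kernel's header: pick an inline-asm memory
     * constraint whose offset range matches what ll/sc can encode on the
     * target ISA ("ZC" = ll/sc-compatible address, "R" = 16-bit offset).
     */
    #if defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
    # define GCC_OFF_SMALL_ASM() "ZC"
    #else
    # define GCC_OFF_SMALL_ASM() "R"
    #endif

    /* An LL/SC increment written in the same style as the lock code above. */
    static inline void llsc_inc(volatile unsigned int *p)
    {
            unsigned int tmp;

            __asm__ __volatile__(
            "1:     ll      %[tmp], %[mem]          \n"
            "       addiu   %[tmp], %[tmp], 1       \n"
            "       sc      %[tmp], %[mem]          \n"
            "       beqz    %[tmp], 1b              \n"
            : [tmp] "=&r" (tmp), [mem] "+" GCC_OFF_SMALL_ASM() (*p)
            : /* no plain inputs */
            : "memory");
    }

The [mem] "+" GCC_OFF_SMALL_ASM() (*p) operand mirrors the [ticket_ptr] and rw->lock operands above, which is why only the macro name changes in these hunks.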
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h index 0b89006e4907..0f90d88e464d 100644 --- a/arch/mips/include/asm/spram.h +++ b/arch/mips/include/asm/spram.h | |||
@@ -1,10 +1,10 @@ | |||
1 | #ifndef _MIPS_SPRAM_H | 1 | #ifndef _MIPS_SPRAM_H |
2 | #define _MIPS_SPRAM_H | 2 | #define _MIPS_SPRAM_H |
3 | 3 | ||
4 | #ifdef CONFIG_CPU_MIPSR2 | 4 | #if defined(CONFIG_MIPS_SPRAM) |
5 | extern __init void spram_config(void); | 5 | extern __init void spram_config(void); |
6 | #else | 6 | #else |
7 | static inline void spram_config(void) { }; | 7 | static inline void spram_config(void) { }; |
8 | #endif /* CONFIG_CPU_MIPSR2 */ | 8 | #endif /* CONFIG_MIPS_SPRAM */ |
9 | 9 | ||
10 | #endif /* _MIPS_SPRAM_H */ | 10 | #endif /* _MIPS_SPRAM_H */ |
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index b188c797565c..28d6d9364bd1 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h | |||
@@ -40,7 +40,7 @@ | |||
40 | LONG_S v1, PT_HI(sp) | 40 | LONG_S v1, PT_HI(sp) |
41 | mflhxu v1 | 41 | mflhxu v1 |
42 | LONG_S v1, PT_ACX(sp) | 42 | LONG_S v1, PT_ACX(sp) |
43 | #else | 43 | #elif !defined(CONFIG_CPU_MIPSR6) |
44 | mfhi v1 | 44 | mfhi v1 |
45 | #endif | 45 | #endif |
46 | #ifdef CONFIG_32BIT | 46 | #ifdef CONFIG_32BIT |
@@ -50,7 +50,7 @@ | |||
50 | LONG_S $10, PT_R10(sp) | 50 | LONG_S $10, PT_R10(sp) |
51 | LONG_S $11, PT_R11(sp) | 51 | LONG_S $11, PT_R11(sp) |
52 | LONG_S $12, PT_R12(sp) | 52 | LONG_S $12, PT_R12(sp) |
53 | #ifndef CONFIG_CPU_HAS_SMARTMIPS | 53 | #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) |
54 | LONG_S v1, PT_HI(sp) | 54 | LONG_S v1, PT_HI(sp) |
55 | mflo v1 | 55 | mflo v1 |
56 | #endif | 56 | #endif |
@@ -58,7 +58,7 @@ | |||
58 | LONG_S $14, PT_R14(sp) | 58 | LONG_S $14, PT_R14(sp) |
59 | LONG_S $15, PT_R15(sp) | 59 | LONG_S $15, PT_R15(sp) |
60 | LONG_S $24, PT_R24(sp) | 60 | LONG_S $24, PT_R24(sp) |
61 | #ifndef CONFIG_CPU_HAS_SMARTMIPS | 61 | #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) |
62 | LONG_S v1, PT_LO(sp) | 62 | LONG_S v1, PT_LO(sp) |
63 | #endif | 63 | #endif |
64 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 64 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
@@ -226,7 +226,7 @@ | |||
226 | mtlhx $24 | 226 | mtlhx $24 |
227 | LONG_L $24, PT_LO(sp) | 227 | LONG_L $24, PT_LO(sp) |
228 | mtlhx $24 | 228 | mtlhx $24 |
229 | #else | 229 | #elif !defined(CONFIG_CPU_MIPSR6) |
230 | LONG_L $24, PT_LO(sp) | 230 | LONG_L $24, PT_LO(sp) |
231 | mtlo $24 | 231 | mtlo $24 |
232 | LONG_L $24, PT_HI(sp) | 232 | LONG_L $24, PT_HI(sp) |
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index b928b6f898cd..e92d6c4b5ed1 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h | |||
@@ -75,9 +75,12 @@ do { \ | |||
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #define __clear_software_ll_bit() \ | 77 | #define __clear_software_ll_bit() \ |
78 | do { \ | 78 | do { if (cpu_has_rw_llb) { \ |
79 | if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ | 79 | write_c0_lladdr(0); \ |
80 | ll_bit = 0; \ | 80 | } else { \ |
81 | if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\ | ||
82 | ll_bit = 0; \ | ||
83 | } \ | ||
81 | } while (0) | 84 | } while (0) |
82 | 85 | ||
83 | #define switch_to(prev, next, last) \ | 86 | #define switch_to(prev, next, last) \ |
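The new cpu_has_rw_llb path clears the LL bit through the LLAddr register (Config5.LLB makes it software-writable on R6-class cores) instead of relying only on the software ll_bit variable. Rewritten as a plain function the logic looks like the sketch below; the helper names (cpu_has_rw_llb, write_c0_lladdr, ll_bit) are the ones used in the macro above and are assumed to be in scope:

    /*
     * Sketch of __clear_software_ll_bit() as a function.  After a context
     * switch, any link established by the previous task's LL must be
     * broken, otherwise its SC could wrongly succeed in the next task.
     */
    static inline void clear_ll_bit(void)
    {
            if (cpu_has_rw_llb)             /* Config5.LLB: LLAddr/LLB is writable */
                    write_c0_lladdr(0);     /* clearing LLAddr drops the link bit */
            else if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)
                    ll_bit = 0;             /* software-emulated LL bit */
    }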
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 9e1295f874f0..55ed6602204c 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h | |||
@@ -28,7 +28,7 @@ struct thread_info { | |||
28 | unsigned long tp_value; /* thread pointer */ | 28 | unsigned long tp_value; /* thread pointer */ |
29 | __u32 cpu; /* current CPU */ | 29 | __u32 cpu; /* current CPU */ |
30 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 30 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
31 | 31 | int r2_emul_return; /* 1 => Returning from R2 emulator */ | |
32 | mm_segment_t addr_limit; /* | 32 | mm_segment_t addr_limit; /* |
33 | * thread address space limit: | 33 | * thread address space limit: |
34 | * 0x7fffffff for user-thread | 34 | * 0x7fffffff for user-thread |
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 89c22433b1c6..fc0cf5ac0cf7 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h | |||
@@ -21,20 +21,20 @@ | |||
21 | enum major_op { | 21 | enum major_op { |
22 | spec_op, bcond_op, j_op, jal_op, | 22 | spec_op, bcond_op, j_op, jal_op, |
23 | beq_op, bne_op, blez_op, bgtz_op, | 23 | beq_op, bne_op, blez_op, bgtz_op, |
24 | addi_op, addiu_op, slti_op, sltiu_op, | 24 | addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op, |
25 | andi_op, ori_op, xori_op, lui_op, | 25 | andi_op, ori_op, xori_op, lui_op, |
26 | cop0_op, cop1_op, cop2_op, cop1x_op, | 26 | cop0_op, cop1_op, cop2_op, cop1x_op, |
27 | beql_op, bnel_op, blezl_op, bgtzl_op, | 27 | beql_op, bnel_op, blezl_op, bgtzl_op, |
28 | daddi_op, daddiu_op, ldl_op, ldr_op, | 28 | daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op, |
29 | spec2_op, jalx_op, mdmx_op, spec3_op, | 29 | spec2_op, jalx_op, mdmx_op, spec3_op, |
30 | lb_op, lh_op, lwl_op, lw_op, | 30 | lb_op, lh_op, lwl_op, lw_op, |
31 | lbu_op, lhu_op, lwr_op, lwu_op, | 31 | lbu_op, lhu_op, lwr_op, lwu_op, |
32 | sb_op, sh_op, swl_op, sw_op, | 32 | sb_op, sh_op, swl_op, sw_op, |
33 | sdl_op, sdr_op, swr_op, cache_op, | 33 | sdl_op, sdr_op, swr_op, cache_op, |
34 | ll_op, lwc1_op, lwc2_op, pref_op, | 34 | ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, |
35 | lld_op, ldc1_op, ldc2_op, ld_op, | 35 | lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, |
36 | sc_op, swc1_op, swc2_op, major_3b_op, | 36 | sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, |
37 | scd_op, sdc1_op, sdc2_op, sd_op | 37 | scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op |
38 | }; | 38 | }; |
39 | 39 | ||
40 | /* | 40 | /* |
@@ -83,9 +83,12 @@ enum spec3_op { | |||
83 | swe_op = 0x1f, bshfl_op = 0x20, | 83 | swe_op = 0x1f, bshfl_op = 0x20, |
84 | swle_op = 0x21, swre_op = 0x22, | 84 | swle_op = 0x21, swre_op = 0x22, |
85 | prefe_op = 0x23, dbshfl_op = 0x24, | 85 | prefe_op = 0x23, dbshfl_op = 0x24, |
86 | lbue_op = 0x28, lhue_op = 0x29, | 86 | cache6_op = 0x25, sc6_op = 0x26, |
87 | lbe_op = 0x2c, lhe_op = 0x2d, | 87 | scd6_op = 0x27, lbue_op = 0x28, |
88 | lle_op = 0x2e, lwe_op = 0x2f, | 88 | lhue_op = 0x29, lbe_op = 0x2c, |
89 | lhe_op = 0x2d, lle_op = 0x2e, | ||
90 | lwe_op = 0x2f, pref6_op = 0x35, | ||
91 | ll6_op = 0x36, lld6_op = 0x37, | ||
89 | rdhwr_op = 0x3b | 92 | rdhwr_op = 0x3b |
90 | }; | 93 | }; |
91 | 94 | ||
@@ -112,7 +115,8 @@ enum cop_op { | |||
112 | mfhc_op = 0x03, mtc_op = 0x04, | 115 | mfhc_op = 0x03, mtc_op = 0x04, |
113 | dmtc_op = 0x05, ctc_op = 0x06, | 116 | dmtc_op = 0x05, ctc_op = 0x06, |
114 | mthc0_op = 0x06, mthc_op = 0x07, | 117 | mthc0_op = 0x06, mthc_op = 0x07, |
115 | bc_op = 0x08, cop_op = 0x10, | 118 | bc_op = 0x08, bc1eqz_op = 0x09, |
119 | bc1nez_op = 0x0d, cop_op = 0x10, | ||
116 | copm_op = 0x18 | 120 | copm_op = 0x18 |
117 | }; | 121 | }; |
118 | 122 | ||
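MIPS R6 recycles several pre-R6 major opcodes for its compact branches, which is why the enum gains aliases (cbcond0_op = addi_op, bc6_op = lwc2_op, and so on) rather than new numeric values. A self-contained decoder sketch shows how the same 6-bit field must then be dispatched on the running ISA; the opcode values and names below are written out only for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* The same major opcode means ADDI on pre-R6 and BOVC/BEQC/... on R6. */
    enum major_op_sketch {
            addi_op = 0x08, cbcond0_op = addi_op,
            lwc2_op = 0x32, bc6_op    = lwc2_op,
    };

    static const char *decode_major(uint32_t insn, bool is_r6)
    {
            switch ((insn >> 26) & 0x3f) {  /* bits 31..26: major opcode */
            case cbcond0_op:                /* == addi_op */
                    return is_r6 ? "bovc/beqc/beqzalc" : "addi";
            case bc6_op:                    /* == lwc2_op */
                    return is_r6 ? "bc" : "lwc2";
            default:
                    return "other";
            }
    }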
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index d08f83f19db5..2cb7fdead570 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h | |||
@@ -16,13 +16,6 @@ | |||
16 | #define HAVE_ARCH_SIGINFO_T | 16 | #define HAVE_ARCH_SIGINFO_T |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked | ||
20 | * by design ... | ||
21 | */ | ||
22 | #define HAVE_ARCH_COPY_SIGINFO | ||
23 | struct siginfo; | ||
24 | |||
25 | /* | ||
26 | * Careful to keep union _sifields from shifting ... | 19 | * Careful to keep union _sifields from shifting ... |
27 | */ | 20 | */ |
28 | #if _MIPS_SZLONG == 32 | 21 | #if _MIPS_SZLONG == 32 |
@@ -35,8 +28,9 @@ struct siginfo; | |||
35 | 28 | ||
36 | #define __ARCH_SIGSYS | 29 | #define __ARCH_SIGSYS |
37 | 30 | ||
38 | #include <asm-generic/siginfo.h> | 31 | #include <uapi/asm-generic/siginfo.h> |
39 | 32 | ||
33 | /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ | ||
40 | typedef struct siginfo { | 34 | typedef struct siginfo { |
41 | int si_signo; | 35 | int si_signo; |
42 | int si_code; | 36 | int si_code; |
@@ -124,5 +118,6 @@ typedef struct siginfo { | |||
124 | #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ | 118 | #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ |
125 | #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ | 119 | #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ |
126 | 120 | ||
121 | #include <asm-generic/siginfo.h> | ||
127 | 122 | ||
128 | #endif /* _UAPI_ASM_SIGINFO_H */ | 123 | #endif /* _UAPI_ASM_SIGINFO_H */ |
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index c454525e7695..9dd051edb411 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c | |||
@@ -140,10 +140,18 @@ static void qi_lb60_nand_ident(struct platform_device *pdev, | |||
140 | 140 | ||
141 | static struct jz_nand_platform_data qi_lb60_nand_pdata = { | 141 | static struct jz_nand_platform_data qi_lb60_nand_pdata = { |
142 | .ident_callback = qi_lb60_nand_ident, | 142 | .ident_callback = qi_lb60_nand_ident, |
143 | .busy_gpio = 94, | ||
144 | .banks = { 1 }, | 143 | .banks = { 1 }, |
145 | }; | 144 | }; |
146 | 145 | ||
146 | static struct gpiod_lookup_table qi_lb60_nand_gpio_table = { | ||
147 | .dev_id = "jz4740-nand.0", | ||
148 | .table = { | ||
149 | GPIO_LOOKUP("Bank C", 30, "busy", 0), | ||
150 | { }, | ||
151 | }, | ||
152 | }; | ||
153 | |||
154 | |||
147 | /* Keyboard*/ | 155 | /* Keyboard*/ |
148 | 156 | ||
149 | #define KEY_QI_QI KEY_F13 | 157 | #define KEY_QI_QI KEY_F13 |
@@ -472,6 +480,7 @@ static int __init qi_lb60_init_platform_devices(void) | |||
472 | jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; | 480 | jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; |
473 | 481 | ||
474 | gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); | 482 | gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); |
483 | gpiod_add_lookup_table(&qi_lb60_nand_gpio_table); | ||
475 | 484 | ||
476 | jz4740_serial_device_register(); | 485 | jz4740_serial_device_register(); |
477 | 486 | ||
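The busy_gpio = 94 platform-data field is replaced by a GPIO lookup table: global GPIO 94 on the JZ4740 is pin 30 of "Bank C" (32 pins per bank), so the lookup entry describes the same line, now published under the connection ID "busy" for device "jz4740-nand.0". A hedged sketch of the consuming side follows; the driver is assumed to request the descriptor roughly like this, it is not part of this patch:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    /* Read the NAND busy line via the descriptor published in the table. */
    static int nand_busy_state(struct device *dev)
    {
            struct gpio_desc *busy;

            /* "busy" matches the con_id in qi_lb60_nand_gpio_table above. */
            busy = devm_gpiod_get_optional(dev, "busy", GPIOD_IN);
            if (IS_ERR(busy))
                    return PTR_ERR(busy);

            return busy ? gpiod_get_value_cansleep(busy) : -ENODEV;
    }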
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 92987d1bbe5f..d3d2ff2d76dc 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile | |||
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o | |||
52 | obj-$(CONFIG_MIPS_CMP) += smp-cmp.o | 52 | obj-$(CONFIG_MIPS_CMP) += smp-cmp.o |
53 | obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o | 53 | obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o |
54 | obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o | 54 | obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o |
55 | obj-$(CONFIG_CPU_MIPSR2) += spram.o | 55 | obj-$(CONFIG_MIPS_SPRAM) += spram.o |
56 | 56 | ||
57 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o | 57 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o |
58 | obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o | 58 | obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o |
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | |||
90 | obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o | 90 | obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o |
91 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o | 91 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o |
92 | obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o | 92 | obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o |
93 | obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o | ||
93 | 94 | ||
94 | CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) | 95 | CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) |
95 | 96 | ||
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 3b2dfdb4865f..750d67ac41e9 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c | |||
@@ -97,6 +97,7 @@ void output_thread_info_defines(void) | |||
97 | OFFSET(TI_TP_VALUE, thread_info, tp_value); | 97 | OFFSET(TI_TP_VALUE, thread_info, tp_value); |
98 | OFFSET(TI_CPU, thread_info, cpu); | 98 | OFFSET(TI_CPU, thread_info, cpu); |
99 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); | 99 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); |
100 | OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return); | ||
100 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); | 101 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); |
101 | OFFSET(TI_REGS, thread_info, regs); | 102 | OFFSET(TI_REGS, thread_info, regs); |
102 | DEFINE(_THREAD_SIZE, THREAD_SIZE); | 103 | DEFINE(_THREAD_SIZE, THREAD_SIZE); |
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void) | |||
381 | OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); | 382 | OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); |
382 | OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); | 383 | OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); |
383 | OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); | 384 | OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); |
385 | OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); | ||
384 | OFFSET(THREAD_CP2, task_struct, thread.cp2); | 386 | OFFSET(THREAD_CP2, task_struct, thread.cp2); |
385 | OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); | 387 | OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); |
386 | BLANK(); | 388 | BLANK(); |
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d7d99d601cc..c2e0f45ddf6c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/fpu.h> | 16 | #include <asm/fpu.h> |
17 | #include <asm/fpu_emulator.h> | 17 | #include <asm/fpu_emulator.h> |
18 | #include <asm/inst.h> | 18 | #include <asm/inst.h> |
19 | #include <asm/mips-r2-to-r6-emul.h> | ||
19 | #include <asm/ptrace.h> | 20 | #include <asm/ptrace.h> |
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | 22 | ||
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) | |||
399 | * @returns: -EFAULT on error and forces SIGBUS, and on success | 400 | * @returns: -EFAULT on error and forces SIGBUS, and on success |
400 | * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after | 401 | * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after |
401 | * evaluating the branch. | 402 | * evaluating the branch. |
403 | * | ||
404 | * MIPS R6 Compact branches and forbidden slots: | ||
405 | * Compact branches do not throw exceptions because they do | ||
406 | * not have delay slots. The forbidden slot instruction ($PC+4) | ||
407 | * is only executed if the branch was not taken. Otherwise the | ||
408 | * forbidden slot is skipped entirely. This means that the | ||
409 | * only possible reason to be here because of a MIPS R6 compact | ||
410 | * branch instruction is that its forbidden slot has raised an exception. | ||
411 | * In that case the branch was not taken, so the EPC can be safely | ||
412 | * set to EPC + 8. | ||
402 | */ | 413 | */ |
403 | int __compute_return_epc_for_insn(struct pt_regs *regs, | 414 | int __compute_return_epc_for_insn(struct pt_regs *regs, |
404 | union mips_instruction insn) | 415 | union mips_instruction insn) |
405 | { | 416 | { |
406 | unsigned int bit, fcr31, dspcontrol; | 417 | unsigned int bit, fcr31, dspcontrol, reg; |
407 | long epc = regs->cp0_epc; | 418 | long epc = regs->cp0_epc; |
408 | int ret = 0; | 419 | int ret = 0; |
409 | 420 | ||
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
417 | regs->regs[insn.r_format.rd] = epc + 8; | 428 | regs->regs[insn.r_format.rd] = epc + 8; |
418 | /* Fall through */ | 429 | /* Fall through */ |
419 | case jr_op: | 430 | case jr_op: |
431 | if (NO_R6EMU && insn.r_format.func == jr_op) | ||
432 | goto sigill_r6; | ||
420 | regs->cp0_epc = regs->regs[insn.r_format.rs]; | 433 | regs->cp0_epc = regs->regs[insn.r_format.rs]; |
421 | break; | 434 | break; |
422 | } | 435 | } |
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
429 | */ | 442 | */ |
430 | case bcond_op: | 443 | case bcond_op: |
431 | switch (insn.i_format.rt) { | 444 | switch (insn.i_format.rt) { |
432 | case bltz_op: | ||
433 | case bltzl_op: | 445 | case bltzl_op: |
446 | if (NO_R6EMU) | ||
447 | goto sigill_r6; | ||
448 | case bltz_op: | ||
434 | if ((long)regs->regs[insn.i_format.rs] < 0) { | 449 | if ((long)regs->regs[insn.i_format.rs] < 0) { |
435 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 450 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
436 | if (insn.i_format.rt == bltzl_op) | 451 | if (insn.i_format.rt == bltzl_op) |
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
440 | regs->cp0_epc = epc; | 455 | regs->cp0_epc = epc; |
441 | break; | 456 | break; |
442 | 457 | ||
443 | case bgez_op: | ||
444 | case bgezl_op: | 458 | case bgezl_op: |
459 | if (NO_R6EMU) | ||
460 | goto sigill_r6; | ||
461 | case bgez_op: | ||
445 | if ((long)regs->regs[insn.i_format.rs] >= 0) { | 462 | if ((long)regs->regs[insn.i_format.rs] >= 0) { |
446 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 463 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
447 | if (insn.i_format.rt == bgezl_op) | 464 | if (insn.i_format.rt == bgezl_op) |
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
453 | 470 | ||
454 | case bltzal_op: | 471 | case bltzal_op: |
455 | case bltzall_op: | 472 | case bltzall_op: |
473 | if (NO_R6EMU && (insn.i_format.rs || | ||
474 | insn.i_format.rt == bltzall_op)) { | ||
475 | ret = -SIGILL; | ||
476 | break; | ||
477 | } | ||
456 | regs->regs[31] = epc + 8; | 478 | regs->regs[31] = epc + 8; |
479 | /* | ||
480 | * OK we are here either because we hit a NAL | ||
481 | * instruction or because we are emulating an | ||
482 | * old bltzal{,l} one. Let's figure out what the | ||
483 | * case really is. | ||
484 | */ | ||
485 | if (!insn.i_format.rs) { | ||
486 | /* | ||
487 | * NAL or BLTZAL with rs == 0 | ||
488 | * Doesn't matter if we are R6 or not. The | ||
489 | * result is the same | ||
490 | */ | ||
491 | regs->cp0_epc += 4 + | ||
492 | (insn.i_format.simmediate << 2); | ||
493 | break; | ||
494 | } | ||
495 | /* Now do the real thing for non-R6 BLTZAL{,L} */ | ||
457 | if ((long)regs->regs[insn.i_format.rs] < 0) { | 496 | if ((long)regs->regs[insn.i_format.rs] < 0) { |
458 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 497 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
459 | if (insn.i_format.rt == bltzall_op) | 498 | if (insn.i_format.rt == bltzall_op) |
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
465 | 504 | ||
466 | case bgezal_op: | 505 | case bgezal_op: |
467 | case bgezall_op: | 506 | case bgezall_op: |
507 | if (NO_R6EMU && (insn.i_format.rs || | ||
508 | insn.i_format.rt == bgezall_op)) { | ||
509 | ret = -SIGILL; | ||
510 | break; | ||
511 | } | ||
468 | regs->regs[31] = epc + 8; | 512 | regs->regs[31] = epc + 8; |
513 | /* | ||
514 | * OK we are here either because we hit a BAL | ||
515 | * instruction or because we are emulating an | ||
516 | * old bgezal{,l} one. Let's figure out what the | ||
517 | * case really is. | ||
518 | */ | ||
519 | if (!insn.i_format.rs) { | ||
520 | /* | ||
521 | * BAL or BGEZAL with rs == 0 | ||
522 | * Doesn't matter if we are R6 or not. The | ||
523 | * result is the same | ||
524 | */ | ||
525 | regs->cp0_epc += 4 + | ||
526 | (insn.i_format.simmediate << 2); | ||
527 | break; | ||
528 | } | ||
529 | /* Now do the real thing for non-R6 BGEZAL{,L} */ | ||
469 | if ((long)regs->regs[insn.i_format.rs] >= 0) { | 530 | if ((long)regs->regs[insn.i_format.rs] >= 0) { |
470 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 531 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
471 | if (insn.i_format.rt == bgezall_op) | 532 | if (insn.i_format.rt == bgezall_op) |
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
477 | 538 | ||
478 | case bposge32_op: | 539 | case bposge32_op: |
479 | if (!cpu_has_dsp) | 540 | if (!cpu_has_dsp) |
480 | goto sigill; | 541 | goto sigill_dsp; |
481 | 542 | ||
482 | dspcontrol = rddsp(0x01); | 543 | dspcontrol = rddsp(0x01); |
483 | 544 | ||
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
508 | /* | 569 | /* |
509 | * These are conditional and in i_format. | 570 | * These are conditional and in i_format. |
510 | */ | 571 | */ |
511 | case beq_op: | ||
512 | case beql_op: | 572 | case beql_op: |
573 | if (NO_R6EMU) | ||
574 | goto sigill_r6; | ||
575 | case beq_op: | ||
513 | if (regs->regs[insn.i_format.rs] == | 576 | if (regs->regs[insn.i_format.rs] == |
514 | regs->regs[insn.i_format.rt]) { | 577 | regs->regs[insn.i_format.rt]) { |
515 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 578 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
520 | regs->cp0_epc = epc; | 583 | regs->cp0_epc = epc; |
521 | break; | 584 | break; |
522 | 585 | ||
523 | case bne_op: | ||
524 | case bnel_op: | 586 | case bnel_op: |
587 | if (NO_R6EMU) | ||
588 | goto sigill_r6; | ||
589 | case bne_op: | ||
525 | if (regs->regs[insn.i_format.rs] != | 590 | if (regs->regs[insn.i_format.rs] != |
526 | regs->regs[insn.i_format.rt]) { | 591 | regs->regs[insn.i_format.rt]) { |
527 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 592 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
532 | regs->cp0_epc = epc; | 597 | regs->cp0_epc = epc; |
533 | break; | 598 | break; |
534 | 599 | ||
535 | case blez_op: /* not really i_format */ | 600 | case blezl_op: /* not really i_format */ |
536 | case blezl_op: | 601 | if (NO_R6EMU) |
602 | goto sigill_r6; | ||
603 | case blez_op: | ||
604 | /* | ||
605 | * Compact branches for R6 for the | ||
606 | * blez and blezl opcodes. | ||
607 | * BLEZ | rs = 0 | rt != 0 == BLEZALC | ||
608 | * BLEZ | rs = rt != 0 == BGEZALC | ||
609 | * BLEZ | rs != 0 | rt != 0 == BGEUC | ||
610 | * BLEZL | rs = 0 | rt != 0 == BLEZC | ||
611 | * BLEZL | rs = rt != 0 == BGEZC | ||
612 | * BLEZL | rs != 0 | rt != 0 == BGEC | ||
613 | * | ||
614 | * For real BLEZ{,L}, rt is always 0. | ||
615 | */ | ||
616 | |||
617 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
618 | if ((insn.i_format.opcode == blez_op) && | ||
619 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
620 | (insn.i_format.rs == insn.i_format.rt))) | ||
621 | regs->regs[31] = epc + 4; | ||
622 | regs->cp0_epc += 8; | ||
623 | break; | ||
624 | } | ||
537 | /* rt field assumed to be zero */ | 625 | /* rt field assumed to be zero */ |
538 | if ((long)regs->regs[insn.i_format.rs] <= 0) { | 626 | if ((long)regs->regs[insn.i_format.rs] <= 0) { |
539 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 627 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
544 | regs->cp0_epc = epc; | 632 | regs->cp0_epc = epc; |
545 | break; | 633 | break; |
546 | 634 | ||
547 | case bgtz_op: | ||
548 | case bgtzl_op: | 635 | case bgtzl_op: |
636 | if (NO_R6EMU) | ||
637 | goto sigill_r6; | ||
638 | case bgtz_op: | ||
639 | /* | ||
640 | * Compact branches for R6 for the | ||
641 | * bgtz and bgtzl opcodes. | ||
642 | * BGTZ | rs = 0 | rt != 0 == BGTZALC | ||
643 | * BGTZ | rs = rt != 0 == BLTZALC | ||
644 | * BGTZ | rs != 0 | rt != 0 == BLTUC | ||
645 | * BGTZL | rs = 0 | rt != 0 == BGTZC | ||
646 | * BGTZL | rs = rt != 0 == BLTZC | ||
647 | * BGTZL | rs != 0 | rt != 0 == BLTC | ||
648 | * | ||
649 | * The *ZALC variants apply for BGTZ when rt != 0. | ||
650 | * For real BGTZ{,L}, rt is always 0. | ||
651 | */ | ||
652 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
653 | if ((insn.i_format.opcode == blez_op) && | ||
654 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
655 | (insn.i_format.rs == insn.i_format.rt))) | ||
656 | regs->regs[31] = epc + 4; | ||
657 | regs->cp0_epc += 8; | ||
658 | break; | ||
659 | } | ||
660 | |||
549 | /* rt field assumed to be zero */ | 661 | /* rt field assumed to be zero */ |
550 | if ((long)regs->regs[insn.i_format.rs] > 0) { | 662 | if ((long)regs->regs[insn.i_format.rs] > 0) { |
551 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 663 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
560 | * And now the FPA/cp1 branch instructions. | 672 | * And now the FPA/cp1 branch instructions. |
561 | */ | 673 | */ |
562 | case cop1_op: | 674 | case cop1_op: |
563 | preempt_disable(); | 675 | if (cpu_has_mips_r6 && |
564 | if (is_fpu_owner()) | 676 | ((insn.i_format.rs == bc1eqz_op) || |
565 | fcr31 = read_32bit_cp1_register(CP1_STATUS); | 677 | (insn.i_format.rs == bc1nez_op))) { |
566 | else | 678 | if (!used_math()) { /* First time FPU user */ |
567 | fcr31 = current->thread.fpu.fcr31; | 679 | ret = init_fpu(); |
568 | preempt_enable(); | 680 | if (ret && NO_R6EMU) { |
569 | 681 | ret = -ret; | |
570 | bit = (insn.i_format.rt >> 2); | 682 | break; |
571 | bit += (bit != 0); | 683 | } |
572 | bit += 23; | 684 | ret = 0; |
573 | switch (insn.i_format.rt & 3) { | 685 | set_used_math(); |
574 | case 0: /* bc1f */ | 686 | } |
575 | case 2: /* bc1fl */ | 687 | lose_fpu(1); /* Save FPU state for the emulator. */ |
576 | if (~fcr31 & (1 << bit)) { | 688 | reg = insn.i_format.rt; |
577 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 689 | bit = 0; |
578 | if (insn.i_format.rt == 2) | 690 | switch (insn.i_format.rs) { |
579 | ret = BRANCH_LIKELY_TAKEN; | 691 | case bc1eqz_op: |
580 | } else | 692 | /* Test bit 0 */ |
693 | if (get_fpr32(¤t->thread.fpu.fpr[reg], 0) | ||
694 | & 0x1) | ||
695 | bit = 1; | ||
696 | break; | ||
697 | case bc1nez_op: | ||
698 | /* Test bit 0 */ | ||
699 | if (!(get_fpr32(¤t->thread.fpu.fpr[reg], 0) | ||
700 | & 0x1)) | ||
701 | bit = 1; | ||
702 | break; | ||
703 | } | ||
704 | own_fpu(1); | ||
705 | if (bit) | ||
706 | epc = epc + 4 + | ||
707 | (insn.i_format.simmediate << 2); | ||
708 | else | ||
581 | epc += 8; | 709 | epc += 8; |
582 | regs->cp0_epc = epc; | 710 | regs->cp0_epc = epc; |
711 | |||
583 | break; | 712 | break; |
713 | } else { | ||
584 | 714 | ||
585 | case 1: /* bc1t */ | 715 | preempt_disable(); |
586 | case 3: /* bc1tl */ | 716 | if (is_fpu_owner()) |
587 | if (fcr31 & (1 << bit)) { | 717 | fcr31 = read_32bit_cp1_register(CP1_STATUS); |
588 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 718 | else |
589 | if (insn.i_format.rt == 3) | 719 | fcr31 = current->thread.fpu.fcr31; |
590 | ret = BRANCH_LIKELY_TAKEN; | 720 | preempt_enable(); |
591 | } else | 721 | |
592 | epc += 8; | 722 | bit = (insn.i_format.rt >> 2); |
593 | regs->cp0_epc = epc; | 723 | bit += (bit != 0); |
724 | bit += 23; | ||
725 | switch (insn.i_format.rt & 3) { | ||
726 | case 0: /* bc1f */ | ||
727 | case 2: /* bc1fl */ | ||
728 | if (~fcr31 & (1 << bit)) { | ||
729 | epc = epc + 4 + | ||
730 | (insn.i_format.simmediate << 2); | ||
731 | if (insn.i_format.rt == 2) | ||
732 | ret = BRANCH_LIKELY_TAKEN; | ||
733 | } else | ||
734 | epc += 8; | ||
735 | regs->cp0_epc = epc; | ||
736 | break; | ||
737 | |||
738 | case 1: /* bc1t */ | ||
739 | case 3: /* bc1tl */ | ||
740 | if (fcr31 & (1 << bit)) { | ||
741 | epc = epc + 4 + | ||
742 | (insn.i_format.simmediate << 2); | ||
743 | if (insn.i_format.rt == 3) | ||
744 | ret = BRANCH_LIKELY_TAKEN; | ||
745 | } else | ||
746 | epc += 8; | ||
747 | regs->cp0_epc = epc; | ||
748 | break; | ||
749 | } | ||
594 | break; | 750 | break; |
595 | } | 751 | } |
596 | break; | ||
597 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 752 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
598 | case lwc2_op: /* This is bbit0 on Octeon */ | 753 | case lwc2_op: /* This is bbit0 on Octeon */ |
599 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | 754 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) |
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, | |||
626 | epc += 8; | 781 | epc += 8; |
627 | regs->cp0_epc = epc; | 782 | regs->cp0_epc = epc; |
628 | break; | 783 | break; |
784 | #else | ||
785 | case bc6_op: | ||
786 | /* Only valid for MIPS R6 */ | ||
787 | if (!cpu_has_mips_r6) { | ||
788 | ret = -SIGILL; | ||
789 | break; | ||
790 | } | ||
791 | regs->cp0_epc += 8; | ||
792 | break; | ||
793 | case balc6_op: | ||
794 | if (!cpu_has_mips_r6) { | ||
795 | ret = -SIGILL; | ||
796 | break; | ||
797 | } | ||
798 | /* Compact branch: BALC */ | ||
799 | regs->regs[31] = epc + 4; | ||
800 | epc += 4 + (insn.i_format.simmediate << 2); | ||
801 | regs->cp0_epc = epc; | ||
802 | break; | ||
803 | case beqzcjic_op: | ||
804 | if (!cpu_has_mips_r6) { | ||
805 | ret = -SIGILL; | ||
806 | break; | ||
807 | } | ||
808 | /* Compact branch: BEQZC || JIC */ | ||
809 | regs->cp0_epc += 8; | ||
810 | break; | ||
811 | case bnezcjialc_op: | ||
812 | if (!cpu_has_mips_r6) { | ||
813 | ret = -SIGILL; | ||
814 | break; | ||
815 | } | ||
816 | /* Compact branch: BNEZC || JIALC */ | ||
817 | if (insn.i_format.rs) | ||
818 | regs->regs[31] = epc + 4; | ||
819 | regs->cp0_epc += 8; | ||
820 | break; | ||
629 | #endif | 821 | #endif |
822 | case cbcond0_op: | ||
823 | case cbcond1_op: | ||
824 | /* Only valid for MIPS R6 */ | ||
825 | if (!cpu_has_mips_r6) { | ||
826 | ret = -SIGILL; | ||
827 | break; | ||
828 | } | ||
829 | /* | ||
830 | * Compact branches: | ||
831 | * bovc, beqc, beqzalc, bnvc, bnec, bnezalc | ||
832 | */ | ||
833 | if (insn.i_format.rt && !insn.i_format.rs) | ||
834 | regs->regs[31] = epc + 4; | ||
835 | regs->cp0_epc += 8; | ||
836 | break; | ||
630 | } | 837 | } |
631 | 838 | ||
632 | return ret; | 839 | return ret; |
633 | 840 | ||
634 | sigill: | 841 | sigill_dsp: |
635 | printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); | 842 | printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); |
636 | force_sig(SIGBUS, current); | 843 | force_sig(SIGBUS, current); |
637 | return -EFAULT; | 844 | return -EFAULT; |
845 | sigill_r6: | ||
846 | pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", | ||
847 | current->comm); | ||
848 | force_sig(SIGILL, current); | ||
849 | return -EFAULT; | ||
638 | } | 850 | } |
639 | EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); | 851 | EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); |
640 | 852 | ||
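All of the new compact-branch cases follow the rule spelled out in the comment added to __compute_return_epc_for_insn(): when the trap came from the forbidden slot, the branch was not taken, so the return EPC is the branch address plus 8 (4 for the branch word, 4 for the skipped forbidden slot), and only the linking forms additionally write $31 with the address of the instruction after the branch. A standalone restatement of that rule, a sketch rather than the kernel function:

    #include <stdbool.h>

    struct regs_sketch {
            unsigned long gpr31;    /* link register $ra */
            unsigned long cp0_epc;
    };

    /*
     * epc is the address of the conditional compact branch whose forbidden
     * slot raised the exception, i.e. the branch was not taken.
     */
    static void fixup_compact_branch(struct regs_sketch *r, unsigned long epc,
                                     bool links)
    {
            if (links)
                    r->gpr31 = epc + 4;     /* return address: next instruction */
            r->cp0_epc = epc + 8;           /* skip branch and forbidden slot */
    }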
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 6acaad0480af..82bd2b278a24 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/percpu.h> | 11 | #include <linux/percpu.h> |
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
14 | #include <linux/irqchip/mips-gic.h> | ||
15 | 14 | ||
16 | #include <asm/time.h> | 15 | #include <asm/time.h> |
17 | #include <asm/cevt-r4k.h> | 16 | #include <asm/cevt-r4k.h> |
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed; | |||
40 | 39 | ||
41 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | 40 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) |
42 | { | 41 | { |
43 | const int r2 = cpu_has_mips_r2; | 42 | const int r2 = cpu_has_mips_r2_r6; |
44 | struct clock_event_device *cd; | 43 | struct clock_event_device *cd; |
45 | int cpu = smp_processor_id(); | 44 | int cpu = smp_processor_id(); |
46 | 45 | ||
@@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev) | |||
85 | */ | 84 | */ |
86 | static int c0_compare_int_pending(void) | 85 | static int c0_compare_int_pending(void) |
87 | { | 86 | { |
88 | #ifdef CONFIG_MIPS_GIC | 87 | /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ |
89 | if (gic_present) | ||
90 | return gic_get_timer_pending(); | ||
91 | #endif | ||
92 | return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); | 88 | return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); |
93 | } | 89 | } |
94 | 90 | ||
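With the GIC query gone, c0_compare_int_pending() depends only on the Cause register: on R2-and-later cores cp0_compare_irq_shift is set up so the masked bit lands on Cause.TI (bit 30), while on older cores it lands on the interrupt-pending line wired to the timer, typically IP7 (bit 15). A small sketch of that bit test, using the architectural bit positions from the MIPS privileged spec:

    #define CAUSEB_IP       8       /* Cause.IP0 position */
    #define CAUSEB_TI       30      /* Cause.TI position (R2 and later) */

    /* Returns nonzero if the timer interrupt is pending in 'cause'. */
    static int timer_int_pending(unsigned int cause, int has_r2,
                                 int cp0_compare_irq)
    {
            int shift = has_r2 ? CAUSEB_TI - CAUSEB_IP  /* test lands on TI   */
                               : cp0_compare_irq;       /* test lands on IPn  */

            return (cause >> shift) & (1u << CAUSEB_IP);
    }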
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 0384b05ab5a0..55b759a0019e 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S | |||
@@ -99,11 +99,11 @@ not_nmi: | |||
99 | xori t2, t1, 0x7 | 99 | xori t2, t1, 0x7 |
100 | beqz t2, 1f | 100 | beqz t2, 1f |
101 | li t3, 32 | 101 | li t3, 32 |
102 | addi t1, t1, 1 | 102 | addiu t1, t1, 1 |
103 | sllv t1, t3, t1 | 103 | sllv t1, t3, t1 |
104 | 1: /* At this point t1 == I-cache sets per way */ | 104 | 1: /* At this point t1 == I-cache sets per way */ |
105 | _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ | 105 | _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ |
106 | addi t2, t2, 1 | 106 | addiu t2, t2, 1 |
107 | mul t1, t1, t0 | 107 | mul t1, t1, t0 |
108 | mul t1, t1, t2 | 108 | mul t1, t1, t2 |
109 | 109 | ||
@@ -126,11 +126,11 @@ icache_done: | |||
126 | xori t2, t1, 0x7 | 126 | xori t2, t1, 0x7 |
127 | beqz t2, 1f | 127 | beqz t2, 1f |
128 | li t3, 32 | 128 | li t3, 32 |
129 | addi t1, t1, 1 | 129 | addiu t1, t1, 1 |
130 | sllv t1, t3, t1 | 130 | sllv t1, t3, t1 |
131 | 1: /* At this point t1 == D-cache sets per way */ | 131 | 1: /* At this point t1 == D-cache sets per way */ |
132 | _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ | 132 | _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ |
133 | addi t2, t2, 1 | 133 | addiu t2, t2, 1 |
134 | mul t1, t1, t0 | 134 | mul t1, t1, t0 |
135 | mul t1, t1, t2 | 135 | mul t1, t1, t2 |
136 | 136 | ||
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init) | |||
250 | mfc0 t0, CP0_MVPCONF0 | 250 | mfc0 t0, CP0_MVPCONF0 |
251 | srl t0, t0, MVPCONF0_PVPE_SHIFT | 251 | srl t0, t0, MVPCONF0_PVPE_SHIFT |
252 | andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) | 252 | andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) |
253 | addi t7, t0, 1 | 253 | addiu t7, t0, 1 |
254 | 254 | ||
255 | /* If there's only 1, we're done */ | 255 | /* If there's only 1, we're done */ |
256 | beqz t0, 2f | 256 | beqz t0, 2f |
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init) | |||
280 | mttc0 t0, CP0_TCHALT | 280 | mttc0 t0, CP0_TCHALT |
281 | 281 | ||
282 | /* Next VPE */ | 282 | /* Next VPE */ |
283 | addi t5, t5, 1 | 283 | addiu t5, t5, 1 |
284 | slt t0, t5, t7 | 284 | slt t0, t5, t7 |
285 | bnez t0, 1b | 285 | bnez t0, 1b |
286 | nop | 286 | nop |
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes) | |||
317 | mfc0 t1, CP0_MVPCONF0 | 317 | mfc0 t1, CP0_MVPCONF0 |
318 | srl t1, t1, MVPCONF0_PVPE_SHIFT | 318 | srl t1, t1, MVPCONF0_PVPE_SHIFT |
319 | andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT | 319 | andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT |
320 | addi t1, t1, 1 | 320 | addiu t1, t1, 1 |
321 | 321 | ||
322 | /* Calculate a mask for the VPE ID from EBase.CPUNum */ | 322 | /* Calculate a mask for the VPE ID from EBase.CPUNum */ |
323 | clz t1, t1 | 323 | clz t1, t1 |
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes) | |||
424 | 424 | ||
425 | /* Next VPE */ | 425 | /* Next VPE */ |
426 | 2: srl t6, t6, 1 | 426 | 2: srl t6, t6, 1 |
427 | addi t5, t5, 1 | 427 | addiu t5, t5, 1 |
428 | bnez t6, 1b | 428 | bnez t6, 1b |
429 | nop | 429 | nop |
430 | 430 | ||
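The addi to addiu substitutions in this file are needed because R6 removes the trapping add-immediate instruction; addiu performs the same 32-bit addition without ever raising an overflow exception, which is what these set/way and VPE counters want anyway. A tiny hedged illustration from C follows (inline asm is used only to pin the opcode; a plain C addition compiles to addiu as well):

    /* Increment written with addiu so it assembles on both pre-R6 and R6. */
    static inline unsigned int inc_u32(unsigned int x)
    {
            unsigned int out;

            __asm__ ("addiu %0, %1, 1" : "=r" (out) : "r" (x));
            return out;
    }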
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 2d80b5f1aeae..09f4034f239f 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c | |||
@@ -244,7 +244,7 @@ static inline void check_daddi(void) | |||
244 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); | 244 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); |
245 | } | 245 | } |
246 | 246 | ||
247 | int daddiu_bug = -1; | 247 | int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; |
248 | 248 | ||
249 | static inline void check_daddiu(void) | 249 | static inline void check_daddiu(void) |
250 | { | 250 | { |
@@ -314,11 +314,14 @@ static inline void check_daddiu(void) | |||
314 | 314 | ||
315 | void __init check_bugs64_early(void) | 315 | void __init check_bugs64_early(void) |
316 | { | 316 | { |
317 | check_mult_sh(); | 317 | if (!config_enabled(CONFIG_CPU_MIPSR6)) { |
318 | check_daddiu(); | 318 | check_mult_sh(); |
319 | check_daddiu(); | ||
320 | } | ||
319 | } | 321 | } |
320 | 322 | ||
321 | void __init check_bugs64(void) | 323 | void __init check_bugs64(void) |
322 | { | 324 | { |
323 | check_daddi(); | 325 | if (!config_enabled(CONFIG_CPU_MIPSR6)) |
326 | check_daddi(); | ||
324 | } | 327 | } |
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5342674842f5..48dfb9de853d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) | |||
237 | c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; | 237 | c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; |
238 | break; | 238 | break; |
239 | 239 | ||
240 | /* R6 incompatible with everything else */ | ||
241 | case MIPS_CPU_ISA_M64R6: | ||
242 | c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; | ||
243 | case MIPS_CPU_ISA_M32R6: | ||
244 | c->isa_level |= MIPS_CPU_ISA_M32R6; | ||
245 | /* Break here so we don't add incompatible ISAs */ | ||
246 | break; | ||
240 | case MIPS_CPU_ISA_M32R2: | 247 | case MIPS_CPU_ISA_M32R2: |
241 | c->isa_level |= MIPS_CPU_ISA_M32R2; | 248 | c->isa_level |= MIPS_CPU_ISA_M32R2; |
242 | case MIPS_CPU_ISA_M32R1: | 249 | case MIPS_CPU_ISA_M32R1: |
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) | |||
326 | case 1: | 333 | case 1: |
327 | set_isa(c, MIPS_CPU_ISA_M32R2); | 334 | set_isa(c, MIPS_CPU_ISA_M32R2); |
328 | break; | 335 | break; |
336 | case 2: | ||
337 | set_isa(c, MIPS_CPU_ISA_M32R6); | ||
338 | break; | ||
329 | default: | 339 | default: |
330 | goto unknown; | 340 | goto unknown; |
331 | } | 341 | } |
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) | |||
338 | case 1: | 348 | case 1: |
339 | set_isa(c, MIPS_CPU_ISA_M64R2); | 349 | set_isa(c, MIPS_CPU_ISA_M64R2); |
340 | break; | 350 | break; |
351 | case 2: | ||
352 | set_isa(c, MIPS_CPU_ISA_M64R6); | ||
353 | break; | ||
341 | default: | 354 | default: |
342 | goto unknown; | 355 | goto unknown; |
343 | } | 356 | } |
@@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) | |||
424 | if (config3 & MIPS_CONF3_MSA) | 437 | if (config3 & MIPS_CONF3_MSA) |
425 | c->ases |= MIPS_ASE_MSA; | 438 | c->ases |= MIPS_ASE_MSA; |
426 | /* Only tested on 32-bit cores */ | 439 | /* Only tested on 32-bit cores */ |
427 | if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) | 440 | if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { |
441 | c->htw_seq = 0; | ||
428 | c->options |= MIPS_CPU_HTW; | 442 | c->options |= MIPS_CPU_HTW; |
443 | } | ||
429 | 444 | ||
430 | return config3 & MIPS_CONF_M; | 445 | return config3 & MIPS_CONF_M; |
431 | } | 446 | } |
@@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) | |||
499 | c->options |= MIPS_CPU_EVA; | 514 | c->options |= MIPS_CPU_EVA; |
500 | if (config5 & MIPS_CONF5_MRP) | 515 | if (config5 & MIPS_CONF5_MRP) |
501 | c->options |= MIPS_CPU_MAAR; | 516 | c->options |= MIPS_CPU_MAAR; |
517 | if (config5 & MIPS_CONF5_LLB) | ||
518 | c->options |= MIPS_CPU_RW_LLB; | ||
502 | 519 | ||
503 | return config5 & MIPS_CONF_M; | 520 | return config5 & MIPS_CONF_M; |
504 | } | 521 | } |
@@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c) | |||
533 | 550 | ||
534 | if (cpu_has_rixi) { | 551 | if (cpu_has_rixi) { |
535 | /* Enable the RIXI exceptions */ | 552 | /* Enable the RIXI exceptions */ |
536 | write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); | 553 | set_c0_pagegrain(PG_IEC); |
537 | back_to_back_c0_hazard(); | 554 | back_to_back_c0_hazard(); |
538 | /* Verify the IEC bit is set */ | 555 | /* Verify the IEC bit is set */ |
539 | if (read_c0_pagegrain() & PG_IEC) | 556 | if (read_c0_pagegrain() & PG_IEC) |
@@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c) | |||
541 | } | 558 | } |
542 | 559 | ||
543 | #ifndef CONFIG_MIPS_CPS | 560 | #ifndef CONFIG_MIPS_CPS |
544 | if (cpu_has_mips_r2) { | 561 | if (cpu_has_mips_r2_r6) { |
545 | c->core = get_ebase_cpunum(); | 562 | c->core = get_ebase_cpunum(); |
546 | if (cpu_has_mipsmt) | 563 | if (cpu_has_mipsmt) |
547 | c->core >>= fls(core_nvpes()) - 1; | 564 | c->core >>= fls(core_nvpes()) - 1; |
@@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | |||
896 | { | 913 | { |
897 | c->writecombine = _CACHE_UNCACHED_ACCELERATED; | 914 | c->writecombine = _CACHE_UNCACHED_ACCELERATED; |
898 | switch (c->processor_id & PRID_IMP_MASK) { | 915 | switch (c->processor_id & PRID_IMP_MASK) { |
916 | case PRID_IMP_QEMU_GENERIC: | ||
917 | c->writecombine = _CACHE_UNCACHED; | ||
918 | c->cputype = CPU_QEMU_GENERIC; | ||
919 | __cpu_name[cpu] = "MIPS GENERIC QEMU"; | ||
920 | break; | ||
899 | case PRID_IMP_4KC: | 921 | case PRID_IMP_4KC: |
900 | c->cputype = CPU_4KC; | 922 | c->cputype = CPU_4KC; |
901 | c->writecombine = _CACHE_UNCACHED; | 923 | c->writecombine = _CACHE_UNCACHED; |
@@ -1345,8 +1367,7 @@ void cpu_probe(void) | |||
1345 | if (c->options & MIPS_CPU_FPU) { | 1367 | if (c->options & MIPS_CPU_FPU) { |
1346 | c->fpu_id = cpu_get_fpu_id(); | 1368 | c->fpu_id = cpu_get_fpu_id(); |
1347 | 1369 | ||
1348 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | | 1370 | if (c->isa_level & cpu_has_mips_r) { |
1349 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { | ||
1350 | if (c->fpu_id & MIPS_FPIR_3D) | 1371 | if (c->fpu_id & MIPS_FPIR_3D) |
1351 | c->ases |= MIPS_ASE_MIPS3D; | 1372 | c->ases |= MIPS_ASE_MIPS3D; |
1352 | if (c->fpu_id & MIPS_FPIR_FREP) | 1373 | if (c->fpu_id & MIPS_FPIR_FREP) |
@@ -1354,7 +1375,7 @@ void cpu_probe(void) | |||
1354 | } | 1375 | } |
1355 | } | 1376 | } |
1356 | 1377 | ||
1357 | if (cpu_has_mips_r2) { | 1378 | if (cpu_has_mips_r2_r6) { |
1358 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | 1379 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; |
1359 | /* R2 has Performance Counter Interrupt indicator */ | 1380 | /* R2 has Performance Counter Interrupt indicator */ |
1360 | c->options |= MIPS_CPU_PCI; | 1381 | c->options |= MIPS_CPU_PCI; |
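set_isa() keeps its deliberate fall-through so each ISA level also implies all compatible lower levels, but the new R6 cases break out early because R6 is not backwards compatible with R1/R2. A runnable sketch of that accumulation pattern; the flag values below are made up for illustration and are not the kernel's MIPS_CPU_ISA_* bits:

    #include <stdio.h>

    #define ISA_M32R1       0x01
    #define ISA_M32R2       0x02
    #define ISA_M32R6       0x04
    #define ISA_M64R6       0x08

    static unsigned int isa_levels(unsigned int isa)
    {
            unsigned int level = 0;

            switch (isa) {
            /* R6 is incompatible with everything below it */
            case ISA_M64R6:
                    level |= ISA_M64R6;
                    /* fall through */
            case ISA_M32R6:
                    level |= ISA_M32R6;
                    break;                  /* do not add pre-R6 levels */

            case ISA_M32R2:
                    level |= ISA_M32R2;
                    /* fall through */
            case ISA_M32R1:
                    level |= ISA_M32R1;
                    break;
            }
            return level;
    }

    int main(void)
    {
            printf("%#x\n", isa_levels(ISA_M64R6)); /* 0xc: 64R6 + 32R6 only */
            return 0;
    }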
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c index a5b5b56485c1..d2c09f6475c5 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c | |||
@@ -11,29 +11,112 @@ | |||
11 | #include <linux/elf.h> | 11 | #include <linux/elf.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | 13 | ||
14 | /* FPU modes */ | ||
14 | enum { | 15 | enum { |
15 | FP_ERROR = -1, | 16 | FP_FRE, |
16 | FP_DOUBLE_64A = -2, | 17 | FP_FR0, |
18 | FP_FR1, | ||
17 | }; | 19 | }; |
18 | 20 | ||
21 | /** | ||
22 | * struct mode_req - ABI FPU mode requirements | ||
23 | * @single: The program being loaded needs an FPU but it will only issue | ||
24 | * single precision instructions meaning that it can execute in | ||
25 | * either FR0 or FR1. | ||
26 | * @soft: The soft(-float) requirement means that the program being | ||
27 | * loaded has no FPU dependency at all (i.e. it has no | ||
28 | * FPU instructions). | ||
29 | * @fr1: The program being loaded depends on FPU being in FR=1 mode. | ||
30 | * @frdefault: The program being loaded depends on the default FPU mode. | ||
31 | * That is FR0 for O32 and FR1 for N32/N64. | ||
32 | * @fre: The program being loaded depends on FPU with FRE=1. This mode is | ||
33 | * a bridge which uses FR=1 whilst still being able to maintain | ||
34 | * full compatibility with pre-existing code using the O32 FP32 | ||
35 | * ABI. | ||
36 | * | ||
37 | * More information about the FP ABIs can be found here: | ||
38 | * | ||
39 | * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up | ||
40 | * | ||
41 | */ | ||
42 | |||
43 | struct mode_req { | ||
44 | bool single; | ||
45 | bool soft; | ||
46 | bool fr1; | ||
47 | bool frdefault; | ||
48 | bool fre; | ||
49 | }; | ||
50 | |||
51 | static const struct mode_req fpu_reqs[] = { | ||
52 | [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, | ||
53 | [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, | ||
54 | [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, | ||
55 | [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, | ||
56 | [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, | ||
57 | [MIPS_ABI_FP_XX] = { false, false, true, true, true }, | ||
58 | [MIPS_ABI_FP_64] = { false, false, true, false, false }, | ||
59 | [MIPS_ABI_FP_64A] = { false, false, true, false, true } | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * Mode requirements when .MIPS.abiflags is not present in the ELF. | ||
64 | * Not present means that everything is acceptable except FR1. | ||
65 | */ | ||
66 | static struct mode_req none_req = { true, true, false, true, true }; | ||
67 | |||
19 | int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, | 68 | int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, |
20 | bool is_interp, struct arch_elf_state *state) | 69 | bool is_interp, struct arch_elf_state *state) |
21 | { | 70 | { |
22 | struct elf32_hdr *ehdr = _ehdr; | 71 | struct elf32_hdr *ehdr32 = _ehdr; |
23 | struct elf32_phdr *phdr = _phdr; | 72 | struct elf32_phdr *phdr32 = _phdr; |
73 | struct elf64_phdr *phdr64 = _phdr; | ||
24 | struct mips_elf_abiflags_v0 abiflags; | 74 | struct mips_elf_abiflags_v0 abiflags; |
25 | int ret; | 75 | int ret; |
26 | 76 | ||
27 | if (config_enabled(CONFIG_64BIT) && | 77 | /* Let's see if this is an O32 ELF */ |
28 | (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) | 78 | if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { |
29 | return 0; | 79 | /* FR = 1 for N32 */ |
30 | if (phdr->p_type != PT_MIPS_ABIFLAGS) | 80 | if (ehdr32->e_flags & EF_MIPS_ABI2) |
31 | return 0; | 81 | state->overall_fp_mode = FP_FR1; |
32 | if (phdr->p_filesz < sizeof(abiflags)) | 82 | else |
33 | return -EINVAL; | 83 | /* Set a good default FPU mode for O32 */ |
84 | state->overall_fp_mode = cpu_has_mips_r6 ? | ||
85 | FP_FRE : FP_FR0; | ||
86 | |||
87 | if (ehdr32->e_flags & EF_MIPS_FP64) { | ||
88 | /* | ||
89 | * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it | ||
90 | * later if needed | ||
91 | */ | ||
92 | if (is_interp) | ||
93 | state->interp_fp_abi = MIPS_ABI_FP_OLD_64; | ||
94 | else | ||
95 | state->fp_abi = MIPS_ABI_FP_OLD_64; | ||
96 | } | ||
97 | if (phdr32->p_type != PT_MIPS_ABIFLAGS) | ||
98 | return 0; | ||
99 | |||
100 | if (phdr32->p_filesz < sizeof(abiflags)) | ||
101 | return -EINVAL; | ||
102 | |||
103 | ret = kernel_read(elf, phdr32->p_offset, | ||
104 | (char *)&abiflags, | ||
105 | sizeof(abiflags)); | ||
106 | } else { | ||
107 | /* FR=1 is really the only option for 64-bit */ | ||
108 | state->overall_fp_mode = FP_FR1; | ||
109 | |||
110 | if (phdr64->p_type != PT_MIPS_ABIFLAGS) | ||
111 | return 0; | ||
112 | if (phdr64->p_filesz < sizeof(abiflags)) | ||
113 | return -EINVAL; | ||
114 | |||
115 | ret = kernel_read(elf, phdr64->p_offset, | ||
116 | (char *)&abiflags, | ||
117 | sizeof(abiflags)); | ||
118 | } | ||
34 | 119 | ||
35 | ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, | ||
36 | sizeof(abiflags)); | ||
37 | if (ret < 0) | 120 | if (ret < 0) |
38 | return ret; | 121 | return ret; |
39 | if (ret != sizeof(abiflags)) | 122 | if (ret != sizeof(abiflags)) |
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, | |||
48 | return 0; | 131 | return 0; |
49 | } | 132 | } |
50 | 133 | ||
51 | static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) | 134 | static inline unsigned get_fp_abi(int in_abi) |
52 | { | 135 | { |
53 | /* If the ABI requirement is provided, simply return that */ | 136 | /* If the ABI requirement is provided, simply return that */ |
54 | if (in_abi != -1) | 137 | if (in_abi != MIPS_ABI_FP_UNKNOWN) |
55 | return in_abi; | 138 | return in_abi; |
56 | 139 | ||
57 | /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ | 140 | /* Unknown ABI */ |
58 | if (ehdr->e_flags & EF_MIPS_FP64) | 141 | return MIPS_ABI_FP_UNKNOWN; |
59 | return MIPS_ABI_FP_64; | ||
60 | |||
61 | /* Default to MIPS_ABI_FP_DOUBLE */ | ||
62 | return MIPS_ABI_FP_DOUBLE; | ||
63 | } | 142 | } |
64 | 143 | ||
65 | int arch_check_elf(void *_ehdr, bool has_interpreter, | 144 | int arch_check_elf(void *_ehdr, bool has_interpreter, |
66 | struct arch_elf_state *state) | 145 | struct arch_elf_state *state) |
67 | { | 146 | { |
68 | struct elf32_hdr *ehdr = _ehdr; | 147 | struct elf32_hdr *ehdr = _ehdr; |
69 | unsigned fp_abi, interp_fp_abi, abi0, abi1; | 148 | struct mode_req prog_req, interp_req; |
149 | int fp_abi, interp_fp_abi, abi0, abi1, max_abi; | ||
70 | 150 | ||
71 | /* Ignore non-O32 binaries */ | 151 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) |
72 | if (config_enabled(CONFIG_64BIT) && | ||
73 | (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) | ||
74 | return 0; | 152 | return 0; |
75 | 153 | ||
76 | fp_abi = get_fp_abi(ehdr, state->fp_abi); | 154 | fp_abi = get_fp_abi(state->fp_abi); |
77 | 155 | ||
78 | if (has_interpreter) { | 156 | if (has_interpreter) { |
79 | interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); | 157 | interp_fp_abi = get_fp_abi(state->interp_fp_abi); |
80 | 158 | ||
81 | abi0 = min(fp_abi, interp_fp_abi); | 159 | abi0 = min(fp_abi, interp_fp_abi); |
82 | abi1 = max(fp_abi, interp_fp_abi); | 160 | abi1 = max(fp_abi, interp_fp_abi); |
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, | |||
84 | abi0 = abi1 = fp_abi; | 162 | abi0 = abi1 = fp_abi; |
85 | } | 163 | } |
86 | 164 | ||
87 | state->overall_abi = FP_ERROR; | 165 | /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ |
88 | 166 | max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && | |
89 | if (abi0 == abi1) { | 167 | (!(ehdr->e_flags & EF_MIPS_ABI2))) ? |
90 | state->overall_abi = abi0; | 168 | MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; |
91 | } else if (abi0 == MIPS_ABI_FP_ANY) { | ||
92 | state->overall_abi = abi1; | ||
93 | } else if (abi0 == MIPS_ABI_FP_DOUBLE) { | ||
94 | switch (abi1) { | ||
95 | case MIPS_ABI_FP_XX: | ||
96 | state->overall_abi = MIPS_ABI_FP_DOUBLE; | ||
97 | break; | ||
98 | |||
99 | case MIPS_ABI_FP_64A: | ||
100 | state->overall_abi = FP_DOUBLE_64A; | ||
101 | break; | ||
102 | } | ||
103 | } else if (abi0 == MIPS_ABI_FP_SINGLE || | ||
104 | abi0 == MIPS_ABI_FP_SOFT) { | ||
105 | /* Cannot link with other ABIs */ | ||
106 | } else if (abi0 == MIPS_ABI_FP_OLD_64) { | ||
107 | switch (abi1) { | ||
108 | case MIPS_ABI_FP_XX: | ||
109 | case MIPS_ABI_FP_64: | ||
110 | case MIPS_ABI_FP_64A: | ||
111 | state->overall_abi = MIPS_ABI_FP_64; | ||
112 | break; | ||
113 | } | ||
114 | } else if (abi0 == MIPS_ABI_FP_XX || | ||
115 | abi0 == MIPS_ABI_FP_64 || | ||
116 | abi0 == MIPS_ABI_FP_64A) { | ||
117 | state->overall_abi = MIPS_ABI_FP_64; | ||
118 | } | ||
119 | 169 | ||
120 | switch (state->overall_abi) { | 170 | if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || |
121 | case MIPS_ABI_FP_64: | 171 | (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) |
122 | case MIPS_ABI_FP_64A: | 172 | return -ELIBBAD; |
123 | case FP_DOUBLE_64A: | 173 | |
124 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) | 174 | /* It's time to determine the FPU mode requirements */ |
125 | return -ELIBBAD; | 175 | prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; |
126 | break; | 176 | interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; |
127 | 177 | ||
128 | case FP_ERROR: | 178 | /* |
179 | * Check whether the program's and interp's ABIs have a matching FPU | ||
180 | * mode requirement. | ||
181 | */ | ||
182 | prog_req.single = interp_req.single && prog_req.single; | ||
183 | prog_req.soft = interp_req.soft && prog_req.soft; | ||
184 | prog_req.fr1 = interp_req.fr1 && prog_req.fr1; | ||
185 | prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; | ||
186 | prog_req.fre = interp_req.fre && prog_req.fre; | ||
187 | |||
188 | /* | ||
189 | * Determine the desired FPU mode | ||
190 | * | ||
191 | * Decision making: | ||
192 | * | ||
193 | * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This | ||
194 | * means that the combination of program and interpreter | ||
195 | * inherently requires the hybrid FP mode. | ||
196 | * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or | ||
197 | * fpxx case. In any-ABI (or no-ABI) there are no FPU | ||
198 | * instructions, so the mode does not matter and we simply use | ||
199 | * the one preferred by the hardware. In the fpxx case, the ABI can | ||
200 | * handle both FR=1 and FR=0, so, again, we simply choose the one | ||
201 | * preferred by the hardware. Next, if we only use single-precision | ||
202 | * FPU instructions and the default ABI FPU mode is not good | ||
203 | * (i.e. the single + any-ABI combination), we again set the FPU mode | ||
204 | * to the one preferred by the hardware. Likewise, if we know that | ||
205 | * the code will only use single-precision instructions, shown by | ||
206 | * single being true but frdefault being false, we again set the | ||
207 | * FPU mode to the one preferred by the hardware. | ||
208 | * - We want FP_FR1 if that's the only matching mode and the default one | ||
209 | * is not good. | ||
210 | * - Return with -ELIBBAD if we can't find a matching FPU mode. | ||
211 | */ | ||
212 | if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) | ||
213 | state->overall_fp_mode = FP_FRE; | ||
214 | else if ((prog_req.fr1 && prog_req.frdefault) || | ||
215 | (prog_req.single && !prog_req.frdefault)) | ||
216 | /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ | ||
217 | state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && | ||
218 | cpu_has_mips_r2_r6) ? | ||
219 | FP_FR1 : FP_FR0; | ||
220 | else if (prog_req.fr1) | ||
221 | state->overall_fp_mode = FP_FR1; | ||
222 | else if (!prog_req.fre && !prog_req.frdefault && | ||
223 | !prog_req.fr1 && !prog_req.single && !prog_req.soft) | ||
129 | return -ELIBBAD; | 224 | return -ELIBBAD; |
130 | } | ||
131 | 225 | ||
132 | return 0; | 226 | return 0; |
133 | } | 227 | } |
134 | 228 | ||
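To make the mode negotiation above concrete, here is a small user-space sketch (not kernel code) of the requirement-intersection step: each FP ABI advertises which FR modes it can tolerate, and the program/interpreter pair may only run in a mode that both accept. The struct layout mirrors the mode_req/fpu_reqs names used by the patch, but the requirement vectors chosen for the FPXX and FP64 objects below are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

struct mode_req { bool single, soft, fr1, frdefault, fre; };

/* AND the two requirement vectors field by field, as arch_check_elf() does. */
static struct mode_req intersect(struct mode_req a, struct mode_req b)
{
	struct mode_req r = {
		.single    = a.single && b.single,
		.soft      = a.soft && b.soft,
		.fr1       = a.fr1 && b.fr1,
		.frdefault = a.frdefault && b.frdefault,
		.fre       = a.fre && b.fre,
	};
	return r;
}

int main(void)
{
	/* Assumed: an FPXX object can live with FR=1, the default FR, or FRE... */
	struct mode_req fpxx = { .fr1 = true, .frdefault = true, .fre = true };
	/* ...while an FP64 object insists on FR=1. */
	struct mode_req fp64 = { .fr1 = true };
	struct mode_req both = intersect(fpxx, fp64);

	/* Only fr1 survives, so the decision logic above settles on FP_FR1. */
	printf("single=%d soft=%d fr1=%d frdefault=%d fre=%d\n",
	       both.single, both.soft, both.fr1, both.frdefault, both.fre);
	return 0;
}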
135 | void mips_set_personality_fp(struct arch_elf_state *state) | 229 | static inline void set_thread_fp_mode(int hybrid, int regs32) |
136 | { | 230 | { |
137 | if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { | 231 | if (hybrid) |
138 | /* | 232 | set_thread_flag(TIF_HYBRID_FPREGS); |
139 | * Use hybrid FPRs for all code which can correctly execute | 233 | else |
140 | * with that mode. | ||
141 | */ | ||
142 | switch (state->overall_abi) { | ||
143 | case MIPS_ABI_FP_DOUBLE: | ||
144 | case MIPS_ABI_FP_SINGLE: | ||
145 | case MIPS_ABI_FP_SOFT: | ||
146 | case MIPS_ABI_FP_XX: | ||
147 | case MIPS_ABI_FP_ANY: | ||
148 | /* FR=1, FRE=1 */ | ||
149 | clear_thread_flag(TIF_32BIT_FPREGS); | ||
150 | set_thread_flag(TIF_HYBRID_FPREGS); | ||
151 | return; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | switch (state->overall_abi) { | ||
156 | case MIPS_ABI_FP_DOUBLE: | ||
157 | case MIPS_ABI_FP_SINGLE: | ||
158 | case MIPS_ABI_FP_SOFT: | ||
159 | /* FR=0 */ | ||
160 | set_thread_flag(TIF_32BIT_FPREGS); | ||
161 | clear_thread_flag(TIF_HYBRID_FPREGS); | 234 | clear_thread_flag(TIF_HYBRID_FPREGS); |
162 | break; | 235 | if (regs32) |
163 | 236 | set_thread_flag(TIF_32BIT_FPREGS); | |
164 | case FP_DOUBLE_64A: | 237 | else |
165 | /* FR=1, FRE=1 */ | ||
166 | clear_thread_flag(TIF_32BIT_FPREGS); | 238 | clear_thread_flag(TIF_32BIT_FPREGS); |
167 | set_thread_flag(TIF_HYBRID_FPREGS); | 239 | } |
168 | break; | ||
169 | 240 | ||
170 | case MIPS_ABI_FP_64: | 241 | void mips_set_personality_fp(struct arch_elf_state *state) |
171 | case MIPS_ABI_FP_64A: | 242 | { |
172 | /* FR=1, FRE=0 */ | 243 | /* |
173 | clear_thread_flag(TIF_32BIT_FPREGS); | 244 | * This function is only ever called for O32 ELFs so we should |
174 | clear_thread_flag(TIF_HYBRID_FPREGS); | 245 | * not be worried about N32/N64 binaries. |
175 | break; | 246 | */ |
176 | 247 | ||
177 | case MIPS_ABI_FP_XX: | 248 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) |
178 | case MIPS_ABI_FP_ANY: | 249 | return; |
179 | if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) | ||
180 | set_thread_flag(TIF_32BIT_FPREGS); | ||
181 | else | ||
182 | clear_thread_flag(TIF_32BIT_FPREGS); | ||
183 | 250 | ||
184 | clear_thread_flag(TIF_HYBRID_FPREGS); | 251 | switch (state->overall_fp_mode) { |
252 | case FP_FRE: | ||
253 | set_thread_fp_mode(1, 0); | ||
254 | break; | ||
255 | case FP_FR0: | ||
256 | set_thread_fp_mode(0, 1); | ||
257 | break; | ||
258 | case FP_FR1: | ||
259 | set_thread_fp_mode(0, 0); | ||
185 | break; | 260 | break; |
186 | |||
187 | default: | 261 | default: |
188 | case FP_ERROR: | ||
189 | BUG(); | 262 | BUG(); |
190 | } | 263 | } |
191 | } | 264 | } |
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 4353d323f017..af41ba6db960 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S | |||
@@ -46,6 +46,11 @@ resume_userspace: | |||
46 | local_irq_disable # make sure we dont miss an | 46 | local_irq_disable # make sure we dont miss an |
47 | # interrupt setting need_resched | 47 | # interrupt setting need_resched |
48 | # between sampling and return | 48 | # between sampling and return |
49 | #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR | ||
50 | lw k0, TI_R2_EMUL_RET($28) | ||
51 | bnez k0, restore_all_from_r2_emul | ||
52 | #endif | ||
53 | |||
49 | LONG_L a2, TI_FLAGS($28) # current->work | 54 | LONG_L a2, TI_FLAGS($28) # current->work |
50 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) | 55 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) |
51 | bnez t0, work_pending | 56 | bnez t0, work_pending |
@@ -114,6 +119,19 @@ restore_partial: # restore partial frame | |||
114 | RESTORE_SP_AND_RET | 119 | RESTORE_SP_AND_RET |
115 | .set at | 120 | .set at |
116 | 121 | ||
122 | #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR | ||
123 | restore_all_from_r2_emul: # restore full frame | ||
124 | .set noat | ||
125 | sw zero, TI_R2_EMUL_RET($28) # reset it | ||
126 | RESTORE_TEMP | ||
127 | RESTORE_AT | ||
128 | RESTORE_STATIC | ||
129 | RESTORE_SOME | ||
130 | LONG_L sp, PT_R29(sp) | ||
131 | eretnc | ||
132 | .set at | ||
133 | #endif | ||
134 | |||
117 | work_pending: | 135 | work_pending: |
118 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS | 136 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS |
119 | beqz t0, work_notifysig | 137 | beqz t0, work_notifysig |
@@ -158,7 +176,8 @@ syscall_exit_work: | |||
158 | jal syscall_trace_leave | 176 | jal syscall_trace_leave |
159 | b resume_userspace | 177 | b resume_userspace |
160 | 178 | ||
161 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) | 179 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ |
180 | defined(CONFIG_MIPS_MT) | ||
162 | 181 | ||
163 | /* | 182 | /* |
164 | * MIPS32R2 Instruction Hazard Barrier - must be called | 183 | * MIPS32R2 Instruction Hazard Barrier - must be called |
@@ -171,4 +190,4 @@ LEAF(mips_ihb) | |||
171 | nop | 190 | nop |
172 | END(mips_ihb) | 191 | END(mips_ihb) |
173 | 192 | ||
174 | #endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ | 193 | #endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */ |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a5e26dd90592..2ebaabe3af15 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S | |||
@@ -125,7 +125,7 @@ LEAF(__r4k_wait) | |||
125 | nop | 125 | nop |
126 | nop | 126 | nop |
127 | #endif | 127 | #endif |
128 | .set arch=r4000 | 128 | .set MIPS_ISA_ARCH_LEVEL_RAW |
129 | wait | 129 | wait |
130 | /* end of rollback region (the region size must be power of two) */ | 130 | /* end of rollback region (the region size must be power of two) */ |
131 | 1: | 131 | 1: |
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 0b9082b6b683..368c88b7eb6c 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c | |||
@@ -186,6 +186,7 @@ void __init check_wait(void) | |||
186 | case CPU_PROAPTIV: | 186 | case CPU_PROAPTIV: |
187 | case CPU_P5600: | 187 | case CPU_P5600: |
188 | case CPU_M5150: | 188 | case CPU_M5150: |
189 | case CPU_QEMU_GENERIC: | ||
189 | cpu_wait = r4k_wait; | 190 | cpu_wait = r4k_wait; |
190 | if (read_c0_config7() & MIPS_CONF7_WII) | 191 | if (read_c0_config7() & MIPS_CONF7_WII) |
191 | cpu_wait = r4k_wait_irqoff; | 192 | cpu_wait = r4k_wait_irqoff; |
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c new file mode 100644 index 000000000000..64d17e41093b --- /dev/null +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c | |||
@@ -0,0 +1,2378 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2014 Imagination Technologies Ltd. | ||
7 | * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> | ||
8 | * Author: Markos Chandras <markos.chandras@imgtec.com> | ||
9 | * | ||
10 | * MIPS R2 user space instruction emulator for MIPS R6 | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/bug.h> | ||
14 | #include <linux/compiler.h> | ||
15 | #include <linux/debugfs.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/ptrace.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | |||
22 | #include <asm/asm.h> | ||
23 | #include <asm/branch.h> | ||
24 | #include <asm/break.h> | ||
25 | #include <asm/fpu.h> | ||
26 | #include <asm/fpu_emulator.h> | ||
27 | #include <asm/inst.h> | ||
28 | #include <asm/mips-r2-to-r6-emul.h> | ||
29 | #include <asm/local.h> | ||
30 | #include <asm/ptrace.h> | ||
31 | #include <asm/uaccess.h> | ||
32 | |||
33 | #ifdef CONFIG_64BIT | ||
34 | #define ADDIU "daddiu " | ||
35 | #define INS "dins " | ||
36 | #define EXT "dext " | ||
37 | #else | ||
38 | #define ADDIU "addiu " | ||
39 | #define INS "ins " | ||
40 | #define EXT "ext " | ||
41 | #endif /* CONFIG_64BIT */ | ||
42 | |||
43 | #define SB "sb " | ||
44 | #define LB "lb " | ||
45 | #define LL "ll " | ||
46 | #define SC "sc " | ||
47 | |||
48 | DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); | ||
49 | DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); | ||
50 | DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); | ||
51 | |||
52 | extern const unsigned int fpucondbit[8]; | ||
53 | |||
54 | #define MIPS_R2_EMUL_TOTAL_PASS 10 | ||
55 | |||
56 | int mipsr2_emulation = 0; | ||
57 | |||
58 | static int __init mipsr2emu_enable(char *s) | ||
59 | { | ||
60 | mipsr2_emulation = 1; | ||
61 | |||
62 | pr_info("MIPS R2-to-R6 Emulator Enabled!\n"); | ||
63 | |||
64 | return 1; | ||
65 | } | ||
66 | __setup("mipsr2emu", mipsr2emu_enable); | ||
67 | |||
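For reference, mipsr2_emulation defaults to 0 above, so even a kernel built with the CONFIG_MIPSR2_TO_R6_EMULATOR option used elsewhere in this series keeps the emulator dormant until the "mipsr2emu" parameter registered by __setup() appears on the kernel command line; a hypothetical set of boot arguments would be:

    console=ttyS0,115200 root=/dev/sda1 mipsr2emu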
68 | /** | ||
69 | * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay slot. | ||
70 | * This fast path is used for performance instead of the traditional, and much | ||
71 | * slower, stack trampoline (mips_dsemul) mechanism. | ||
72 | * @regs: Process register set | ||
73 | * @ir: Instruction | ||
74 | */ | ||
75 | static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | ||
76 | { | ||
77 | switch (MIPSInst_OPCODE(ir)) { | ||
78 | case addiu_op: | ||
79 | if (MIPSInst_RT(ir)) | ||
80 | regs->regs[MIPSInst_RT(ir)] = | ||
81 | (s32)regs->regs[MIPSInst_RS(ir)] + | ||
82 | (s32)MIPSInst_SIMM(ir); | ||
83 | return 0; | ||
84 | case daddiu_op: | ||
85 | if (config_enabled(CONFIG_32BIT)) | ||
86 | break; | ||
87 | |||
88 | if (MIPSInst_RT(ir)) | ||
89 | regs->regs[MIPSInst_RT(ir)] = | ||
90 | (s64)regs->regs[MIPSInst_RS(ir)] + | ||
91 | (s64)MIPSInst_SIMM(ir); | ||
92 | return 0; | ||
93 | case lwc1_op: | ||
94 | case swc1_op: | ||
95 | case cop1_op: | ||
96 | case cop1x_op: | ||
97 | /* FPU instructions in delay slot */ | ||
98 | return -SIGFPE; | ||
99 | case spec_op: | ||
100 | switch (MIPSInst_FUNC(ir)) { | ||
101 | case or_op: | ||
102 | if (MIPSInst_RD(ir)) | ||
103 | regs->regs[MIPSInst_RD(ir)] = | ||
104 | regs->regs[MIPSInst_RS(ir)] | | ||
105 | regs->regs[MIPSInst_RT(ir)]; | ||
106 | return 0; | ||
107 | case sll_op: | ||
108 | if (MIPSInst_RS(ir)) | ||
109 | break; | ||
110 | |||
111 | if (MIPSInst_RD(ir)) | ||
112 | regs->regs[MIPSInst_RD(ir)] = | ||
113 | (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << | ||
114 | MIPSInst_FD(ir)); | ||
115 | return 0; | ||
116 | case srl_op: | ||
117 | if (MIPSInst_RS(ir)) | ||
118 | break; | ||
119 | |||
120 | if (MIPSInst_RD(ir)) | ||
121 | regs->regs[MIPSInst_RD(ir)] = | ||
122 | (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> | ||
123 | MIPSInst_FD(ir)); | ||
124 | return 0; | ||
125 | case addu_op: | ||
126 | if (MIPSInst_FD(ir)) | ||
127 | break; | ||
128 | |||
129 | if (MIPSInst_RD(ir)) | ||
130 | regs->regs[MIPSInst_RD(ir)] = | ||
131 | (s32)((u32)regs->regs[MIPSInst_RS(ir)] + | ||
132 | (u32)regs->regs[MIPSInst_RT(ir)]); | ||
133 | return 0; | ||
134 | case subu_op: | ||
135 | if (MIPSInst_FD(ir)) | ||
136 | break; | ||
137 | |||
138 | if (MIPSInst_RD(ir)) | ||
139 | regs->regs[MIPSInst_RD(ir)] = | ||
140 | (s32)((u32)regs->regs[MIPSInst_RS(ir)] - | ||
141 | (u32)regs->regs[MIPSInst_RT(ir)]); | ||
142 | return 0; | ||
143 | case dsll_op: | ||
144 | if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) | ||
145 | break; | ||
146 | |||
147 | if (MIPSInst_RD(ir)) | ||
148 | regs->regs[MIPSInst_RD(ir)] = | ||
149 | (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << | ||
150 | MIPSInst_FD(ir)); | ||
151 | return 0; | ||
152 | case dsrl_op: | ||
153 | if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) | ||
154 | break; | ||
155 | |||
156 | if (MIPSInst_RD(ir)) | ||
157 | regs->regs[MIPSInst_RD(ir)] = | ||
158 | (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> | ||
159 | MIPSInst_FD(ir)); | ||
160 | return 0; | ||
161 | case daddu_op: | ||
162 | if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) | ||
163 | break; | ||
164 | |||
165 | if (MIPSInst_RD(ir)) | ||
166 | regs->regs[MIPSInst_RD(ir)] = | ||
167 | (u64)regs->regs[MIPSInst_RS(ir)] + | ||
168 | (u64)regs->regs[MIPSInst_RT(ir)]; | ||
169 | return 0; | ||
170 | case dsubu_op: | ||
171 | if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) | ||
172 | break; | ||
173 | |||
174 | if (MIPSInst_RD(ir)) | ||
175 | regs->regs[MIPSInst_RD(ir)] = | ||
176 | (s64)((u64)regs->regs[MIPSInst_RS(ir)] - | ||
177 | (u64)regs->regs[MIPSInst_RT(ir)]); | ||
178 | return 0; | ||
179 | } | ||
180 | break; | ||
181 | default: | ||
182 | pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", | ||
183 | ir, MIPSInst_OPCODE(ir)); | ||
184 | } | ||
185 | |||
186 | return SIGILL; | ||
187 | } | ||
188 | |||
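The decoder above leans entirely on the MIPSInst_* field accessors pulled in via asm/inst.h. As a reminder of the encoding they pick apart, a MIPS32 word splits into opcode/rs/rt/rd/sa/funct for R-type and opcode/rs/rt/immediate for I-type; note that the shift amount read through MIPSInst_FD() in the sll/srl cases occupies the same bits as the sa field. The sketch below is standalone and its helper names are local to the example, not the kernel macros:

#include <stdint.h>
#include <stdio.h>

/*
 *  31     26 25   21 20   16 15   11 10    6 5      0
 * +---------+-------+-------+-------+-------+--------+
 * | opcode  |  rs   |  rt   |  rd   |  sa   | funct  |  R-type
 * +---------+-------+-------+-------+-------+--------+
 * | opcode  |  rs   |  rt   |       immediate        |  I-type
 * +---------+-------+-------+------------------------+
 */
static uint32_t op(uint32_t ir)    { return ir >> 26; }
static uint32_t rs(uint32_t ir)    { return (ir >> 21) & 0x1f; }
static uint32_t rt(uint32_t ir)    { return (ir >> 16) & 0x1f; }
static uint32_t rd(uint32_t ir)    { return (ir >> 11) & 0x1f; }
static uint32_t sa(uint32_t ir)    { return (ir >> 6) & 0x1f; }
static uint32_t funct(uint32_t ir) { return ir & 0x3f; }

int main(void)
{
	uint32_t ir = 0x00851021;	/* addu $2, $4, $5 */

	/* Expect opcode 0 (SPECIAL), rs=4, rt=5, rd=2, sa=0, funct=0x21 (ADDU). */
	printf("op=%u rs=%u rt=%u rd=%u sa=%u funct=0x%x\n",
	       (unsigned)op(ir), (unsigned)rs(ir), (unsigned)rt(ir),
	       (unsigned)rd(ir), (unsigned)sa(ir), (unsigned)funct(ir));
	return 0;
}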
189 | /** | ||
190 | * movf_func - Emulate a MOVF instruction | ||
191 | * @regs: Process register set | ||
192 | * @ir: Instruction | ||
193 | * | ||
194 | * Returns 0 since it always succeeds. | ||
195 | */ | ||
196 | static int movf_func(struct pt_regs *regs, u32 ir) | ||
197 | { | ||
198 | u32 csr; | ||
199 | u32 cond; | ||
200 | |||
201 | csr = current->thread.fpu.fcr31; | ||
202 | cond = fpucondbit[MIPSInst_RT(ir) >> 2]; | ||
203 | if (((csr & cond) == 0) && MIPSInst_RD(ir)) | ||
204 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
205 | MIPS_R2_STATS(movs); | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * movt_func - Emulate a MOVT instruction | ||
211 | * @regs: Process register set | ||
212 | * @ir: Instruction | ||
213 | * | ||
214 | * Returns 0 since it always succeeds. | ||
215 | */ | ||
216 | static int movt_func(struct pt_regs *regs, u32 ir) | ||
217 | { | ||
218 | u32 csr; | ||
219 | u32 cond; | ||
220 | |||
221 | csr = current->thread.fpu.fcr31; | ||
222 | cond = fpucondbit[MIPSInst_RT(ir) >> 2]; | ||
223 | |||
224 | if (((csr & cond) != 0) && MIPSInst_RD(ir)) | ||
225 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
226 | |||
227 | MIPS_R2_STATS(movs); | ||
228 | |||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | /** | ||
233 | * jr_func - Emulate a JR instruction. | ||
234 | * @regs: Process register set | ||
235 | * @ir: Instruction | ||
236 | * | ||
237 | * Returns SIGILL if JR was in delay slot, SIGEMT if we | ||
238 | * can't compute the EPC, SIGSEGV if we can't access the | ||
239 | * userland instruction or 0 on success. | ||
240 | */ | ||
241 | static int jr_func(struct pt_regs *regs, u32 ir) | ||
242 | { | ||
243 | int err; | ||
244 | unsigned long cepc, epc, nepc; | ||
245 | u32 nir; | ||
246 | |||
247 | if (delay_slot(regs)) | ||
248 | return SIGILL; | ||
249 | |||
250 | /* EPC after the RI/JR instruction */ | ||
251 | nepc = regs->cp0_epc; | ||
252 | /* Roll back to the reserved R2 JR instruction */ | ||
253 | regs->cp0_epc -= 4; | ||
254 | epc = regs->cp0_epc; | ||
255 | err = __compute_return_epc(regs); | ||
256 | |||
257 | if (err < 0) | ||
258 | return SIGEMT; | ||
259 | |||
260 | |||
261 | /* Computed EPC */ | ||
262 | cepc = regs->cp0_epc; | ||
263 | |||
264 | /* Get DS instruction */ | ||
265 | err = __get_user(nir, (u32 __user *)nepc); | ||
266 | if (err) | ||
267 | return SIGSEGV; | ||
268 | |||
269 | MIPS_R2BR_STATS(jrs); | ||
270 | |||
271 | /* If nir == 0 (NOP), there is nothing else to do */ | ||
272 | if (nir) { | ||
273 | /* | ||
274 | * A negative err means an FPU instruction in the BD slot, | ||
275 | * zero err means 'BD-slot emulation done'. | ||
276 | * For anything else we fall back to trampoline emulation. | ||
277 | */ | ||
278 | err = mipsr6_emul(regs, nir); | ||
279 | if (err > 0) { | ||
280 | regs->cp0_epc = nepc; | ||
281 | err = mips_dsemul(regs, nir, cepc); | ||
282 | if (err == SIGILL) | ||
283 | err = SIGEMT; | ||
284 | MIPS_R2_STATS(dsemul); | ||
285 | } | ||
286 | } | ||
287 | |||
288 | return err; | ||
289 | } | ||
290 | |||
291 | /** | ||
292 | * movz_func - Emulate a MOVZ instruction | ||
293 | * @regs: Process register set | ||
294 | * @ir: Instruction | ||
295 | * | ||
296 | * Returns 0 since it always succeeds. | ||
297 | */ | ||
298 | static int movz_func(struct pt_regs *regs, u32 ir) | ||
299 | { | ||
300 | if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) | ||
301 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
302 | MIPS_R2_STATS(movs); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * movn_func - Emulate a MOVN instruction | ||
309 | * @regs: Process register set | ||
310 | * @ir: Instruction | ||
311 | * | ||
312 | * Returns 0 since it always succeeds. | ||
313 | */ | ||
314 | static int movn_func(struct pt_regs *regs, u32 ir) | ||
315 | { | ||
316 | if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) | ||
317 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
318 | MIPS_R2_STATS(movs); | ||
319 | |||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | /** | ||
324 | * mfhi_func - Emulate a MFHI instruction | ||
325 | * @regs: Process register set | ||
326 | * @ir: Instruction | ||
327 | * | ||
328 | * Returns 0 since it always succeeds. | ||
329 | */ | ||
330 | static int mfhi_func(struct pt_regs *regs, u32 ir) | ||
331 | { | ||
332 | if (MIPSInst_RD(ir)) | ||
333 | regs->regs[MIPSInst_RD(ir)] = regs->hi; | ||
334 | |||
335 | MIPS_R2_STATS(hilo); | ||
336 | |||
337 | return 0; | ||
338 | } | ||
339 | |||
340 | /** | ||
341 | * mthi_func - Emulate a MTHI instruction | ||
342 | * @regs: Process register set | ||
343 | * @ir: Instruction | ||
344 | * | ||
345 | * Returns 0 since it always succeeds. | ||
346 | */ | ||
347 | static int mthi_func(struct pt_regs *regs, u32 ir) | ||
348 | { | ||
349 | regs->hi = regs->regs[MIPSInst_RS(ir)]; | ||
350 | |||
351 | MIPS_R2_STATS(hilo); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * mflo_func - Emulate a MFLO instruction | ||
358 | * @regs: Process register set | ||
359 | * @ir: Instruction | ||
360 | * | ||
361 | * Returns 0 since it always succeeds. | ||
362 | */ | ||
363 | static int mflo_func(struct pt_regs *regs, u32 ir) | ||
364 | { | ||
365 | if (MIPSInst_RD(ir)) | ||
366 | regs->regs[MIPSInst_RD(ir)] = regs->lo; | ||
367 | |||
368 | MIPS_R2_STATS(hilo); | ||
369 | |||
370 | return 0; | ||
371 | } | ||
372 | |||
373 | /** | ||
374 | * mtlo_func - Emulate a MTLO instruction | ||
375 | * @regs: Process register set | ||
376 | * @ir: Instruction | ||
377 | * | ||
378 | * Returns 0 since it always succeeds. | ||
379 | */ | ||
380 | static int mtlo_func(struct pt_regs *regs, u32 ir) | ||
381 | { | ||
382 | regs->lo = regs->regs[MIPSInst_RS(ir)]; | ||
383 | |||
384 | MIPS_R2_STATS(hilo); | ||
385 | |||
386 | return 0; | ||
387 | } | ||
388 | |||
389 | /** | ||
390 | * mult_func - Emulate a MULT instruction | ||
391 | * @regs: Process register set | ||
392 | * @ir: Instruction | ||
393 | * | ||
394 | * Returns 0 since it always succeeds. | ||
395 | */ | ||
396 | static int mult_func(struct pt_regs *regs, u32 ir) | ||
397 | { | ||
398 | s64 res; | ||
399 | s32 rt, rs; | ||
400 | |||
401 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
402 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
403 | res = (s64)rt * (s64)rs; | ||
404 | |||
405 | rs = res; | ||
406 | regs->lo = (s64)rs; | ||
407 | rt = res >> 32; | ||
408 | res = (s64)rt; | ||
409 | regs->hi = res; | ||
410 | |||
411 | MIPS_R2_STATS(muls); | ||
412 | |||
413 | return 0; | ||
414 | } | ||
415 | |||
416 | /** | ||
417 | * multu_func - Emulate a MULTU instruction | ||
418 | * @regs: Process register set | ||
419 | * @ir: Instruction | ||
420 | * | ||
421 | * Returns 0 since it always succeeds. | ||
422 | */ | ||
423 | static int multu_func(struct pt_regs *regs, u32 ir) | ||
424 | { | ||
425 | u64 res; | ||
426 | u32 rt, rs; | ||
427 | |||
428 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
429 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
430 | res = (u64)rt * (u64)rs; | ||
431 | rt = res; | ||
432 | regs->lo = (s64)rt; | ||
433 | regs->hi = (s64)(res >> 32); | ||
434 | |||
435 | MIPS_R2_STATS(muls); | ||
436 | |||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /** | ||
441 | * div_func - Emulate a DIV instruction | ||
442 | * @regs: Process register set | ||
443 | * @ir: Instruction | ||
444 | * | ||
445 | * Returns 0 since it always succeeds. | ||
446 | */ | ||
447 | static int div_func(struct pt_regs *regs, u32 ir) | ||
448 | { | ||
449 | s32 rt, rs; | ||
450 | |||
451 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
452 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
453 | |||
454 | regs->lo = (s64)(rs / rt); | ||
455 | regs->hi = (s64)(rs % rt); | ||
456 | |||
457 | MIPS_R2_STATS(divs); | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | /** | ||
463 | * divu_func - Emulate a DIVU instruction | ||
464 | * @regs: Process register set | ||
465 | * @ir: Instruction | ||
466 | * | ||
467 | * Returns 0 since it always succeeds. | ||
468 | */ | ||
469 | static int divu_func(struct pt_regs *regs, u32 ir) | ||
470 | { | ||
471 | u32 rt, rs; | ||
472 | |||
473 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
474 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
475 | |||
476 | regs->lo = (s64)(rs / rt); | ||
477 | regs->hi = (s64)(rs % rt); | ||
478 | |||
479 | MIPS_R2_STATS(divs); | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | /** | ||
485 | * dmult_func - Emulate a DMULT instruction | ||
486 | * @regs: Process register set | ||
487 | * @ir: Instruction | ||
488 | * | ||
489 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
490 | */ | ||
491 | static int dmult_func(struct pt_regs *regs, u32 ir) | ||
492 | { | ||
493 | s64 res; | ||
494 | s64 rt, rs; | ||
495 | |||
496 | if (config_enabled(CONFIG_32BIT)) | ||
497 | return SIGILL; | ||
498 | |||
499 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
500 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
501 | res = rt * rs; | ||
502 | |||
503 | regs->lo = res; | ||
504 | __asm__ __volatile__( | ||
505 | "dmuh %0, %1, %2\t\n" | ||
506 | : "=r"(res) | ||
507 | : "r"(rt), "r"(rs)); | ||
508 | |||
509 | regs->hi = res; | ||
510 | |||
511 | MIPS_R2_STATS(muls); | ||
512 | |||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * dmultu_func - Emulate a DMULTU instruction | ||
518 | * @regs: Process register set | ||
519 | * @ir: Instruction | ||
520 | * | ||
521 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
522 | */ | ||
523 | static int dmultu_func(struct pt_regs *regs, u32 ir) | ||
524 | { | ||
525 | u64 res; | ||
526 | u64 rt, rs; | ||
527 | |||
528 | if (config_enabled(CONFIG_32BIT)) | ||
529 | return SIGILL; | ||
530 | |||
531 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
532 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
533 | res = rt * rs; | ||
534 | |||
535 | regs->lo = res; | ||
536 | __asm__ __volatile__( | ||
537 | "dmuhu %0, %1, %2\t\n" | ||
538 | : "=r"(res) | ||
539 | : "r"(rt), "r"(rs)); | ||
540 | |||
541 | regs->hi = res; | ||
542 | |||
543 | MIPS_R2_STATS(muls); | ||
544 | |||
545 | return 0; | ||
546 | } | ||
547 | |||
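dmult_func() and dmultu_func() above recover the high 64 bits of the product with the R6 dmuh/dmuhu instructions, since DMULT/DMULTU themselves no longer exist on R6. Purely as an illustration of the hi/lo split they compute (and assuming a compiler that provides __int128 on 64-bit targets, as GCC and Clang do), the same result can be written in C; this is a sketch, not a proposed change to the patch:

#include <stdint.h>
#include <stdio.h>

/* Split the full 128-bit signed product into the HI/LO halves DMULT defines. */
static void dmult_split(int64_t rs, int64_t rt, uint64_t *hi, uint64_t *lo)
{
	unsigned __int128 prod = (unsigned __int128)((__int128)rs * rt);

	*lo = (uint64_t)prod;		/* what DMULT leaves in LO */
	*hi = (uint64_t)(prod >> 64);	/* what DMULT leaves in HI */
}

int main(void)
{
	uint64_t hi, lo;

	dmult_split(-3, 5, &hi, &lo);
	printf("hi=%016llx lo=%016llx\n",
	       (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}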
548 | /** | ||
549 | * ddiv_func - Emulate a DDIV instruction | ||
550 | * @regs: Process register set | ||
551 | * @ir: Instruction | ||
552 | * | ||
553 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
554 | */ | ||
555 | static int ddiv_func(struct pt_regs *regs, u32 ir) | ||
556 | { | ||
557 | s64 rt, rs; | ||
558 | |||
559 | if (config_enabled(CONFIG_32BIT)) | ||
560 | return SIGILL; | ||
561 | |||
562 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
563 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
564 | |||
565 | regs->lo = rs / rt; | ||
566 | regs->hi = rs % rt; | ||
567 | |||
568 | MIPS_R2_STATS(divs); | ||
569 | |||
570 | return 0; | ||
571 | } | ||
572 | |||
573 | /** | ||
574 | * ddivu_func - Emulate a DDIVU instruction | ||
575 | * @regs: Process register set | ||
576 | * @ir: Instruction | ||
577 | * | ||
578 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
579 | */ | ||
580 | static int ddivu_func(struct pt_regs *regs, u32 ir) | ||
581 | { | ||
582 | u64 rt, rs; | ||
583 | |||
584 | if (config_enabled(CONFIG_32BIT)) | ||
585 | return SIGILL; | ||
586 | |||
587 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
588 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
589 | |||
590 | regs->lo = rs / rt; | ||
591 | regs->hi = rs % rt; | ||
592 | |||
593 | MIPS_R2_STATS(divs); | ||
594 | |||
595 | return 0; | ||
596 | } | ||
597 | |||
598 | /* R6 removed instructions for the SPECIAL opcode */ | ||
599 | static struct r2_decoder_table spec_op_table[] = { | ||
600 | { 0xfc1ff83f, 0x00000008, jr_func }, | ||
601 | { 0xfc00ffff, 0x00000018, mult_func }, | ||
602 | { 0xfc00ffff, 0x00000019, multu_func }, | ||
603 | { 0xfc00ffff, 0x0000001c, dmult_func }, | ||
604 | { 0xfc00ffff, 0x0000001d, dmultu_func }, | ||
605 | { 0xffff07ff, 0x00000010, mfhi_func }, | ||
606 | { 0xfc1fffff, 0x00000011, mthi_func }, | ||
607 | { 0xffff07ff, 0x00000012, mflo_func }, | ||
608 | { 0xfc1fffff, 0x00000013, mtlo_func }, | ||
609 | { 0xfc0307ff, 0x00000001, movf_func }, | ||
610 | { 0xfc0307ff, 0x00010001, movt_func }, | ||
611 | { 0xfc0007ff, 0x0000000a, movz_func }, | ||
612 | { 0xfc0007ff, 0x0000000b, movn_func }, | ||
613 | { 0xfc00ffff, 0x0000001a, div_func }, | ||
614 | { 0xfc00ffff, 0x0000001b, divu_func }, | ||
615 | { 0xfc00ffff, 0x0000001e, ddiv_func }, | ||
616 | { 0xfc00ffff, 0x0000001f, ddivu_func }, | ||
617 | {} | ||
618 | }; | ||
619 | |||
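Each entry in the tables above pairs a mask with an expected bit pattern: an instruction word is routed to the handler when (inst & mask) == code, which is exactly the walk mipsr2_find_op_func() performs further down. A tiny standalone check against the jr_func entry, using a hand-encoded "jr $ra" word (0x03e00008, my encoding rather than a value quoted from the patch), looks like this:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mask = 0xfc1ff83f;	/* fixes opcode, rt, rd and funct */
	uint32_t code = 0x00000008;	/* SPECIAL opcode, funct = JR */
	uint32_t inst = 0x03e00008;	/* jr $ra (rs = 31, hint = 0) */

	if ((inst & mask) == code)
		printf("0x%08x would be routed to jr_func\n", inst);
	return 0;
}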
620 | /** | ||
621 | * madd_func - Emulate a MADD instruction | ||
622 | * @regs: Process register set | ||
623 | * @ir: Instruction | ||
624 | * | ||
625 | * Returns 0 since it always succeeds. | ||
626 | */ | ||
627 | static int madd_func(struct pt_regs *regs, u32 ir) | ||
628 | { | ||
629 | s64 res; | ||
630 | s32 rt, rs; | ||
631 | |||
632 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
633 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
634 | res = (s64)rt * (s64)rs; | ||
635 | rt = regs->hi; | ||
636 | rs = regs->lo; | ||
637 | res += ((((s64)rt) << 32) | (u32)rs); | ||
638 | |||
639 | rt = res; | ||
640 | regs->lo = (s64)rt; | ||
641 | rs = res >> 32; | ||
642 | regs->hi = (s64)rs; | ||
643 | |||
644 | MIPS_R2_STATS(dsps); | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
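A compact way to read madd_func() above, and its maddu/msub/msubu siblings below, is that HI and LO act as a single 64-bit accumulator to which the 32x32-bit product is added or from which it is subtracted. The user-space sketch below restates that with explicit unsigned wrap-around; it illustrates the semantics and is not kernel code:

#include <stdint.h>
#include <stdio.h>

/* (HI:LO) += rs * rt, with the signed product wrapped into the accumulator. */
static void madd(int32_t rs, int32_t rt, uint32_t *hi, uint32_t *lo)
{
	uint64_t acc = ((uint64_t)*hi << 32) | *lo;

	acc += (uint64_t)((int64_t)rs * (int64_t)rt);
	*lo = (uint32_t)acc;
	*hi = (uint32_t)(acc >> 32);
}

int main(void)
{
	uint32_t hi = 0, lo = 0;

	madd(100000, 100000, &hi, &lo);	/* accumulator becomes 10^10 */
	madd(3, 4, &hi, &lo);		/* ... plus 12 */
	printf("hi=0x%08x lo=0x%08x\n", (unsigned)hi, (unsigned)lo);
	return 0;
}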
649 | /** | ||
650 | * maddu_func - Emulate a MADDU instruction | ||
651 | * @regs: Process register set | ||
652 | * @ir: Instruction | ||
653 | * | ||
654 | * Returns 0 since it always succeeds. | ||
655 | */ | ||
656 | static int maddu_func(struct pt_regs *regs, u32 ir) | ||
657 | { | ||
658 | u64 res; | ||
659 | u32 rt, rs; | ||
660 | |||
661 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
662 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
663 | res = (u64)rt * (u64)rs; | ||
664 | rt = regs->hi; | ||
665 | rs = regs->lo; | ||
666 | res += ((((s64)rt) << 32) | (u32)rs); | ||
667 | |||
668 | rt = res; | ||
669 | regs->lo = (s64)rt; | ||
670 | rs = res >> 32; | ||
671 | regs->hi = (s64)rs; | ||
672 | |||
673 | MIPS_R2_STATS(dsps); | ||
674 | |||
675 | return 0; | ||
676 | } | ||
677 | |||
678 | /** | ||
679 | * msub_func - Emulate a MSUB instruction | ||
680 | * @regs: Process register set | ||
681 | * @ir: Instruction | ||
682 | * | ||
683 | * Returns 0 since it always succeeds. | ||
684 | */ | ||
685 | static int msub_func(struct pt_regs *regs, u32 ir) | ||
686 | { | ||
687 | s64 res; | ||
688 | s32 rt, rs; | ||
689 | |||
690 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
691 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
692 | res = (s64)rt * (s64)rs; | ||
693 | rt = regs->hi; | ||
694 | rs = regs->lo; | ||
695 | res = ((((s64)rt) << 32) | (u32)rs) - res; | ||
696 | |||
697 | rt = res; | ||
698 | regs->lo = (s64)rt; | ||
699 | rs = res >> 32; | ||
700 | regs->hi = (s64)rs; | ||
701 | |||
702 | MIPS_R2_STATS(dsps); | ||
703 | |||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | /** | ||
708 | * msubu_func - Emulate a MSUBU instruction | ||
709 | * @regs: Process register set | ||
710 | * @ir: Instruction | ||
711 | * | ||
712 | * Returns 0 since it always succeeds. | ||
713 | */ | ||
714 | static int msubu_func(struct pt_regs *regs, u32 ir) | ||
715 | { | ||
716 | u64 res; | ||
717 | u32 rt, rs; | ||
718 | |||
719 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
720 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
721 | res = (u64)rt * (u64)rs; | ||
722 | rt = regs->hi; | ||
723 | rs = regs->lo; | ||
724 | res = ((((s64)rt) << 32) | (u32)rs) - res; | ||
725 | |||
726 | rt = res; | ||
727 | regs->lo = (s64)rt; | ||
728 | rs = res >> 32; | ||
729 | regs->hi = (s64)rs; | ||
730 | |||
731 | MIPS_R2_STATS(dsps); | ||
732 | |||
733 | return 0; | ||
734 | } | ||
735 | |||
736 | /** | ||
737 | * mul_func - Emulate a MUL instruction | ||
738 | * @regs: Process register set | ||
739 | * @ir: Instruction | ||
740 | * | ||
741 | * Returns 0 since it always succeeds. | ||
742 | */ | ||
743 | static int mul_func(struct pt_regs *regs, u32 ir) | ||
744 | { | ||
745 | s64 res; | ||
746 | s32 rt, rs; | ||
747 | |||
748 | if (!MIPSInst_RD(ir)) | ||
749 | return 0; | ||
750 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
751 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
752 | res = (s64)rt * (s64)rs; | ||
753 | |||
754 | rs = res; | ||
755 | regs->regs[MIPSInst_RD(ir)] = (s64)rs; | ||
756 | |||
757 | MIPS_R2_STATS(muls); | ||
758 | |||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | /** | ||
763 | * clz_func - Emulate a CLZ instruction | ||
764 | * @regs: Process register set | ||
765 | * @ir: Instruction | ||
766 | * | ||
767 | * Returns 0 since it always succeeds. | ||
768 | */ | ||
769 | static int clz_func(struct pt_regs *regs, u32 ir) | ||
770 | { | ||
771 | u32 res; | ||
772 | u32 rs; | ||
773 | |||
774 | if (!MIPSInst_RD(ir)) | ||
775 | return 0; | ||
776 | |||
777 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
778 | __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); | ||
779 | regs->regs[MIPSInst_RD(ir)] = res; | ||
780 | |||
781 | MIPS_R2_STATS(bops); | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | /** | ||
787 | * clo_func - Emulate a CLO instruction | ||
788 | * @regs: Process register set | ||
789 | * @ir: Instruction | ||
790 | * | ||
791 | * Returns 0 since it always succeeds. | ||
792 | */ | ||
793 | |||
794 | static int clo_func(struct pt_regs *regs, u32 ir) | ||
795 | { | ||
796 | u32 res; | ||
797 | u32 rs; | ||
798 | |||
799 | if (!MIPSInst_RD(ir)) | ||
800 | return 0; | ||
801 | |||
802 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
803 | __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); | ||
804 | regs->regs[MIPSInst_RD(ir)] = res; | ||
805 | |||
806 | MIPS_R2_STATS(bops); | ||
807 | |||
808 | return 0; | ||
809 | } | ||
810 | |||
811 | /** | ||
812 | * dclz_func - Emulate a DCLZ instruction | ||
813 | * @regs: Process register set | ||
814 | * @ir: Instruction | ||
815 | * | ||
816 | * Returns 0 since it always succeeds. | ||
817 | */ | ||
818 | static int dclz_func(struct pt_regs *regs, u32 ir) | ||
819 | { | ||
820 | u64 res; | ||
821 | u64 rs; | ||
822 | |||
823 | if (config_enabled(CONFIG_32BIT)) | ||
824 | return SIGILL; | ||
825 | |||
826 | if (!MIPSInst_RD(ir)) | ||
827 | return 0; | ||
828 | |||
829 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
830 | __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); | ||
831 | regs->regs[MIPSInst_RD(ir)] = res; | ||
832 | |||
833 | MIPS_R2_STATS(bops); | ||
834 | |||
835 | return 0; | ||
836 | } | ||
837 | |||
838 | /** | ||
839 | * dclo_func - Emulate a DCLO instruction | ||
840 | * @regs: Process register set | ||
841 | * @ir: Instruction | ||
842 | * | ||
843 | * Returns 0 since it always succeeds. | ||
844 | */ | ||
845 | static int dclo_func(struct pt_regs *regs, u32 ir) | ||
846 | { | ||
847 | u64 res; | ||
848 | u64 rs; | ||
849 | |||
850 | if (config_enabled(CONFIG_32BIT)) | ||
851 | return SIGILL; | ||
852 | |||
853 | if (!MIPSInst_RD(ir)) | ||
854 | return 0; | ||
855 | |||
856 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
857 | __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); | ||
858 | regs->regs[MIPSInst_RD(ir)] = res; | ||
859 | |||
860 | MIPS_R2_STATS(bops); | ||
861 | |||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | /* R6 removed instructions for the SPECIAL2 opcode */ | ||
866 | static struct r2_decoder_table spec2_op_table[] = { | ||
867 | { 0xfc00ffff, 0x70000000, madd_func }, | ||
868 | { 0xfc00ffff, 0x70000001, maddu_func }, | ||
869 | { 0xfc0007ff, 0x70000002, mul_func }, | ||
870 | { 0xfc00ffff, 0x70000004, msub_func }, | ||
871 | { 0xfc00ffff, 0x70000005, msubu_func }, | ||
872 | { 0xfc0007ff, 0x70000020, clz_func }, | ||
873 | { 0xfc0007ff, 0x70000021, clo_func }, | ||
874 | { 0xfc0007ff, 0x70000024, dclz_func }, | ||
875 | { 0xfc0007ff, 0x70000025, dclo_func }, | ||
876 | { } | ||
877 | }; | ||
878 | |||
879 | static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, | ||
880 | struct r2_decoder_table *table) | ||
881 | { | ||
882 | struct r2_decoder_table *p; | ||
883 | int err; | ||
884 | |||
885 | for (p = table; p->func; p++) { | ||
886 | if ((inst & p->mask) == p->code) { | ||
887 | err = (p->func)(regs, inst); | ||
888 | return err; | ||
889 | } | ||
890 | } | ||
891 | return SIGILL; | ||
892 | } | ||
893 | |||
894 | /** | ||
895 | * mipsr2_decoder: Decode and emulate a MIPS R2 instruction | ||
896 | * @regs: Process register set | ||
897 | * @inst: Instruction to decode and emulate | ||
898 | */ | ||
899 | int mipsr2_decoder(struct pt_regs *regs, u32 inst) | ||
900 | { | ||
901 | int err = 0; | ||
902 | unsigned long vaddr; | ||
903 | u32 nir; | ||
904 | unsigned long cpc, epc, nepc, r31, res, rs, rt; | ||
905 | |||
906 | void __user *fault_addr = NULL; | ||
907 | int pass = 0; | ||
908 | |||
909 | repeat: | ||
910 | r31 = regs->regs[31]; | ||
911 | epc = regs->cp0_epc; | ||
912 | err = compute_return_epc(regs); | ||
913 | if (err < 0) { | ||
914 | BUG(); | ||
915 | return SIGEMT; | ||
916 | } | ||
917 | pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n", | ||
918 | inst, epc, pass); | ||
919 | |||
920 | switch (MIPSInst_OPCODE(inst)) { | ||
921 | case spec_op: | ||
922 | err = mipsr2_find_op_func(regs, inst, spec_op_table); | ||
923 | if (err < 0) { | ||
924 | /* FPU instruction under JR */ | ||
925 | regs->cp0_cause |= CAUSEF_BD; | ||
926 | goto fpu_emul; | ||
927 | } | ||
928 | break; | ||
929 | case spec2_op: | ||
930 | err = mipsr2_find_op_func(regs, inst, spec2_op_table); | ||
931 | break; | ||
932 | case bcond_op: | ||
933 | rt = MIPSInst_RT(inst); | ||
934 | rs = MIPSInst_RS(inst); | ||
935 | switch (rt) { | ||
936 | case tgei_op: | ||
937 | if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) | ||
938 | do_trap_or_bp(regs, 0, "TGEI"); | ||
939 | |||
940 | MIPS_R2_STATS(traps); | ||
941 | |||
942 | break; | ||
943 | case tgeiu_op: | ||
944 | if (regs->regs[rs] >= MIPSInst_UIMM(inst)) | ||
945 | do_trap_or_bp(regs, 0, "TGEIU"); | ||
946 | |||
947 | MIPS_R2_STATS(traps); | ||
948 | |||
949 | break; | ||
950 | case tlti_op: | ||
951 | if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) | ||
952 | do_trap_or_bp(regs, 0, "TLTI"); | ||
953 | |||
954 | MIPS_R2_STATS(traps); | ||
955 | |||
956 | break; | ||
957 | case tltiu_op: | ||
958 | if (regs->regs[rs] < MIPSInst_UIMM(inst)) | ||
959 | do_trap_or_bp(regs, 0, "TLTIU"); | ||
960 | |||
961 | MIPS_R2_STATS(traps); | ||
962 | |||
963 | break; | ||
964 | case teqi_op: | ||
965 | if (regs->regs[rs] == MIPSInst_SIMM(inst)) | ||
966 | do_trap_or_bp(regs, 0, "TEQI"); | ||
967 | |||
968 | MIPS_R2_STATS(traps); | ||
969 | |||
970 | break; | ||
971 | case tnei_op: | ||
972 | if (regs->regs[rs] != MIPSInst_SIMM(inst)) | ||
973 | do_trap_or_bp(regs, 0, "TNEI"); | ||
974 | |||
975 | MIPS_R2_STATS(traps); | ||
976 | |||
977 | break; | ||
978 | case bltzl_op: | ||
979 | case bgezl_op: | ||
980 | case bltzall_op: | ||
981 | case bgezall_op: | ||
982 | if (delay_slot(regs)) { | ||
983 | err = SIGILL; | ||
984 | break; | ||
985 | } | ||
986 | regs->regs[31] = r31; | ||
987 | regs->cp0_epc = epc; | ||
988 | err = __compute_return_epc(regs); | ||
989 | if (err < 0) | ||
990 | return SIGEMT; | ||
991 | if (err != BRANCH_LIKELY_TAKEN) | ||
992 | break; | ||
993 | cpc = regs->cp0_epc; | ||
994 | nepc = epc + 4; | ||
995 | err = __get_user(nir, (u32 __user *)nepc); | ||
996 | if (err) { | ||
997 | err = SIGSEGV; | ||
998 | break; | ||
999 | } | ||
1000 | /* | ||
1001 | * This will probably be optimized away when | ||
1002 | * CONFIG_DEBUG_FS is not enabled | ||
1003 | */ | ||
1004 | switch (rt) { | ||
1005 | case bltzl_op: | ||
1006 | MIPS_R2BR_STATS(bltzl); | ||
1007 | break; | ||
1008 | case bgezl_op: | ||
1009 | MIPS_R2BR_STATS(bgezl); | ||
1010 | break; | ||
1011 | case bltzall_op: | ||
1012 | MIPS_R2BR_STATS(bltzall); | ||
1013 | break; | ||
1014 | case bgezall_op: | ||
1015 | MIPS_R2BR_STATS(bgezall); | ||
1016 | break; | ||
1017 | } | ||
1018 | |||
1019 | switch (MIPSInst_OPCODE(nir)) { | ||
1020 | case cop1_op: | ||
1021 | case cop1x_op: | ||
1022 | case lwc1_op: | ||
1023 | case swc1_op: | ||
1024 | regs->cp0_cause |= CAUSEF_BD; | ||
1025 | goto fpu_emul; | ||
1026 | } | ||
1027 | if (nir) { | ||
1028 | err = mipsr6_emul(regs, nir); | ||
1029 | if (err > 0) { | ||
1030 | err = mips_dsemul(regs, nir, cpc); | ||
1031 | if (err == SIGILL) | ||
1032 | err = SIGEMT; | ||
1033 | MIPS_R2_STATS(dsemul); | ||
1034 | } | ||
1035 | } | ||
1036 | break; | ||
1037 | case bltzal_op: | ||
1038 | case bgezal_op: | ||
1039 | if (delay_slot(regs)) { | ||
1040 | err = SIGILL; | ||
1041 | break; | ||
1042 | } | ||
1043 | regs->regs[31] = r31; | ||
1044 | regs->cp0_epc = epc; | ||
1045 | err = __compute_return_epc(regs); | ||
1046 | if (err < 0) | ||
1047 | return SIGEMT; | ||
1048 | cpc = regs->cp0_epc; | ||
1049 | nepc = epc + 4; | ||
1050 | err = __get_user(nir, (u32 __user *)nepc); | ||
1051 | if (err) { | ||
1052 | err = SIGSEGV; | ||
1053 | break; | ||
1054 | } | ||
1055 | /* | ||
1056 | * This will probably be optimized away when | ||
1057 | * CONFIG_DEBUG_FS is not enabled | ||
1058 | */ | ||
1059 | switch (rt) { | ||
1060 | case bltzal_op: | ||
1061 | MIPS_R2BR_STATS(bltzal); | ||
1062 | break; | ||
1063 | case bgezal_op: | ||
1064 | MIPS_R2BR_STATS(bgezal); | ||
1065 | break; | ||
1066 | } | ||
1067 | |||
1068 | switch (MIPSInst_OPCODE(nir)) { | ||
1069 | case cop1_op: | ||
1070 | case cop1x_op: | ||
1071 | case lwc1_op: | ||
1072 | case swc1_op: | ||
1073 | regs->cp0_cause |= CAUSEF_BD; | ||
1074 | goto fpu_emul; | ||
1075 | } | ||
1076 | if (nir) { | ||
1077 | err = mipsr6_emul(regs, nir); | ||
1078 | if (err > 0) { | ||
1079 | err = mips_dsemul(regs, nir, cpc); | ||
1080 | if (err == SIGILL) | ||
1081 | err = SIGEMT; | ||
1082 | MIPS_R2_STATS(dsemul); | ||
1083 | } | ||
1084 | } | ||
1085 | break; | ||
1086 | default: | ||
1087 | regs->regs[31] = r31; | ||
1088 | regs->cp0_epc = epc; | ||
1089 | err = SIGILL; | ||
1090 | break; | ||
1091 | } | ||
1092 | break; | ||
1093 | |||
1094 | case beql_op: | ||
1095 | case bnel_op: | ||
1096 | case blezl_op: | ||
1097 | case bgtzl_op: | ||
1098 | if (delay_slot(regs)) { | ||
1099 | err = SIGILL; | ||
1100 | break; | ||
1101 | } | ||
1102 | regs->regs[31] = r31; | ||
1103 | regs->cp0_epc = epc; | ||
1104 | err = __compute_return_epc(regs); | ||
1105 | if (err < 0) | ||
1106 | return SIGEMT; | ||
1107 | if (err != BRANCH_LIKELY_TAKEN) | ||
1108 | break; | ||
1109 | cpc = regs->cp0_epc; | ||
1110 | nepc = epc + 4; | ||
1111 | err = __get_user(nir, (u32 __user *)nepc); | ||
1112 | if (err) { | ||
1113 | err = SIGSEGV; | ||
1114 | break; | ||
1115 | } | ||
1116 | /* | ||
1117 | * This will probably be optimized away when | ||
1118 | * CONFIG_DEBUG_FS is not enabled | ||
1119 | */ | ||
1120 | switch (MIPSInst_OPCODE(inst)) { | ||
1121 | case beql_op: | ||
1122 | MIPS_R2BR_STATS(beql); | ||
1123 | break; | ||
1124 | case bnel_op: | ||
1125 | MIPS_R2BR_STATS(bnel); | ||
1126 | break; | ||
1127 | case blezl_op: | ||
1128 | MIPS_R2BR_STATS(blezl); | ||
1129 | break; | ||
1130 | case bgtzl_op: | ||
1131 | MIPS_R2BR_STATS(bgtzl); | ||
1132 | break; | ||
1133 | } | ||
1134 | |||
1135 | switch (MIPSInst_OPCODE(nir)) { | ||
1136 | case cop1_op: | ||
1137 | case cop1x_op: | ||
1138 | case lwc1_op: | ||
1139 | case swc1_op: | ||
1140 | regs->cp0_cause |= CAUSEF_BD; | ||
1141 | goto fpu_emul; | ||
1142 | } | ||
1143 | if (nir) { | ||
1144 | err = mipsr6_emul(regs, nir); | ||
1145 | if (err > 0) { | ||
1146 | err = mips_dsemul(regs, nir, cpc); | ||
1147 | if (err == SIGILL) | ||
1148 | err = SIGEMT; | ||
1149 | MIPS_R2_STATS(dsemul); | ||
1150 | } | ||
1151 | } | ||
1152 | break; | ||
1153 | case lwc1_op: | ||
1154 | case swc1_op: | ||
1155 | case cop1_op: | ||
1156 | case cop1x_op: | ||
1157 | fpu_emul: | ||
1158 | regs->regs[31] = r31; | ||
1159 | regs->cp0_epc = epc; | ||
1160 | if (!used_math()) { /* First time FPU user. */ | ||
1161 | err = init_fpu(); | ||
1162 | set_used_math(); | ||
1163 | } | ||
1164 | lose_fpu(1); /* Save FPU state for the emulator. */ | ||
1165 | |||
1166 | err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, | ||
1167 | &fault_addr); | ||
1168 | |||
1169 | /* | ||
1170 | * This is a tricky issue: lose_fpu() uses LL/SC atomics if the | ||
1171 | * FPU is owned, which effectively cancels a user-level LL/SC. | ||
1172 | * So it might seem logical not to restore FPU ownership here. | ||
1173 | * However, sequences of multiple FPU instructions are far more | ||
1174 | * common than an LL-FPU-SC pattern, so we prefer to loop here | ||
1175 | * until the next scheduler cycle cancels FPU ownership. | ||
1176 | */ | ||
1177 | own_fpu(1); /* Restore FPU state. */ | ||
1178 | |||
1179 | if (err) | ||
1180 | current->thread.cp0_baduaddr = (unsigned long)fault_addr; | ||
1181 | |||
1182 | MIPS_R2_STATS(fpus); | ||
1183 | |||
1184 | break; | ||
1185 | |||
1186 | case lwl_op: | ||
1187 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1188 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1189 | if (!access_ok(VERIFY_READ, vaddr, 4)) { | ||
1190 | current->thread.cp0_baduaddr = vaddr; | ||
1191 | err = SIGSEGV; | ||
1192 | break; | ||
1193 | } | ||
1194 | __asm__ __volatile__( | ||
1195 | " .set push\n" | ||
1196 | " .set reorder\n" | ||
1197 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1198 | "1:" LB "%1, 0(%2)\n" | ||
1199 | INS "%0, %1, 24, 8\n" | ||
1200 | " andi %1, %2, 0x3\n" | ||
1201 | " beq $0, %1, 9f\n" | ||
1202 | ADDIU "%2, %2, -1\n" | ||
1203 | "2:" LB "%1, 0(%2)\n" | ||
1204 | INS "%0, %1, 16, 8\n" | ||
1205 | " andi %1, %2, 0x3\n" | ||
1206 | " beq $0, %1, 9f\n" | ||
1207 | ADDIU "%2, %2, -1\n" | ||
1208 | "3:" LB "%1, 0(%2)\n" | ||
1209 | INS "%0, %1, 8, 8\n" | ||
1210 | " andi %1, %2, 0x3\n" | ||
1211 | " beq $0, %1, 9f\n" | ||
1212 | ADDIU "%2, %2, -1\n" | ||
1213 | "4:" LB "%1, 0(%2)\n" | ||
1214 | INS "%0, %1, 0, 8\n" | ||
1215 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1216 | "1:" LB "%1, 0(%2)\n" | ||
1217 | INS "%0, %1, 24, 8\n" | ||
1218 | ADDIU "%2, %2, 1\n" | ||
1219 | " andi %1, %2, 0x3\n" | ||
1220 | " beq $0, %1, 9f\n" | ||
1221 | "2:" LB "%1, 0(%2)\n" | ||
1222 | INS "%0, %1, 16, 8\n" | ||
1223 | ADDIU "%2, %2, 1\n" | ||
1224 | " andi %1, %2, 0x3\n" | ||
1225 | " beq $0, %1, 9f\n" | ||
1226 | "3:" LB "%1, 0(%2)\n" | ||
1227 | INS "%0, %1, 8, 8\n" | ||
1228 | ADDIU "%2, %2, 1\n" | ||
1229 | " andi %1, %2, 0x3\n" | ||
1230 | " beq $0, %1, 9f\n" | ||
1231 | "4:" LB "%1, 0(%2)\n" | ||
1232 | INS "%0, %1, 0, 8\n" | ||
1233 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1234 | "9: sll %0, %0, 0\n" | ||
1235 | "10:\n" | ||
1236 | " .insn\n" | ||
1237 | " .section .fixup,\"ax\"\n" | ||
1238 | "8: li %3,%4\n" | ||
1239 | " j 10b\n" | ||
1240 | " .previous\n" | ||
1241 | " .section __ex_table,\"a\"\n" | ||
1242 | " .word 1b,8b\n" | ||
1243 | " .word 2b,8b\n" | ||
1244 | " .word 3b,8b\n" | ||
1245 | " .word 4b,8b\n" | ||
1246 | " .previous\n" | ||
1247 | " .set pop\n" | ||
1248 | : "+&r"(rt), "=&r"(rs), | ||
1249 | "+&r"(vaddr), "+&r"(err) | ||
1250 | : "i"(SIGSEGV)); | ||
1251 | |||
1252 | if (MIPSInst_RT(inst) && !err) | ||
1253 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1254 | |||
1255 | MIPS_R2_STATS(loads); | ||
1256 | |||
1257 | break; | ||
1258 | |||
1259 | case lwr_op: | ||
1260 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1261 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1262 | if (!access_ok(VERIFY_READ, vaddr, 4)) { | ||
1263 | current->thread.cp0_baduaddr = vaddr; | ||
1264 | err = SIGSEGV; | ||
1265 | break; | ||
1266 | } | ||
1267 | __asm__ __volatile__( | ||
1268 | " .set push\n" | ||
1269 | " .set reorder\n" | ||
1270 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1271 | "1:" LB "%1, 0(%2)\n" | ||
1272 | INS "%0, %1, 0, 8\n" | ||
1273 | ADDIU "%2, %2, 1\n" | ||
1274 | " andi %1, %2, 0x3\n" | ||
1275 | " beq $0, %1, 9f\n" | ||
1276 | "2:" LB "%1, 0(%2)\n" | ||
1277 | INS "%0, %1, 8, 8\n" | ||
1278 | ADDIU "%2, %2, 1\n" | ||
1279 | " andi %1, %2, 0x3\n" | ||
1280 | " beq $0, %1, 9f\n" | ||
1281 | "3:" LB "%1, 0(%2)\n" | ||
1282 | INS "%0, %1, 16, 8\n" | ||
1283 | ADDIU "%2, %2, 1\n" | ||
1284 | " andi %1, %2, 0x3\n" | ||
1285 | " beq $0, %1, 9f\n" | ||
1286 | "4:" LB "%1, 0(%2)\n" | ||
1287 | INS "%0, %1, 24, 8\n" | ||
1288 | " sll %0, %0, 0\n" | ||
1289 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1290 | "1:" LB "%1, 0(%2)\n" | ||
1291 | INS "%0, %1, 0, 8\n" | ||
1292 | " andi %1, %2, 0x3\n" | ||
1293 | " beq $0, %1, 9f\n" | ||
1294 | ADDIU "%2, %2, -1\n" | ||
1295 | "2:" LB "%1, 0(%2)\n" | ||
1296 | INS "%0, %1, 8, 8\n" | ||
1297 | " andi %1, %2, 0x3\n" | ||
1298 | " beq $0, %1, 9f\n" | ||
1299 | ADDIU "%2, %2, -1\n" | ||
1300 | "3:" LB "%1, 0(%2)\n" | ||
1301 | INS "%0, %1, 16, 8\n" | ||
1302 | " andi %1, %2, 0x3\n" | ||
1303 | " beq $0, %1, 9f\n" | ||
1304 | ADDIU "%2, %2, -1\n" | ||
1305 | "4:" LB "%1, 0(%2)\n" | ||
1306 | INS "%0, %1, 24, 8\n" | ||
1307 | " sll %0, %0, 0\n" | ||
1308 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1309 | "9:\n" | ||
1310 | "10:\n" | ||
1311 | " .insn\n" | ||
1312 | " .section .fixup,\"ax\"\n" | ||
1313 | "8: li %3,%4\n" | ||
1314 | " j 10b\n" | ||
1315 | " .previous\n" | ||
1316 | " .section __ex_table,\"a\"\n" | ||
1317 | " .word 1b,8b\n" | ||
1318 | " .word 2b,8b\n" | ||
1319 | " .word 3b,8b\n" | ||
1320 | " .word 4b,8b\n" | ||
1321 | " .previous\n" | ||
1322 | " .set pop\n" | ||
1323 | : "+&r"(rt), "=&r"(rs), | ||
1324 | "+&r"(vaddr), "+&r"(err) | ||
1325 | : "i"(SIGSEGV)); | ||
1326 | if (MIPSInst_RT(inst) && !err) | ||
1327 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1328 | |||
1329 | MIPS_R2_STATS(loads); | ||
1330 | |||
1331 | break; | ||
1332 | |||
1333 | case swl_op: | ||
1334 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1335 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1336 | if (!access_ok(VERIFY_WRITE, vaddr, 4)) { | ||
1337 | current->thread.cp0_baduaddr = vaddr; | ||
1338 | err = SIGSEGV; | ||
1339 | break; | ||
1340 | } | ||
1341 | __asm__ __volatile__( | ||
1342 | " .set push\n" | ||
1343 | " .set reorder\n" | ||
1344 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1345 | EXT "%1, %0, 24, 8\n" | ||
1346 | "1:" SB "%1, 0(%2)\n" | ||
1347 | " andi %1, %2, 0x3\n" | ||
1348 | " beq $0, %1, 9f\n" | ||
1349 | ADDIU "%2, %2, -1\n" | ||
1350 | EXT "%1, %0, 16, 8\n" | ||
1351 | "2:" SB "%1, 0(%2)\n" | ||
1352 | " andi %1, %2, 0x3\n" | ||
1353 | " beq $0, %1, 9f\n" | ||
1354 | ADDIU "%2, %2, -1\n" | ||
1355 | EXT "%1, %0, 8, 8\n" | ||
1356 | "3:" SB "%1, 0(%2)\n" | ||
1357 | " andi %1, %2, 0x3\n" | ||
1358 | " beq $0, %1, 9f\n" | ||
1359 | ADDIU "%2, %2, -1\n" | ||
1360 | EXT "%1, %0, 0, 8\n" | ||
1361 | "4:" SB "%1, 0(%2)\n" | ||
1362 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1363 | EXT "%1, %0, 24, 8\n" | ||
1364 | "1:" SB "%1, 0(%2)\n" | ||
1365 | ADDIU "%2, %2, 1\n" | ||
1366 | " andi %1, %2, 0x3\n" | ||
1367 | " beq $0, %1, 9f\n" | ||
1368 | EXT "%1, %0, 16, 8\n" | ||
1369 | "2:" SB "%1, 0(%2)\n" | ||
1370 | ADDIU "%2, %2, 1\n" | ||
1371 | " andi %1, %2, 0x3\n" | ||
1372 | " beq $0, %1, 9f\n" | ||
1373 | EXT "%1, %0, 8, 8\n" | ||
1374 | "3:" SB "%1, 0(%2)\n" | ||
1375 | ADDIU "%2, %2, 1\n" | ||
1376 | " andi %1, %2, 0x3\n" | ||
1377 | " beq $0, %1, 9f\n" | ||
1378 | EXT "%1, %0, 0, 8\n" | ||
1379 | "4:" SB "%1, 0(%2)\n" | ||
1380 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1381 | "9:\n" | ||
1382 | " .insn\n" | ||
1383 | " .section .fixup,\"ax\"\n" | ||
1384 | "8: li %3,%4\n" | ||
1385 | " j 9b\n" | ||
1386 | " .previous\n" | ||
1387 | " .section __ex_table,\"a\"\n" | ||
1388 | " .word 1b,8b\n" | ||
1389 | " .word 2b,8b\n" | ||
1390 | " .word 3b,8b\n" | ||
1391 | " .word 4b,8b\n" | ||
1392 | " .previous\n" | ||
1393 | " .set pop\n" | ||
1394 | : "+&r"(rt), "=&r"(rs), | ||
1395 | "+&r"(vaddr), "+&r"(err) | ||
1396 | : "i"(SIGSEGV) | ||
1397 | : "memory"); | ||
1398 | |||
1399 | MIPS_R2_STATS(stores); | ||
1400 | |||
1401 | break; | ||
1402 | |||
1403 | case swr_op: | ||
1404 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1405 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1406 | if (!access_ok(VERIFY_WRITE, vaddr, 4)) { | ||
1407 | current->thread.cp0_baduaddr = vaddr; | ||
1408 | err = SIGSEGV; | ||
1409 | break; | ||
1410 | } | ||
1411 | __asm__ __volatile__( | ||
1412 | " .set push\n" | ||
1413 | " .set reorder\n" | ||
1414 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1415 | EXT "%1, %0, 0, 8\n" | ||
1416 | "1:" SB "%1, 0(%2)\n" | ||
1417 | ADDIU "%2, %2, 1\n" | ||
1418 | " andi %1, %2, 0x3\n" | ||
1419 | " beq $0, %1, 9f\n" | ||
1420 | EXT "%1, %0, 8, 8\n" | ||
1421 | "2:" SB "%1, 0(%2)\n" | ||
1422 | ADDIU "%2, %2, 1\n" | ||
1423 | " andi %1, %2, 0x3\n" | ||
1424 | " beq $0, %1, 9f\n" | ||
1425 | EXT "%1, %0, 16, 8\n" | ||
1426 | "3:" SB "%1, 0(%2)\n" | ||
1427 | ADDIU "%2, %2, 1\n" | ||
1428 | " andi %1, %2, 0x3\n" | ||
1429 | " beq $0, %1, 9f\n" | ||
1430 | EXT "%1, %0, 24, 8\n" | ||
1431 | "4:" SB "%1, 0(%2)\n" | ||
1432 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1433 | EXT "%1, %0, 0, 8\n" | ||
1434 | "1:" SB "%1, 0(%2)\n" | ||
1435 | " andi %1, %2, 0x3\n" | ||
1436 | " beq $0, %1, 9f\n" | ||
1437 | ADDIU "%2, %2, -1\n" | ||
1438 | EXT "%1, %0, 8, 8\n" | ||
1439 | "2:" SB "%1, 0(%2)\n" | ||
1440 | " andi %1, %2, 0x3\n" | ||
1441 | " beq $0, %1, 9f\n" | ||
1442 | ADDIU "%2, %2, -1\n" | ||
1443 | EXT "%1, %0, 16, 8\n" | ||
1444 | "3:" SB "%1, 0(%2)\n" | ||
1445 | " andi %1, %2, 0x3\n" | ||
1446 | " beq $0, %1, 9f\n" | ||
1447 | ADDIU "%2, %2, -1\n" | ||
1448 | EXT "%1, %0, 24, 8\n" | ||
1449 | "4:" SB "%1, 0(%2)\n" | ||
1450 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1451 | "9:\n" | ||
1452 | " .insn\n" | ||
1453 | " .section .fixup,\"ax\"\n" | ||
1454 | "8: li %3,%4\n" | ||
1455 | " j 9b\n" | ||
1456 | " .previous\n" | ||
1457 | " .section __ex_table,\"a\"\n" | ||
1458 | " .word 1b,8b\n" | ||
1459 | " .word 2b,8b\n" | ||
1460 | " .word 3b,8b\n" | ||
1461 | " .word 4b,8b\n" | ||
1462 | " .previous\n" | ||
1463 | " .set pop\n" | ||
1464 | : "+&r"(rt), "=&r"(rs), | ||
1465 | "+&r"(vaddr), "+&r"(err) | ||
1466 | : "i"(SIGSEGV) | ||
1467 | : "memory"); | ||
1468 | |||
1469 | MIPS_R2_STATS(stores); | ||
1470 | |||
1471 | break; | ||
1472 | |||
1473 | case ldl_op: | ||
1474 | if (config_enabled(CONFIG_32BIT)) { | ||
1475 | err = SIGILL; | ||
1476 | break; | ||
1477 | } | ||
1478 | |||
1479 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1480 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1481 | if (!access_ok(VERIFY_READ, vaddr, 8)) { | ||
1482 | current->thread.cp0_baduaddr = vaddr; | ||
1483 | err = SIGSEGV; | ||
1484 | break; | ||
1485 | } | ||
1486 | __asm__ __volatile__( | ||
1487 | " .set push\n" | ||
1488 | " .set reorder\n" | ||
1489 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1490 | "1: lb %1, 0(%2)\n" | ||
1491 | " dinsu %0, %1, 56, 8\n" | ||
1492 | " andi %1, %2, 0x7\n" | ||
1493 | " beq $0, %1, 9f\n" | ||
1494 | " daddiu %2, %2, -1\n" | ||
1495 | "2: lb %1, 0(%2)\n" | ||
1496 | " dinsu %0, %1, 48, 8\n" | ||
1497 | " andi %1, %2, 0x7\n" | ||
1498 | " beq $0, %1, 9f\n" | ||
1499 | " daddiu %2, %2, -1\n" | ||
1500 | "3: lb %1, 0(%2)\n" | ||
1501 | " dinsu %0, %1, 40, 8\n" | ||
1502 | " andi %1, %2, 0x7\n" | ||
1503 | " beq $0, %1, 9f\n" | ||
1504 | " daddiu %2, %2, -1\n" | ||
1505 | "4: lb %1, 0(%2)\n" | ||
1506 | " dinsu %0, %1, 32, 8\n" | ||
1507 | " andi %1, %2, 0x7\n" | ||
1508 | " beq $0, %1, 9f\n" | ||
1509 | " daddiu %2, %2, -1\n" | ||
1510 | "5: lb %1, 0(%2)\n" | ||
1511 | " dins %0, %1, 24, 8\n" | ||
1512 | " andi %1, %2, 0x7\n" | ||
1513 | " beq $0, %1, 9f\n" | ||
1514 | " daddiu %2, %2, -1\n" | ||
1515 | "6: lb %1, 0(%2)\n" | ||
1516 | " dins %0, %1, 16, 8\n" | ||
1517 | " andi %1, %2, 0x7\n" | ||
1518 | " beq $0, %1, 9f\n" | ||
1519 | " daddiu %2, %2, -1\n" | ||
1520 | "7: lb %1, 0(%2)\n" | ||
1521 | " dins %0, %1, 8, 8\n" | ||
1522 | " andi %1, %2, 0x7\n" | ||
1523 | " beq $0, %1, 9f\n" | ||
1524 | " daddiu %2, %2, -1\n" | ||
1525 | "0: lb %1, 0(%2)\n" | ||
1526 | " dins %0, %1, 0, 8\n" | ||
1527 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1528 | "1: lb %1, 0(%2)\n" | ||
1529 | " dinsu %0, %1, 56, 8\n" | ||
1530 | " daddiu %2, %2, 1\n" | ||
1531 | " andi %1, %2, 0x7\n" | ||
1532 | " beq $0, %1, 9f\n" | ||
1533 | "2: lb %1, 0(%2)\n" | ||
1534 | " dinsu %0, %1, 48, 8\n" | ||
1535 | " daddiu %2, %2, 1\n" | ||
1536 | " andi %1, %2, 0x7\n" | ||
1537 | " beq $0, %1, 9f\n" | ||
1538 | "3: lb %1, 0(%2)\n" | ||
1539 | " dinsu %0, %1, 40, 8\n" | ||
1540 | " daddiu %2, %2, 1\n" | ||
1541 | " andi %1, %2, 0x7\n" | ||
1542 | " beq $0, %1, 9f\n" | ||
1543 | "4: lb %1, 0(%2)\n" | ||
1544 | " dinsu %0, %1, 32, 8\n" | ||
1545 | " daddiu %2, %2, 1\n" | ||
1546 | " andi %1, %2, 0x7\n" | ||
1547 | " beq $0, %1, 9f\n" | ||
1548 | "5: lb %1, 0(%2)\n" | ||
1549 | " dins %0, %1, 24, 8\n" | ||
1550 | " daddiu %2, %2, 1\n" | ||
1551 | " andi %1, %2, 0x7\n" | ||
1552 | " beq $0, %1, 9f\n" | ||
1553 | "6: lb %1, 0(%2)\n" | ||
1554 | " dins %0, %1, 16, 8\n" | ||
1555 | " daddiu %2, %2, 1\n" | ||
1556 | " andi %1, %2, 0x7\n" | ||
1557 | " beq $0, %1, 9f\n" | ||
1558 | "7: lb %1, 0(%2)\n" | ||
1559 | " dins %0, %1, 8, 8\n" | ||
1560 | " daddiu %2, %2, 1\n" | ||
1561 | " andi %1, %2, 0x7\n" | ||
1562 | " beq $0, %1, 9f\n" | ||
1563 | "0: lb %1, 0(%2)\n" | ||
1564 | " dins %0, %1, 0, 8\n" | ||
1565 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1566 | "9:\n" | ||
1567 | " .insn\n" | ||
1568 | " .section .fixup,\"ax\"\n" | ||
1569 | "8: li %3,%4\n" | ||
1570 | " j 9b\n" | ||
1571 | " .previous\n" | ||
1572 | " .section __ex_table,\"a\"\n" | ||
1573 | " .word 1b,8b\n" | ||
1574 | " .word 2b,8b\n" | ||
1575 | " .word 3b,8b\n" | ||
1576 | " .word 4b,8b\n" | ||
1577 | " .word 5b,8b\n" | ||
1578 | " .word 6b,8b\n" | ||
1579 | " .word 7b,8b\n" | ||
1580 | " .word 0b,8b\n" | ||
1581 | " .previous\n" | ||
1582 | " .set pop\n" | ||
1583 | : "+&r"(rt), "=&r"(rs), | ||
1584 | "+&r"(vaddr), "+&r"(err) | ||
1585 | : "i"(SIGSEGV)); | ||
1586 | if (MIPSInst_RT(inst) && !err) | ||
1587 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1588 | |||
1589 | MIPS_R2_STATS(loads); | ||
1590 | break; | ||
1591 | |||
1592 | case ldr_op: | ||
1593 | if (config_enabled(CONFIG_32BIT)) { | ||
1594 | err = SIGILL; | ||
1595 | break; | ||
1596 | } | ||
1597 | |||
1598 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1599 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1600 | if (!access_ok(VERIFY_READ, vaddr, 8)) { | ||
1601 | current->thread.cp0_baduaddr = vaddr; | ||
1602 | err = SIGSEGV; | ||
1603 | break; | ||
1604 | } | ||
1605 | __asm__ __volatile__( | ||
1606 | " .set push\n" | ||
1607 | " .set reorder\n" | ||
1608 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1609 | "1: lb %1, 0(%2)\n" | ||
1610 | " dins %0, %1, 0, 8\n" | ||
1611 | " daddiu %2, %2, 1\n" | ||
1612 | " andi %1, %2, 0x7\n" | ||
1613 | " beq $0, %1, 9f\n" | ||
1614 | "2: lb %1, 0(%2)\n" | ||
1615 | " dins %0, %1, 8, 8\n" | ||
1616 | " daddiu %2, %2, 1\n" | ||
1617 | " andi %1, %2, 0x7\n" | ||
1618 | " beq $0, %1, 9f\n" | ||
1619 | "3: lb %1, 0(%2)\n" | ||
1620 | " dins %0, %1, 16, 8\n" | ||
1621 | " daddiu %2, %2, 1\n" | ||
1622 | " andi %1, %2, 0x7\n" | ||
1623 | " beq $0, %1, 9f\n" | ||
1624 | "4: lb %1, 0(%2)\n" | ||
1625 | " dins %0, %1, 24, 8\n" | ||
1626 | " daddiu %2, %2, 1\n" | ||
1627 | " andi %1, %2, 0x7\n" | ||
1628 | " beq $0, %1, 9f\n" | ||
1629 | "5: lb %1, 0(%2)\n" | ||
1630 | " dinsu %0, %1, 32, 8\n" | ||
1631 | " daddiu %2, %2, 1\n" | ||
1632 | " andi %1, %2, 0x7\n" | ||
1633 | " beq $0, %1, 9f\n" | ||
1634 | "6: lb %1, 0(%2)\n" | ||
1635 | " dinsu %0, %1, 40, 8\n" | ||
1636 | " daddiu %2, %2, 1\n" | ||
1637 | " andi %1, %2, 0x7\n" | ||
1638 | " beq $0, %1, 9f\n" | ||
1639 | "7: lb %1, 0(%2)\n" | ||
1640 | " dinsu %0, %1, 48, 8\n" | ||
1641 | " daddiu %2, %2, 1\n" | ||
1642 | " andi %1, %2, 0x7\n" | ||
1643 | " beq $0, %1, 9f\n" | ||
1644 | "0: lb %1, 0(%2)\n" | ||
1645 | " dinsu %0, %1, 56, 8\n" | ||
1646 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1647 | "1: lb %1, 0(%2)\n" | ||
1648 | " dins %0, %1, 0, 8\n" | ||
1649 | " andi %1, %2, 0x7\n" | ||
1650 | " beq $0, %1, 9f\n" | ||
1651 | " daddiu %2, %2, -1\n" | ||
1652 | "2: lb %1, 0(%2)\n" | ||
1653 | " dins %0, %1, 8, 8\n" | ||
1654 | " andi %1, %2, 0x7\n" | ||
1655 | " beq $0, %1, 9f\n" | ||
1656 | " daddiu %2, %2, -1\n" | ||
1657 | "3: lb %1, 0(%2)\n" | ||
1658 | " dins %0, %1, 16, 8\n" | ||
1659 | " andi %1, %2, 0x7\n" | ||
1660 | " beq $0, %1, 9f\n" | ||
1661 | " daddiu %2, %2, -1\n" | ||
1662 | "4: lb %1, 0(%2)\n" | ||
1663 | " dins %0, %1, 24, 8\n" | ||
1664 | " andi %1, %2, 0x7\n" | ||
1665 | " beq $0, %1, 9f\n" | ||
1666 | " daddiu %2, %2, -1\n" | ||
1667 | "5: lb %1, 0(%2)\n" | ||
1668 | " dinsu %0, %1, 32, 8\n" | ||
1669 | " andi %1, %2, 0x7\n" | ||
1670 | " beq $0, %1, 9f\n" | ||
1671 | " daddiu %2, %2, -1\n" | ||
1672 | "6: lb %1, 0(%2)\n" | ||
1673 | " dinsu %0, %1, 40, 8\n" | ||
1674 | " andi %1, %2, 0x7\n" | ||
1675 | " beq $0, %1, 9f\n" | ||
1676 | " daddiu %2, %2, -1\n" | ||
1677 | "7: lb %1, 0(%2)\n" | ||
1678 | " dinsu %0, %1, 48, 8\n" | ||
1679 | " andi %1, %2, 0x7\n" | ||
1680 | " beq $0, %1, 9f\n" | ||
1681 | " daddiu %2, %2, -1\n" | ||
1682 | "0: lb %1, 0(%2)\n" | ||
1683 | " dinsu %0, %1, 56, 8\n" | ||
1684 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1685 | "9:\n" | ||
1686 | " .insn\n" | ||
1687 | " .section .fixup,\"ax\"\n" | ||
1688 | "8: li %3,%4\n" | ||
1689 | " j 9b\n" | ||
1690 | " .previous\n" | ||
1691 | " .section __ex_table,\"a\"\n" | ||
1692 | " .word 1b,8b\n" | ||
1693 | " .word 2b,8b\n" | ||
1694 | " .word 3b,8b\n" | ||
1695 | " .word 4b,8b\n" | ||
1696 | " .word 5b,8b\n" | ||
1697 | " .word 6b,8b\n" | ||
1698 | " .word 7b,8b\n" | ||
1699 | " .word 0b,8b\n" | ||
1700 | " .previous\n" | ||
1701 | " .set pop\n" | ||
1702 | : "+&r"(rt), "=&r"(rs), | ||
1703 | "+&r"(vaddr), "+&r"(err) | ||
1704 | : "i"(SIGSEGV)); | ||
1705 | if (MIPSInst_RT(inst) && !err) | ||
1706 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1707 | |||
1708 | MIPS_R2_STATS(loads); | ||
1709 | break; | ||
1710 | |||
1711 | case sdl_op: | ||
1712 | if (config_enabled(CONFIG_32BIT)) { | ||
1713 | err = SIGILL; | ||
1714 | break; | ||
1715 | } | ||
1716 | |||
1717 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1718 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1719 | if (!access_ok(VERIFY_WRITE, vaddr, 8)) { | ||
1720 | current->thread.cp0_baduaddr = vaddr; | ||
1721 | err = SIGSEGV; | ||
1722 | break; | ||
1723 | } | ||
1724 | __asm__ __volatile__( | ||
1725 | " .set push\n" | ||
1726 | " .set reorder\n" | ||
1727 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1728 | " dextu %1, %0, 56, 8\n" | ||
1729 | "1: sb %1, 0(%2)\n" | ||
1730 | " andi %1, %2, 0x7\n" | ||
1731 | " beq $0, %1, 9f\n" | ||
1732 | " daddiu %2, %2, -1\n" | ||
1733 | " dextu %1, %0, 48, 8\n" | ||
1734 | "2: sb %1, 0(%2)\n" | ||
1735 | " andi %1, %2, 0x7\n" | ||
1736 | " beq $0, %1, 9f\n" | ||
1737 | " daddiu %2, %2, -1\n" | ||
1738 | " dextu %1, %0, 40, 8\n" | ||
1739 | "3: sb %1, 0(%2)\n" | ||
1740 | " andi %1, %2, 0x7\n" | ||
1741 | " beq $0, %1, 9f\n" | ||
1742 | " daddiu %2, %2, -1\n" | ||
1743 | " dextu %1, %0, 32, 8\n" | ||
1744 | "4: sb %1, 0(%2)\n" | ||
1745 | " andi %1, %2, 0x7\n" | ||
1746 | " beq $0, %1, 9f\n" | ||
1747 | " daddiu %2, %2, -1\n" | ||
1748 | " dext %1, %0, 24, 8\n" | ||
1749 | "5: sb %1, 0(%2)\n" | ||
1750 | " andi %1, %2, 0x7\n" | ||
1751 | " beq $0, %1, 9f\n" | ||
1752 | " daddiu %2, %2, -1\n" | ||
1753 | " dext %1, %0, 16, 8\n" | ||
1754 | "6: sb %1, 0(%2)\n" | ||
1755 | " andi %1, %2, 0x7\n" | ||
1756 | " beq $0, %1, 9f\n" | ||
1757 | " daddiu %2, %2, -1\n" | ||
1758 | " dext %1, %0, 8, 8\n" | ||
1759 | "7: sb %1, 0(%2)\n" | ||
1760 | " andi %1, %2, 0x7\n" | ||
1761 | " beq $0, %1, 9f\n" | ||
1762 | " daddiu %2, %2, -1\n" | ||
1763 | " dext %1, %0, 0, 8\n" | ||
1764 | "0: sb %1, 0(%2)\n" | ||
1765 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1766 | " dextu %1, %0, 56, 8\n" | ||
1767 | "1: sb %1, 0(%2)\n" | ||
1768 | " daddiu %2, %2, 1\n" | ||
1769 | " andi %1, %2, 0x7\n" | ||
1770 | " beq $0, %1, 9f\n" | ||
1771 | " dextu %1, %0, 48, 8\n" | ||
1772 | "2: sb %1, 0(%2)\n" | ||
1773 | " daddiu %2, %2, 1\n" | ||
1774 | " andi %1, %2, 0x7\n" | ||
1775 | " beq $0, %1, 9f\n" | ||
1776 | " dextu %1, %0, 40, 8\n" | ||
1777 | "3: sb %1, 0(%2)\n" | ||
1778 | " daddiu %2, %2, 1\n" | ||
1779 | " andi %1, %2, 0x7\n" | ||
1780 | " beq $0, %1, 9f\n" | ||
1781 | " dextu %1, %0, 32, 8\n" | ||
1782 | "4: sb %1, 0(%2)\n" | ||
1783 | " daddiu %2, %2, 1\n" | ||
1784 | " andi %1, %2, 0x7\n" | ||
1785 | " beq $0, %1, 9f\n" | ||
1786 | " dext %1, %0, 24, 8\n" | ||
1787 | "5: sb %1, 0(%2)\n" | ||
1788 | " daddiu %2, %2, 1\n" | ||
1789 | " andi %1, %2, 0x7\n" | ||
1790 | " beq $0, %1, 9f\n" | ||
1791 | " dext %1, %0, 16, 8\n" | ||
1792 | "6: sb %1, 0(%2)\n" | ||
1793 | " daddiu %2, %2, 1\n" | ||
1794 | " andi %1, %2, 0x7\n" | ||
1795 | " beq $0, %1, 9f\n" | ||
1796 | " dext %1, %0, 8, 8\n" | ||
1797 | "7: sb %1, 0(%2)\n" | ||
1798 | " daddiu %2, %2, 1\n" | ||
1799 | " andi %1, %2, 0x7\n" | ||
1800 | " beq $0, %1, 9f\n" | ||
1801 | " dext %1, %0, 0, 8\n" | ||
1802 | "0: sb %1, 0(%2)\n" | ||
1803 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1804 | "9:\n" | ||
1805 | " .insn\n" | ||
1806 | " .section .fixup,\"ax\"\n" | ||
1807 | "8: li %3,%4\n" | ||
1808 | " j 9b\n" | ||
1809 | " .previous\n" | ||
1810 | " .section __ex_table,\"a\"\n" | ||
1811 | " .word 1b,8b\n" | ||
1812 | " .word 2b,8b\n" | ||
1813 | " .word 3b,8b\n" | ||
1814 | " .word 4b,8b\n" | ||
1815 | " .word 5b,8b\n" | ||
1816 | " .word 6b,8b\n" | ||
1817 | " .word 7b,8b\n" | ||
1818 | " .word 0b,8b\n" | ||
1819 | " .previous\n" | ||
1820 | " .set pop\n" | ||
1821 | : "+&r"(rt), "=&r"(rs), | ||
1822 | "+&r"(vaddr), "+&r"(err) | ||
1823 | : "i"(SIGSEGV) | ||
1824 | : "memory"); | ||
1825 | |||
1826 | MIPS_R2_STATS(stores); | ||
1827 | break; | ||
1828 | |||
1829 | case sdr_op: | ||
1830 | if (config_enabled(CONFIG_32BIT)) { | ||
1831 | err = SIGILL; | ||
1832 | break; | ||
1833 | } | ||
1834 | |||
1835 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1836 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1837 | if (!access_ok(VERIFY_WRITE, vaddr, 8)) { | ||
1838 | current->thread.cp0_baduaddr = vaddr; | ||
1839 | err = SIGSEGV; | ||
1840 | break; | ||
1841 | } | ||
1842 | __asm__ __volatile__( | ||
1843 | " .set push\n" | ||
1844 | " .set reorder\n" | ||
1845 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1846 | " dext %1, %0, 0, 8\n" | ||
1847 | "1: sb %1, 0(%2)\n" | ||
1848 | " daddiu %2, %2, 1\n" | ||
1849 | " andi %1, %2, 0x7\n" | ||
1850 | " beq $0, %1, 9f\n" | ||
1851 | " dext %1, %0, 8, 8\n" | ||
1852 | "2: sb %1, 0(%2)\n" | ||
1853 | " daddiu %2, %2, 1\n" | ||
1854 | " andi %1, %2, 0x7\n" | ||
1855 | " beq $0, %1, 9f\n" | ||
1856 | " dext %1, %0, 16, 8\n" | ||
1857 | "3: sb %1, 0(%2)\n" | ||
1858 | " daddiu %2, %2, 1\n" | ||
1859 | " andi %1, %2, 0x7\n" | ||
1860 | " beq $0, %1, 9f\n" | ||
1861 | " dext %1, %0, 24, 8\n" | ||
1862 | "4: sb %1, 0(%2)\n" | ||
1863 | " daddiu %2, %2, 1\n" | ||
1864 | " andi %1, %2, 0x7\n" | ||
1865 | " beq $0, %1, 9f\n" | ||
1866 | " dextu %1, %0, 32, 8\n" | ||
1867 | "5: sb %1, 0(%2)\n" | ||
1868 | " daddiu %2, %2, 1\n" | ||
1869 | " andi %1, %2, 0x7\n" | ||
1870 | " beq $0, %1, 9f\n" | ||
1871 | " dextu %1, %0, 40, 8\n" | ||
1872 | "6: sb %1, 0(%2)\n" | ||
1873 | " daddiu %2, %2, 1\n" | ||
1874 | " andi %1, %2, 0x7\n" | ||
1875 | " beq $0, %1, 9f\n" | ||
1876 | " dextu %1, %0, 48, 8\n" | ||
1877 | "7: sb %1, 0(%2)\n" | ||
1878 | " daddiu %2, %2, 1\n" | ||
1879 | " andi %1, %2, 0x7\n" | ||
1880 | " beq $0, %1, 9f\n" | ||
1881 | " dextu %1, %0, 56, 8\n" | ||
1882 | "0: sb %1, 0(%2)\n" | ||
1883 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1884 | " dext %1, %0, 0, 8\n" | ||
1885 | "1: sb %1, 0(%2)\n" | ||
1886 | " andi %1, %2, 0x7\n" | ||
1887 | " beq $0, %1, 9f\n" | ||
1888 | " daddiu %2, %2, -1\n" | ||
1889 | " dext %1, %0, 8, 8\n" | ||
1890 | "2: sb %1, 0(%2)\n" | ||
1891 | " andi %1, %2, 0x7\n" | ||
1892 | " beq $0, %1, 9f\n" | ||
1893 | " daddiu %2, %2, -1\n" | ||
1894 | " dext %1, %0, 16, 8\n" | ||
1895 | "3: sb %1, 0(%2)\n" | ||
1896 | " andi %1, %2, 0x7\n" | ||
1897 | " beq $0, %1, 9f\n" | ||
1898 | " daddiu %2, %2, -1\n" | ||
1899 | " dext %1, %0, 24, 8\n" | ||
1900 | "4: sb %1, 0(%2)\n" | ||
1901 | " andi %1, %2, 0x7\n" | ||
1902 | " beq $0, %1, 9f\n" | ||
1903 | " daddiu %2, %2, -1\n" | ||
1904 | " dextu %1, %0, 32, 8\n" | ||
1905 | "5: sb %1, 0(%2)\n" | ||
1906 | " andi %1, %2, 0x7\n" | ||
1907 | " beq $0, %1, 9f\n" | ||
1908 | " daddiu %2, %2, -1\n" | ||
1909 | " dextu %1, %0, 40, 8\n" | ||
1910 | "6: sb %1, 0(%2)\n" | ||
1911 | " andi %1, %2, 0x7\n" | ||
1912 | " beq $0, %1, 9f\n" | ||
1913 | " daddiu %2, %2, -1\n" | ||
1914 | " dextu %1, %0, 48, 8\n" | ||
1915 | "7: sb %1, 0(%2)\n" | ||
1916 | " andi %1, %2, 0x7\n" | ||
1917 | " beq $0, %1, 9f\n" | ||
1918 | " daddiu %2, %2, -1\n" | ||
1919 | " dextu %1, %0, 56, 8\n" | ||
1920 | "0: sb %1, 0(%2)\n" | ||
1921 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1922 | "9:\n" | ||
1923 | " .insn\n" | ||
1924 | " .section .fixup,\"ax\"\n" | ||
1925 | "8: li %3,%4\n" | ||
1926 | " j 9b\n" | ||
1927 | " .previous\n" | ||
1928 | " .section __ex_table,\"a\"\n" | ||
1929 | " .word 1b,8b\n" | ||
1930 | " .word 2b,8b\n" | ||
1931 | " .word 3b,8b\n" | ||
1932 | " .word 4b,8b\n" | ||
1933 | " .word 5b,8b\n" | ||
1934 | " .word 6b,8b\n" | ||
1935 | " .word 7b,8b\n" | ||
1936 | " .word 0b,8b\n" | ||
1937 | " .previous\n" | ||
1938 | " .set pop\n" | ||
1939 | : "+&r"(rt), "=&r"(rs), | ||
1940 | "+&r"(vaddr), "+&r"(err) | ||
1941 | : "i"(SIGSEGV) | ||
1942 | : "memory"); | ||
1943 | |||
1944 | MIPS_R2_STATS(stores); | ||
1945 | |||
1946 | break; | ||
1947 | case ll_op: | ||
1948 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1949 | if (vaddr & 0x3) { | ||
1950 | current->thread.cp0_baduaddr = vaddr; | ||
1951 | err = SIGBUS; | ||
1952 | break; | ||
1953 | } | ||
1954 | if (!access_ok(VERIFY_READ, vaddr, 4)) { | ||
1955 | current->thread.cp0_baduaddr = vaddr; | ||
1956 | err = SIGBUS; | ||
1957 | break; | ||
1958 | } | ||
1959 | |||
1960 | if (!cpu_has_rw_llb) { | ||
1961 | /* | ||
1962 | * An LL/SC block can't be safely emulated without | ||
1963 | * Config5/LLB being available. So it's probably time to | ||
1964 | * kill our process before things get any worse. This is | ||
1965 | * because Config5/LLB allows us to use ERETNC so that | ||
1966 | * the LLAddr/LLB bit is not cleared when we return from | ||
1967 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
1968 | * RI exception so once we emulate them here, we return | ||
1969 | * back to userland with ERETNC. That preserves the | ||
1970 | * LLAddr/LLB so the subsequent SC instruction will | ||
1971 | * succeed preserving the atomic semantics of the LL/SC | ||
1972 | * block. Without that, there is no safe way to emulate | ||
1973 | * an LL/SC block in MIPSR2 userland. | ||
1974 | */ | ||
1975 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
1976 | err = SIGKILL; | ||
1977 | break; | ||
1978 | } | ||
1979 | |||
1980 | __asm__ __volatile__( | ||
1981 | "1:\n" | ||
1982 | "ll %0, 0(%2)\n" | ||
1983 | "2:\n" | ||
1984 | ".insn\n" | ||
1985 | ".section .fixup,\"ax\"\n" | ||
1986 | "3:\n" | ||
1987 | "li %1, %3\n" | ||
1988 | "j 2b\n" | ||
1989 | ".previous\n" | ||
1990 | ".section __ex_table,\"a\"\n" | ||
1991 | ".word 1b, 3b\n" | ||
1992 | ".previous\n" | ||
1993 | : "=&r"(res), "+&r"(err) | ||
1994 | : "r"(vaddr), "i"(SIGSEGV) | ||
1995 | : "memory"); | ||
1996 | |||
1997 | if (MIPSInst_RT(inst) && !err) | ||
1998 | regs->regs[MIPSInst_RT(inst)] = res; | ||
1999 | MIPS_R2_STATS(llsc); | ||
2000 | |||
2001 | break; | ||
2002 | |||
2003 | case sc_op: | ||
2004 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2005 | if (vaddr & 0x3) { | ||
2006 | current->thread.cp0_baduaddr = vaddr; | ||
2007 | err = SIGBUS; | ||
2008 | break; | ||
2009 | } | ||
2010 | if (!access_ok(VERIFY_WRITE, vaddr, 4)) { | ||
2011 | current->thread.cp0_baduaddr = vaddr; | ||
2012 | err = SIGBUS; | ||
2013 | break; | ||
2014 | } | ||
2015 | |||
2016 | if (!cpu_has_rw_llb) { | ||
2017 | /* | ||
2018 | * An LL/SC block can't be safely emulated without | ||
2019 | * Config5/LLB being available. So it's probably time to | ||
2020 | * kill our process before things get any worse. This is | ||
2021 | * because Config5/LLB allows us to use ERETNC so that | ||
2022 | * the LLAddr/LLB bit is not cleared when we return from | ||
2023 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2024 | * RI exception so once we emulate them here, we return | ||
2025 | * back to userland with ERETNC. That preserves the | ||
2026 | * LLAddr/LLB so the subsequent SC instruction will | ||
2027 | * succeed preserving the atomic semantics of the LL/SC | ||
2028 | * block. Without that, there is no safe way to emulate | ||
2029 | * an LL/SC block in MIPSR2 userland. | ||
2030 | */ | ||
2031 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2032 | err = SIGKILL; | ||
2033 | break; | ||
2034 | } | ||
2035 | |||
2036 | res = regs->regs[MIPSInst_RT(inst)]; | ||
2037 | |||
2038 | __asm__ __volatile__( | ||
2039 | "1:\n" | ||
2040 | "sc %0, 0(%2)\n" | ||
2041 | "2:\n" | ||
2042 | ".insn\n" | ||
2043 | ".section .fixup,\"ax\"\n" | ||
2044 | "3:\n" | ||
2045 | "li %1, %3\n" | ||
2046 | "j 2b\n" | ||
2047 | ".previous\n" | ||
2048 | ".section __ex_table,\"a\"\n" | ||
2049 | ".word 1b, 3b\n" | ||
2050 | ".previous\n" | ||
2051 | : "+&r"(res), "+&r"(err) | ||
2052 | : "r"(vaddr), "i"(SIGSEGV)); | ||
2053 | |||
2054 | if (MIPSInst_RT(inst) && !err) | ||
2055 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2056 | |||
2057 | MIPS_R2_STATS(llsc); | ||
2058 | |||
2059 | break; | ||
2060 | |||
2061 | case lld_op: | ||
2062 | if (config_enabled(CONFIG_32BIT)) { | ||
2063 | err = SIGILL; | ||
2064 | break; | ||
2065 | } | ||
2066 | |||
2067 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2068 | if (vaddr & 0x7) { | ||
2069 | current->thread.cp0_baduaddr = vaddr; | ||
2070 | err = SIGBUS; | ||
2071 | break; | ||
2072 | } | ||
2073 | if (!access_ok(VERIFY_READ, vaddr, 8)) { | ||
2074 | current->thread.cp0_baduaddr = vaddr; | ||
2075 | err = SIGBUS; | ||
2076 | break; | ||
2077 | } | ||
2078 | |||
2079 | if (!cpu_has_rw_llb) { | ||
2080 | /* | ||
2081 | * An LL/SC block can't be safely emulated without | ||
2082 | * Config5/LLB being available. So it's probably time to | ||
2083 | * kill our process before things get any worse. This is | ||
2084 | * because Config5/LLB allows us to use ERETNC so that | ||
2085 | * the LLAddr/LLB bit is not cleared when we return from | ||
2086 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2087 | * RI exception so once we emulate them here, we return | ||
2088 | * back to userland with ERETNC. That preserves the | ||
2089 | * LLAddr/LLB so the subsequent SC instruction will | ||
2090 | * succeed preserving the atomic semantics of the LL/SC | ||
2091 | * block. Without that, there is no safe way to emulate | ||
2092 | * an LL/SC block in MIPSR2 userland. | ||
2093 | */ | ||
2094 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2095 | err = SIGKILL; | ||
2096 | break; | ||
2097 | } | ||
2098 | |||
2099 | __asm__ __volatile__( | ||
2100 | "1:\n" | ||
2101 | "lld %0, 0(%2)\n" | ||
2102 | "2:\n" | ||
2103 | ".insn\n" | ||
2104 | ".section .fixup,\"ax\"\n" | ||
2105 | "3:\n" | ||
2106 | "li %1, %3\n" | ||
2107 | "j 2b\n" | ||
2108 | ".previous\n" | ||
2109 | ".section __ex_table,\"a\"\n" | ||
2110 | ".word 1b, 3b\n" | ||
2111 | ".previous\n" | ||
2112 | : "=&r"(res), "+&r"(err) | ||
2113 | : "r"(vaddr), "i"(SIGSEGV) | ||
2114 | : "memory"); | ||
2115 | if (MIPSInst_RT(inst) && !err) | ||
2116 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2117 | |||
2118 | MIPS_R2_STATS(llsc); | ||
2119 | |||
2120 | break; | ||
2121 | |||
2122 | case scd_op: | ||
2123 | if (config_enabled(CONFIG_32BIT)) { | ||
2124 | err = SIGILL; | ||
2125 | break; | ||
2126 | } | ||
2127 | |||
2128 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2129 | if (vaddr & 0x7) { | ||
2130 | current->thread.cp0_baduaddr = vaddr; | ||
2131 | err = SIGBUS; | ||
2132 | break; | ||
2133 | } | ||
2134 | if (!access_ok(VERIFY_WRITE, vaddr, 8)) { | ||
2135 | current->thread.cp0_baduaddr = vaddr; | ||
2136 | err = SIGBUS; | ||
2137 | break; | ||
2138 | } | ||
2139 | |||
2140 | if (!cpu_has_rw_llb) { | ||
2141 | /* | ||
2142 | * An LL/SC block can't be safely emulated without | ||
2143 | * Config5/LLB being available. So it's probably time to | ||
2144 | * kill our process before things get any worse. This is | ||
2145 | * because Config5/LLB allows us to use ERETNC so that | ||
2146 | * the LLAddr/LLB bit is not cleared when we return from | ||
2147 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2148 | * RI exception so once we emulate them here, we return | ||
2149 | * back to userland with ERETNC. That preserves the | ||
2150 | * LLAddr/LLB so the subsequent SC instruction will | ||
2151 | * succeed preserving the atomic semantics of the LL/SC | ||
2152 | * block. Without that, there is no safe way to emulate | ||
2153 | * an LL/SC block in MIPSR2 userland. | ||
2154 | */ | ||
2155 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2156 | err = SIGKILL; | ||
2157 | break; | ||
2158 | } | ||
2159 | |||
2160 | res = regs->regs[MIPSInst_RT(inst)]; | ||
2161 | |||
2162 | __asm__ __volatile__( | ||
2163 | "1:\n" | ||
2164 | "scd %0, 0(%2)\n" | ||
2165 | "2:\n" | ||
2166 | ".insn\n" | ||
2167 | ".section .fixup,\"ax\"\n" | ||
2168 | "3:\n" | ||
2169 | "li %1, %3\n" | ||
2170 | "j 2b\n" | ||
2171 | ".previous\n" | ||
2172 | ".section __ex_table,\"a\"\n" | ||
2173 | ".word 1b, 3b\n" | ||
2174 | ".previous\n" | ||
2175 | : "+&r"(res), "+&r"(err) | ||
2176 | : "r"(vaddr), "i"(SIGSEGV)); | ||
2177 | |||
2178 | if (MIPSInst_RT(inst) && !err) | ||
2179 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2180 | |||
2181 | MIPS_R2_STATS(llsc); | ||
2182 | |||
2183 | break; | ||
2184 | case pref_op: | ||
2185 | /* skip it */ | ||
2186 | break; | ||
2187 | default: | ||
2188 | err = SIGILL; | ||
2189 | } | ||
2190 | |||
2191 | /* | ||
2192 | * Let's not return to userland just yet. It's costly and | ||
2193 | * it's likely we have more R2 instructions to emulate | ||
2194 | */ | ||
2195 | if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { | ||
2196 | regs->cp0_cause &= ~CAUSEF_BD; | ||
2197 | err = get_user(inst, (u32 __user *)regs->cp0_epc); | ||
2198 | if (!err) | ||
2199 | goto repeat; | ||
2200 | |||
2201 | if (err < 0) | ||
2202 | err = SIGSEGV; | ||
2203 | } | ||
2204 | |||
2205 | if (err && (err != SIGEMT)) { | ||
2206 | regs->regs[31] = r31; | ||
2207 | regs->cp0_epc = epc; | ||
2208 | } | ||
2209 | |||
2210 | /* Likely a MIPS R6 compatible instruction */ | ||
2211 | if (pass && (err == SIGILL)) | ||
2212 | err = 0; | ||
2213 | |||
2214 | return err; | ||
2215 | } | ||
2216 | |||
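The Config5/LLB reasoning in the ll/sc/lld/scd cases above is easier to see against a concrete user-space sequence. The sketch below is illustrative only and is not part of the patch: it assumes code built for MIPS R2 (e.g. -march=mips32r2) and then run on an R6 core, where both the ll and the sc trap with RI and are handled by the emulation above. Unless the kernel returns with ERETNC so that the LLbit set by the emulated ll survives, the emulated sc can never succeed and the loop below would spin forever.

    /*
     * Hypothetical MIPS R2 user-space retry loop (not from the patch).
     * The emulator above handles both the ll and the sc when this runs
     * on an R6 CPU; Config5/LLB plus ERETNC keep the LLbit alive
     * between the two emulated instructions.
     */
    static inline int atomic_fetch_add_r2(int *p, int v)
    {
            int old, tmp;

            __asm__ __volatile__(
            "1:     ll      %0, %2          \n"     /* load linked            */
            "       addu    %1, %0, %3      \n"     /* old + v                */
            "       sc      %1, %2          \n"     /* store conditional      */
            "       beqz    %1, 1b          \n"     /* LLbit cleared -> retry */
            : "=&r"(old), "=&r"(tmp), "+m"(*p)
            : "r"(v)
            : "memory");

            return old;
    }

Without the Config5/LLB check the emulator would have no way to guarantee forward progress for loops like this, which is why it sends SIGKILL instead of emulating them incorrectly.
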
2217 | #ifdef CONFIG_DEBUG_FS | ||
2218 | |||
2219 | static int mipsr2_stats_show(struct seq_file *s, void *unused) | ||
2220 | { | ||
2221 | |||
2222 | seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); | ||
2223 | seq_printf(s, "movs\t\t%ld\t%ld\n", | ||
2224 | (unsigned long)__this_cpu_read(mipsr2emustats.movs), | ||
2225 | (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); | ||
2226 | seq_printf(s, "hilo\t\t%ld\t%ld\n", | ||
2227 | (unsigned long)__this_cpu_read(mipsr2emustats.hilo), | ||
2228 | (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); | ||
2229 | seq_printf(s, "muls\t\t%ld\t%ld\n", | ||
2230 | (unsigned long)__this_cpu_read(mipsr2emustats.muls), | ||
2231 | (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); | ||
2232 | seq_printf(s, "divs\t\t%ld\t%ld\n", | ||
2233 | (unsigned long)__this_cpu_read(mipsr2emustats.divs), | ||
2234 | (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); | ||
2235 | seq_printf(s, "dsps\t\t%ld\t%ld\n", | ||
2236 | (unsigned long)__this_cpu_read(mipsr2emustats.dsps), | ||
2237 | (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); | ||
2238 | seq_printf(s, "bops\t\t%ld\t%ld\n", | ||
2239 | (unsigned long)__this_cpu_read(mipsr2emustats.bops), | ||
2240 | (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); | ||
2241 | seq_printf(s, "traps\t\t%ld\t%ld\n", | ||
2242 | (unsigned long)__this_cpu_read(mipsr2emustats.traps), | ||
2243 | (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); | ||
2244 | seq_printf(s, "fpus\t\t%ld\t%ld\n", | ||
2245 | (unsigned long)__this_cpu_read(mipsr2emustats.fpus), | ||
2246 | (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); | ||
2247 | seq_printf(s, "loads\t\t%ld\t%ld\n", | ||
2248 | (unsigned long)__this_cpu_read(mipsr2emustats.loads), | ||
2249 | (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); | ||
2250 | seq_printf(s, "stores\t\t%ld\t%ld\n", | ||
2251 | (unsigned long)__this_cpu_read(mipsr2emustats.stores), | ||
2252 | (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); | ||
2253 | seq_printf(s, "llsc\t\t%ld\t%ld\n", | ||
2254 | (unsigned long)__this_cpu_read(mipsr2emustats.llsc), | ||
2255 | (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); | ||
2256 | seq_printf(s, "dsemul\t\t%ld\t%ld\n", | ||
2257 | (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), | ||
2258 | (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); | ||
2259 | seq_printf(s, "jr\t\t%ld\n", | ||
2260 | (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); | ||
2261 | seq_printf(s, "bltzl\t\t%ld\n", | ||
2262 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); | ||
2263 | seq_printf(s, "bgezl\t\t%ld\n", | ||
2264 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); | ||
2265 | seq_printf(s, "bltzll\t\t%ld\n", | ||
2266 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); | ||
2267 | seq_printf(s, "bgezll\t\t%ld\n", | ||
2268 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); | ||
2269 | seq_printf(s, "bltzal\t\t%ld\n", | ||
2270 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); | ||
2271 | seq_printf(s, "bgezal\t\t%ld\n", | ||
2272 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); | ||
2273 | seq_printf(s, "beql\t\t%ld\n", | ||
2274 | (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); | ||
2275 | seq_printf(s, "bnel\t\t%ld\n", | ||
2276 | (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); | ||
2277 | seq_printf(s, "blezl\t\t%ld\n", | ||
2278 | (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); | ||
2279 | seq_printf(s, "bgtzl\t\t%ld\n", | ||
2280 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); | ||
2281 | |||
2282 | return 0; | ||
2283 | } | ||
2284 | |||
2285 | static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) | ||
2286 | { | ||
2287 | mipsr2_stats_show(s, unused); | ||
2288 | |||
2289 | __this_cpu_write((mipsr2emustats).movs, 0); | ||
2290 | __this_cpu_write((mipsr2bdemustats).movs, 0); | ||
2291 | __this_cpu_write((mipsr2emustats).hilo, 0); | ||
2292 | __this_cpu_write((mipsr2bdemustats).hilo, 0); | ||
2293 | __this_cpu_write((mipsr2emustats).muls, 0); | ||
2294 | __this_cpu_write((mipsr2bdemustats).muls, 0); | ||
2295 | __this_cpu_write((mipsr2emustats).divs, 0); | ||
2296 | __this_cpu_write((mipsr2bdemustats).divs, 0); | ||
2297 | __this_cpu_write((mipsr2emustats).dsps, 0); | ||
2298 | __this_cpu_write((mipsr2bdemustats).dsps, 0); | ||
2299 | __this_cpu_write((mipsr2emustats).bops, 0); | ||
2300 | __this_cpu_write((mipsr2bdemustats).bops, 0); | ||
2301 | __this_cpu_write((mipsr2emustats).traps, 0); | ||
2302 | __this_cpu_write((mipsr2bdemustats).traps, 0); | ||
2303 | __this_cpu_write((mipsr2emustats).fpus, 0); | ||
2304 | __this_cpu_write((mipsr2bdemustats).fpus, 0); | ||
2305 | __this_cpu_write((mipsr2emustats).loads, 0); | ||
2306 | __this_cpu_write((mipsr2bdemustats).loads, 0); | ||
2307 | __this_cpu_write((mipsr2emustats).stores, 0); | ||
2308 | __this_cpu_write((mipsr2bdemustats).stores, 0); | ||
2309 | __this_cpu_write((mipsr2emustats).llsc, 0); | ||
2310 | __this_cpu_write((mipsr2bdemustats).llsc, 0); | ||
2311 | __this_cpu_write((mipsr2emustats).dsemul, 0); | ||
2312 | __this_cpu_write((mipsr2bdemustats).dsemul, 0); | ||
2313 | __this_cpu_write((mipsr2bremustats).jrs, 0); | ||
2314 | __this_cpu_write((mipsr2bremustats).bltzl, 0); | ||
2315 | __this_cpu_write((mipsr2bremustats).bgezl, 0); | ||
2316 | __this_cpu_write((mipsr2bremustats).bltzll, 0); | ||
2317 | __this_cpu_write((mipsr2bremustats).bgezll, 0); | ||
2318 | __this_cpu_write((mipsr2bremustats).bltzal, 0); | ||
2319 | __this_cpu_write((mipsr2bremustats).bgezal, 0); | ||
2320 | __this_cpu_write((mipsr2bremustats).beql, 0); | ||
2321 | __this_cpu_write((mipsr2bremustats).bnel, 0); | ||
2322 | __this_cpu_write((mipsr2bremustats).blezl, 0); | ||
2323 | __this_cpu_write((mipsr2bremustats).bgtzl, 0); | ||
2324 | |||
2325 | return 0; | ||
2326 | } | ||
2327 | |||
2328 | static int mipsr2_stats_open(struct inode *inode, struct file *file) | ||
2329 | { | ||
2330 | return single_open(file, mipsr2_stats_show, inode->i_private); | ||
2331 | } | ||
2332 | |||
2333 | static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) | ||
2334 | { | ||
2335 | return single_open(file, mipsr2_stats_clear_show, inode->i_private); | ||
2336 | } | ||
2337 | |||
2338 | static const struct file_operations mipsr2_emul_fops = { | ||
2339 | .open = mipsr2_stats_open, | ||
2340 | .read = seq_read, | ||
2341 | .llseek = seq_lseek, | ||
2342 | .release = single_release, | ||
2343 | }; | ||
2344 | |||
2345 | static const struct file_operations mipsr2_clear_fops = { | ||
2346 | .open = mipsr2_stats_clear_open, | ||
2347 | .read = seq_read, | ||
2348 | .llseek = seq_lseek, | ||
2349 | .release = single_release, | ||
2350 | }; | ||
2351 | |||
2352 | |||
2353 | static int __init mipsr2_init_debugfs(void) | ||
2354 | { | ||
2355 | extern struct dentry *mips_debugfs_dir; | ||
2356 | struct dentry *mipsr2_emul; | ||
2357 | |||
2358 | if (!mips_debugfs_dir) | ||
2359 | return -ENODEV; | ||
2360 | |||
2361 | mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, | ||
2362 | mips_debugfs_dir, NULL, | ||
2363 | &mipsr2_emul_fops); | ||
2364 | if (!mipsr2_emul) | ||
2365 | return -ENOMEM; | ||
2366 | |||
2367 | mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, | ||
2368 | mips_debugfs_dir, NULL, | ||
2369 | &mipsr2_clear_fops); | ||
2370 | if (!mipsr2_emul) | ||
2371 | return -ENOMEM; | ||
2372 | |||
2373 | return 0; | ||
2374 | } | ||
2375 | |||
2376 | device_initcall(mipsr2_init_debugfs); | ||
2377 | |||
2378 | #endif /* CONFIG_DEBUG_FS */ | ||
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 17eaf0cf760c..291af0b5c482 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c | |||
@@ -14,6 +14,8 @@ | |||
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <asm/uaccess.h> | 15 | #include <asm/uaccess.h> |
16 | #include <asm/ftrace.h> | 16 | #include <asm/ftrace.h> |
17 | #include <asm/fpu.h> | ||
18 | #include <asm/msa.h> | ||
17 | 19 | ||
18 | extern void *__bzero(void *__s, size_t __count); | 20 | extern void *__bzero(void *__s, size_t __count); |
19 | extern long __strncpy_from_kernel_nocheck_asm(char *__to, | 21 | extern long __strncpy_from_kernel_nocheck_asm(char *__to, |
@@ -32,6 +34,14 @@ extern long __strnlen_user_nocheck_asm(const char *s); | |||
32 | extern long __strnlen_user_asm(const char *s); | 34 | extern long __strnlen_user_asm(const char *s); |
33 | 35 | ||
34 | /* | 36 | /* |
37 | * Core architecture code | ||
38 | */ | ||
39 | EXPORT_SYMBOL_GPL(_save_fp); | ||
40 | #ifdef CONFIG_CPU_HAS_MSA | ||
41 | EXPORT_SYMBOL_GPL(_save_msa); | ||
42 | #endif | ||
43 | |||
44 | /* | ||
35 | * String functions | 45 | * String functions |
36 | */ | 46 | */ |
37 | EXPORT_SYMBOL(memset); | 47 | EXPORT_SYMBOL(memset); |
@@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm); | |||
67 | EXPORT_SYMBOL(__strnlen_user_nocheck_asm); | 77 | EXPORT_SYMBOL(__strnlen_user_nocheck_asm); |
68 | EXPORT_SYMBOL(__strnlen_user_asm); | 78 | EXPORT_SYMBOL(__strnlen_user_asm); |
69 | 79 | ||
80 | #ifndef CONFIG_CPU_MIPSR6 | ||
70 | EXPORT_SYMBOL(csum_partial); | 81 | EXPORT_SYMBOL(csum_partial); |
71 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | 82 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |
72 | EXPORT_SYMBOL(__csum_partial_copy_kernel); | 83 | EXPORT_SYMBOL(__csum_partial_copy_kernel); |
73 | EXPORT_SYMBOL(__csum_partial_copy_to_user); | 84 | EXPORT_SYMBOL(__csum_partial_copy_to_user); |
74 | EXPORT_SYMBOL(__csum_partial_copy_from_user); | 85 | EXPORT_SYMBOL(__csum_partial_copy_from_user); |
86 | #endif | ||
75 | 87 | ||
76 | EXPORT_SYMBOL(invalid_pte_table); | 88 | EXPORT_SYMBOL(invalid_pte_table); |
77 | #ifdef CONFIG_FUNCTION_TRACER | 89 | #ifdef CONFIG_FUNCTION_TRACER |
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index f6547680c81c..423ae83af1fb 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -31,15 +31,11 @@ | |||
31 | /* | 31 | /* |
32 | * check if we need to save FPU registers | 32 | * check if we need to save FPU registers |
33 | */ | 33 | */ |
34 | PTR_L t3, TASK_THREAD_INFO(a0) | 34 | .set push |
35 | LONG_L t0, TI_FLAGS(t3) | 35 | .set noreorder |
36 | li t1, _TIF_USEDFPU | 36 | beqz a3, 1f |
37 | and t2, t0, t1 | 37 | PTR_L t3, TASK_THREAD_INFO(a0) |
38 | beqz t2, 1f | 38 | .set pop |
39 | nor t1, zero, t1 | ||
40 | |||
41 | and t0, t0, t1 | ||
42 | LONG_S t0, TI_FLAGS(t3) | ||
43 | 39 | ||
44 | /* | 40 | /* |
45 | * clear saved user stack CU1 bit | 41 | * clear saved user stack CU1 bit |
@@ -56,36 +52,9 @@ | |||
56 | .set pop | 52 | .set pop |
57 | 1: | 53 | 1: |
58 | 54 | ||
59 | /* check if we need to save COP2 registers */ | ||
60 | PTR_L t2, TASK_THREAD_INFO(a0) | ||
61 | LONG_L t0, ST_OFF(t2) | ||
62 | bbit0 t0, 30, 1f | ||
63 | |||
64 | /* Disable COP2 in the stored process state */ | ||
65 | li t1, ST0_CU2 | ||
66 | xor t0, t1 | ||
67 | LONG_S t0, ST_OFF(t2) | ||
68 | |||
69 | /* Enable COP2 so we can save it */ | ||
70 | mfc0 t0, CP0_STATUS | ||
71 | or t0, t1 | ||
72 | mtc0 t0, CP0_STATUS | ||
73 | |||
74 | /* Save COP2 */ | ||
75 | daddu a0, THREAD_CP2 | ||
76 | jal octeon_cop2_save | ||
77 | dsubu a0, THREAD_CP2 | ||
78 | |||
79 | /* Disable COP2 now that we are done */ | ||
80 | mfc0 t0, CP0_STATUS | ||
81 | li t1, ST0_CU2 | ||
82 | xor t0, t1 | ||
83 | mtc0 t0, CP0_STATUS | ||
84 | |||
85 | 1: | ||
86 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | 55 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 |
87 | /* Check if we need to store CVMSEG state */ | 56 | /* Check if we need to store CVMSEG state */ |
88 | mfc0 t0, $11,7 /* CvmMemCtl */ | 57 | dmfc0 t0, $11,7 /* CvmMemCtl */ |
89 | bbit0 t0, 6, 3f /* Is user access enabled? */ | 58 | bbit0 t0, 6, 3f /* Is user access enabled? */ |
90 | 59 | ||
91 | /* Store the CVMSEG state */ | 60 | /* Store the CVMSEG state */ |
@@ -109,9 +78,9 @@ | |||
109 | .set reorder | 78 | .set reorder |
110 | 79 | ||
111 | /* Disable access to CVMSEG */ | 80 | /* Disable access to CVMSEG */ |
112 | mfc0 t0, $11,7 /* CvmMemCtl */ | 81 | dmfc0 t0, $11,7 /* CvmMemCtl */ |
113 | xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ | 82 | xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ |
114 | mtc0 t0, $11,7 /* CvmMemCtl */ | 83 | dmtc0 t0, $11,7 /* CvmMemCtl */ |
115 | #endif | 84 | #endif |
116 | 3: | 85 | 3: |
117 | 86 | ||
@@ -147,6 +116,8 @@ | |||
147 | * void octeon_cop2_save(struct octeon_cop2_state *a0) | 116 | * void octeon_cop2_save(struct octeon_cop2_state *a0) |
148 | */ | 117 | */ |
149 | .align 7 | 118 | .align 7 |
119 | .set push | ||
120 | .set noreorder | ||
150 | LEAF(octeon_cop2_save) | 121 | LEAF(octeon_cop2_save) |
151 | 122 | ||
152 | dmfc0 t9, $9,7 /* CvmCtl register. */ | 123 | dmfc0 t9, $9,7 /* CvmCtl register. */ |
@@ -157,17 +128,17 @@ | |||
157 | dmfc2 t2, 0x0200 | 128 | dmfc2 t2, 0x0200 |
158 | sd t0, OCTEON_CP2_CRC_IV(a0) | 129 | sd t0, OCTEON_CP2_CRC_IV(a0) |
159 | sd t1, OCTEON_CP2_CRC_LENGTH(a0) | 130 | sd t1, OCTEON_CP2_CRC_LENGTH(a0) |
160 | sd t2, OCTEON_CP2_CRC_POLY(a0) | ||
161 | /* Skip next instructions if CvmCtl[NODFA_CP2] set */ | 131 | /* Skip next instructions if CvmCtl[NODFA_CP2] set */ |
162 | bbit1 t9, 28, 1f | 132 | bbit1 t9, 28, 1f |
133 | sd t2, OCTEON_CP2_CRC_POLY(a0) | ||
163 | 134 | ||
164 | /* Save the LLM state */ | 135 | /* Save the LLM state */ |
165 | dmfc2 t0, 0x0402 | 136 | dmfc2 t0, 0x0402 |
166 | dmfc2 t1, 0x040A | 137 | dmfc2 t1, 0x040A |
167 | sd t0, OCTEON_CP2_LLM_DAT(a0) | 138 | sd t0, OCTEON_CP2_LLM_DAT(a0) |
168 | sd t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
169 | 139 | ||
170 | 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ | 140 | 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ |
141 | sd t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
171 | 142 | ||
172 | /* Save the COP2 crypto state */ | 143 | /* Save the COP2 crypto state */ |
173 | /* this part is mostly common to both pass 1 and later revisions */ | 144 | /* this part is mostly common to both pass 1 and later revisions */ |
@@ -198,18 +169,20 @@ | |||
198 | sd t2, OCTEON_CP2_AES_KEY+16(a0) | 169 | sd t2, OCTEON_CP2_AES_KEY+16(a0) |
199 | dmfc2 t2, 0x0101 | 170 | dmfc2 t2, 0x0101 |
200 | sd t3, OCTEON_CP2_AES_KEY+24(a0) | 171 | sd t3, OCTEON_CP2_AES_KEY+24(a0) |
201 | mfc0 t3, $15,0 /* Get the processor ID register */ | 172 | mfc0 v0, $15,0 /* Get the processor ID register */ |
202 | sd t0, OCTEON_CP2_AES_KEYLEN(a0) | 173 | sd t0, OCTEON_CP2_AES_KEYLEN(a0) |
203 | li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | 174 | li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ |
204 | sd t1, OCTEON_CP2_AES_RESULT(a0) | 175 | sd t1, OCTEON_CP2_AES_RESULT(a0) |
205 | sd t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
206 | /* Skip to the Pass1 version of the remainder of the COP2 state */ | 176 | /* Skip to the Pass1 version of the remainder of the COP2 state */ |
207 | beq t3, t0, 2f | 177 | beq v0, v1, 2f |
178 | sd t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
208 | 179 | ||
209 | /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ | 180 | /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ |
210 | dmfc2 t1, 0x0240 | 181 | dmfc2 t1, 0x0240 |
211 | dmfc2 t2, 0x0241 | 182 | dmfc2 t2, 0x0241 |
183 | ori v1, v1, 0x9500 /* lowest OCTEON III PrId */ | ||
212 | dmfc2 t3, 0x0242 | 184 | dmfc2 t3, 0x0242 |
185 | subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ | ||
213 | dmfc2 t0, 0x0243 | 186 | dmfc2 t0, 0x0243 |
214 | sd t1, OCTEON_CP2_HSH_DATW(a0) | 187 | sd t1, OCTEON_CP2_HSH_DATW(a0) |
215 | dmfc2 t1, 0x0244 | 188 | dmfc2 t1, 0x0244 |
@@ -262,8 +235,16 @@ | |||
262 | sd t1, OCTEON_CP2_GFM_MULT+8(a0) | 235 | sd t1, OCTEON_CP2_GFM_MULT+8(a0) |
263 | sd t2, OCTEON_CP2_GFM_POLY(a0) | 236 | sd t2, OCTEON_CP2_GFM_POLY(a0) |
264 | sd t3, OCTEON_CP2_GFM_RESULT(a0) | 237 | sd t3, OCTEON_CP2_GFM_RESULT(a0) |
265 | sd t0, OCTEON_CP2_GFM_RESULT+8(a0) | 238 | bltz v1, 4f |
239 | sd t0, OCTEON_CP2_GFM_RESULT+8(a0) | ||
240 | /* OCTEON III things */ | ||
241 | dmfc2 t0, 0x024F | ||
242 | dmfc2 t1, 0x0050 | ||
243 | sd t0, OCTEON_CP2_SHA3(a0) | ||
244 | sd t1, OCTEON_CP2_SHA3+8(a0) | ||
245 | 4: | ||
266 | jr ra | 246 | jr ra |
247 | nop | ||
267 | 248 | ||
268 | 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ | 249 | 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ |
269 | dmfc2 t3, 0x0040 | 250 | dmfc2 t3, 0x0040 |
@@ -289,7 +270,9 @@ | |||
289 | 270 | ||
290 | 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ | 271 | 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ |
291 | jr ra | 272 | jr ra |
273 | nop | ||
292 | END(octeon_cop2_save) | 274 | END(octeon_cop2_save) |
275 | .set pop | ||
293 | 276 | ||
294 | /* | 277 | /* |
295 | * void octeon_cop2_restore(struct octeon_cop2_state *a0) | 278 | * void octeon_cop2_restore(struct octeon_cop2_state *a0) |
@@ -354,9 +337,9 @@ | |||
354 | ld t2, OCTEON_CP2_AES_RESULT+8(a0) | 337 | ld t2, OCTEON_CP2_AES_RESULT+8(a0) |
355 | mfc0 t3, $15,0 /* Get the processor ID register */ | 338 | mfc0 t3, $15,0 /* Get the processor ID register */ |
356 | dmtc2 t0, 0x0110 | 339 | dmtc2 t0, 0x0110 |
357 | li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | 340 | li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ |
358 | dmtc2 t1, 0x0100 | 341 | dmtc2 t1, 0x0100 |
359 | bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ | 342 | bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ |
360 | dmtc2 t2, 0x0101 | 343 | dmtc2 t2, 0x0101 |
361 | 344 | ||
362 | /* this code is specific for pass 1 */ | 345 | /* this code is specific for pass 1 */ |
@@ -384,6 +367,7 @@ | |||
384 | 367 | ||
385 | 3: /* this is post-pass1 code */ | 368 | 3: /* this is post-pass1 code */ |
386 | ld t2, OCTEON_CP2_HSH_DATW(a0) | 369 | ld t2, OCTEON_CP2_HSH_DATW(a0) |
370 | ori v0, v0, 0x9500 /* lowest OCTEON III PrId */ | ||
387 | ld t0, OCTEON_CP2_HSH_DATW+8(a0) | 371 | ld t0, OCTEON_CP2_HSH_DATW+8(a0) |
388 | ld t1, OCTEON_CP2_HSH_DATW+16(a0) | 372 | ld t1, OCTEON_CP2_HSH_DATW+16(a0) |
389 | dmtc2 t2, 0x0240 | 373 | dmtc2 t2, 0x0240 |
@@ -437,9 +421,15 @@ | |||
437 | dmtc2 t2, 0x0259 | 421 | dmtc2 t2, 0x0259 |
438 | ld t2, OCTEON_CP2_GFM_RESULT+8(a0) | 422 | ld t2, OCTEON_CP2_GFM_RESULT+8(a0) |
439 | dmtc2 t0, 0x025E | 423 | dmtc2 t0, 0x025E |
424 | subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ | ||
440 | dmtc2 t1, 0x025A | 425 | dmtc2 t1, 0x025A |
441 | dmtc2 t2, 0x025B | 426 | bltz v0, done_restore |
442 | 427 | dmtc2 t2, 0x025B | |
428 | /* OCTEON III things */ | ||
429 | ld t0, OCTEON_CP2_SHA3(a0) | ||
430 | ld t1, OCTEON_CP2_SHA3+8(a0) | ||
431 | dmtc2 t0, 0x0051 | ||
432 | dmtc2 t1, 0x0050 | ||
443 | done_restore: | 433 | done_restore: |
444 | jr ra | 434 | jr ra |
445 | nop | 435 | nop |
@@ -450,18 +440,23 @@ done_restore: | |||
450 | * void octeon_mult_save() | 440 | * void octeon_mult_save() |
451 | * sp is assumed to point to a struct pt_regs | 441 | * sp is assumed to point to a struct pt_regs |
452 | * | 442 | * |
453 | * NOTE: This is called in SAVE_SOME in stackframe.h. It can only | 443 | * NOTE: This is called in SAVE_TEMP in stackframe.h. It can |
454 | * safely modify k0 and k1. | 444 | * safely modify v1, k0, k1, $10-$15, and $24. It will |
445 | * be overwritten with a processor-specific version of the code. | ||
455 | */ | 446 | */ |
456 | .align 7 | 447 | .p2align 7 |
457 | .set push | 448 | .set push |
458 | .set noreorder | 449 | .set noreorder |
459 | LEAF(octeon_mult_save) | 450 | LEAF(octeon_mult_save) |
460 | dmfc0 k0, $9,7 /* CvmCtl register. */ | 451 | jr ra |
461 | bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ | ||
462 | nop | 452 | nop |
453 | .space 30 * 4, 0 | ||
454 | octeon_mult_save_end: | ||
455 | EXPORT(octeon_mult_save_end) | ||
456 | END(octeon_mult_save) | ||
463 | 457 | ||
464 | /* Save the multiplier state */ | 458 | LEAF(octeon_mult_save2) |
459 | /* Save the multiplier state OCTEON II and earlier*/ | ||
465 | v3mulu k0, $0, $0 | 460 | v3mulu k0, $0, $0 |
466 | v3mulu k1, $0, $0 | 461 | v3mulu k1, $0, $0 |
467 | sd k0, PT_MTP(sp) /* PT_MTP has P0 */ | 462 | sd k0, PT_MTP(sp) /* PT_MTP has P0 */ |
@@ -476,44 +471,107 @@ done_restore: | |||
476 | sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ | 471 | sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ |
477 | jr ra | 472 | jr ra |
478 | sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ | 473 | sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ |
479 | 474 | octeon_mult_save2_end: | |
480 | 1: /* Resume here if CvmCtl[NOMUL] */ | 475 | EXPORT(octeon_mult_save2_end) |
476 | END(octeon_mult_save2) | ||
477 | |||
478 | LEAF(octeon_mult_save3) | ||
479 | /* Save the multiplier state OCTEON III */ | ||
480 | v3mulu $10, $0, $0 /* read P0 */ | ||
481 | v3mulu $11, $0, $0 /* read P1 */ | ||
482 | v3mulu $12, $0, $0 /* read P2 */ | ||
483 | sd $10, PT_MTP+(0*8)(sp) /* store P0 */ | ||
484 | v3mulu $10, $0, $0 /* read P3 */ | ||
485 | sd $11, PT_MTP+(1*8)(sp) /* store P1 */ | ||
486 | v3mulu $11, $0, $0 /* read P4 */ | ||
487 | sd $12, PT_MTP+(2*8)(sp) /* store P2 */ | ||
488 | ori $13, $0, 1 | ||
489 | v3mulu $12, $0, $0 /* read P5 */ | ||
490 | sd $10, PT_MTP+(3*8)(sp) /* store P3 */ | ||
491 | v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ | ||
492 | sd $11, PT_MTP+(4*8)(sp) /* store P4 */ | ||
493 | v3mulu $10, $0, $0 /* read MPL1 */ | ||
494 | sd $12, PT_MTP+(5*8)(sp) /* store P5 */ | ||
495 | v3mulu $11, $0, $0 /* read MPL2 */ | ||
496 | sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ | ||
497 | v3mulu $12, $0, $0 /* read MPL3 */ | ||
498 | sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ | ||
499 | v3mulu $10, $0, $0 /* read MPL4 */ | ||
500 | sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ | ||
501 | v3mulu $11, $0, $0 /* read MPL5 */ | ||
502 | sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ | ||
503 | sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ | ||
481 | jr ra | 504 | jr ra |
482 | END(octeon_mult_save) | 505 | sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ |
506 | octeon_mult_save3_end: | ||
507 | EXPORT(octeon_mult_save3_end) | ||
508 | END(octeon_mult_save3) | ||
483 | .set pop | 509 | .set pop |
484 | 510 | ||
485 | /* | 511 | /* |
486 | * void octeon_mult_restore() | 512 | * void octeon_mult_restore() |
487 | * sp is assumed to point to a struct pt_regs | 513 | * sp is assumed to point to a struct pt_regs |
488 | * | 514 | * |
489 | * NOTE: This is called in RESTORE_SOME in stackframe.h. | 515 | * NOTE: This is called in RESTORE_TEMP in stackframe.h. |
490 | */ | 516 | */ |
491 | .align 7 | 517 | .p2align 7 |
492 | .set push | 518 | .set push |
493 | .set noreorder | 519 | .set noreorder |
494 | LEAF(octeon_mult_restore) | 520 | LEAF(octeon_mult_restore) |
495 | dmfc0 k1, $9,7 /* CvmCtl register. */ | 521 | jr ra |
496 | ld v0, PT_MPL(sp) /* MPL0 */ | 522 | nop |
497 | ld v1, PT_MPL+8(sp) /* MPL1 */ | 523 | .space 30 * 4, 0 |
498 | ld k0, PT_MPL+16(sp) /* MPL2 */ | 524 | octeon_mult_restore_end: |
499 | bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ | 525 | EXPORT(octeon_mult_restore_end) |
500 | /* Normally falls through, so no time wasted here */ | 526 | END(octeon_mult_restore) |
501 | nop | ||
502 | 527 | ||
528 | LEAF(octeon_mult_restore2) | ||
529 | ld v0, PT_MPL(sp) /* MPL0 */ | ||
530 | ld v1, PT_MPL+8(sp) /* MPL1 */ | ||
531 | ld k0, PT_MPL+16(sp) /* MPL2 */ | ||
503 | /* Restore the multiplier state */ | 532 | /* Restore the multiplier state */ |
504 | ld k1, PT_MTP+16(sp) /* P2 */ | 533 | ld k1, PT_MTP+16(sp) /* P2 */ |
505 | MTM0 v0 /* MPL0 */ | 534 | mtm0 v0 /* MPL0 */ |
506 | ld v0, PT_MTP+8(sp) /* P1 */ | 535 | ld v0, PT_MTP+8(sp) /* P1 */ |
507 | MTM1 v1 /* MPL1 */ | 536 | mtm1 v1 /* MPL1 */ |
508 | ld v1, PT_MTP(sp) /* P0 */ | 537 | ld v1, PT_MTP(sp) /* P0 */ |
509 | MTM2 k0 /* MPL2 */ | 538 | mtm2 k0 /* MPL2 */ |
510 | MTP2 k1 /* P2 */ | 539 | mtp2 k1 /* P2 */ |
511 | MTP1 v0 /* P1 */ | 540 | mtp1 v0 /* P1 */ |
512 | jr ra | 541 | jr ra |
513 | MTP0 v1 /* P0 */ | 542 | mtp0 v1 /* P0 */ |
514 | 543 | octeon_mult_restore2_end: | |
515 | 1: /* Resume here if CvmCtl[NOMUL] */ | 544 | EXPORT(octeon_mult_restore2_end) |
545 | END(octeon_mult_restore2) | ||
546 | |||
547 | LEAF(octeon_mult_restore3) | ||
548 | ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ | ||
549 | ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ | ||
550 | ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ | ||
551 | ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ | ||
552 | .word 0x718d0008 | ||
553 | /* mtm0 $12, $13 restore MPL0 and MPL3 */ | ||
554 | ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ | ||
555 | .word 0x714b000c | ||
556 | /* mtm1 $10, $11 restore MPL1 and MPL4 */ | ||
557 | ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ | ||
558 | ld $10, PT_MTP+(0*8)(sp) /* read P0 */ | ||
559 | ld $11, PT_MTP+(3*8)(sp) /* read P3 */ | ||
560 | .word 0x718d000d | ||
561 | /* mtm2 $12, $13 restore MPL2 and MPL5 */ | ||
562 | ld $12, PT_MTP+(1*8)(sp) /* read P1 */ | ||
563 | .word 0x714b0009 | ||
564 | /* mtp0 $10, $11 restore P0 and P3 */ | ||
565 | ld $13, PT_MTP+(4*8)(sp) /* read P4 */ | ||
566 | ld $10, PT_MTP+(2*8)(sp) /* read P2 */ | ||
567 | ld $11, PT_MTP+(5*8)(sp) /* read P5 */ | ||
568 | .word 0x718d000a | ||
569 | /* mtp1 $12, $13 restore P1 and P4 */ | ||
516 | jr ra | 570 | jr ra |
517 | nop | 571 | .word 0x714b000b |
518 | END(octeon_mult_restore) | 572 | /* mtp2 $10, $11 restore P2 and P5 */ |
573 | |||
574 | octeon_mult_restore3_end: | ||
575 | EXPORT(octeon_mult_restore3_end) | ||
576 | END(octeon_mult_restore3) | ||
519 | .set pop | 577 | .set pop |
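The NOTE above octeon_mult_save states that the stub "will be overwritten with a processor-specific version of the code"; the .space padding and the EXPORTed *_end labels are what make such a copy possible. The helper below is only an assumed sketch of that mechanism: the real copy is performed by Octeon platform setup code elsewhere in this series, and the octeon_patch_mult_save() name and its is_octeon3 flag are hypothetical.

    #include <linux/string.h>       /* memcpy */
    #include <linux/types.h>        /* bool */
    #include <asm/cacheflush.h>     /* flush_icache_range */

    /* Labels exported from octeon_switch.S above. */
    extern char octeon_mult_save[], octeon_mult_save_end[];
    extern char octeon_mult_save2[], octeon_mult_save2_end[];
    extern char octeon_mult_save3[], octeon_mult_save3_end[];

    /* Copy the CPU-specific body over the padded stub, then resync the I-cache. */
    static void octeon_patch_mult_save(bool is_octeon3)
    {
            char *src = is_octeon3 ? octeon_mult_save3 : octeon_mult_save2;
            char *end = is_octeon3 ? octeon_mult_save3_end : octeon_mult_save2_end;

            memcpy(octeon_mult_save, src, end - src);
            flush_icache_range((unsigned long)octeon_mult_save,
                               (unsigned long)octeon_mult_save + (end - src));
    }

The stub's .space 30 * 4 padding appears sized so that either variant fits; the same pattern presumably applies to octeon_mult_restore and its two variants.
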
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 097fc8d14e42..130af7d26a9c 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c | |||
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
82 | seq_printf(m, "]\n"); | 82 | seq_printf(m, "]\n"); |
83 | } | 83 | } |
84 | 84 | ||
85 | seq_printf(m, "isa\t\t\t: mips1"); | 85 | seq_printf(m, "isa\t\t\t:"); |
86 | if (cpu_has_mips_r1) | ||
87 | seq_printf(m, " mips1"); | ||
86 | if (cpu_has_mips_2) | 88 | if (cpu_has_mips_2) |
87 | seq_printf(m, "%s", " mips2"); | 89 | seq_printf(m, "%s", " mips2"); |
88 | if (cpu_has_mips_3) | 90 | if (cpu_has_mips_3) |
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
95 | seq_printf(m, "%s", " mips32r1"); | 97 | seq_printf(m, "%s", " mips32r1"); |
96 | if (cpu_has_mips32r2) | 98 | if (cpu_has_mips32r2) |
97 | seq_printf(m, "%s", " mips32r2"); | 99 | seq_printf(m, "%s", " mips32r2"); |
100 | if (cpu_has_mips32r6) | ||
101 | seq_printf(m, "%s", " mips32r6"); | ||
98 | if (cpu_has_mips64r1) | 102 | if (cpu_has_mips64r1) |
99 | seq_printf(m, "%s", " mips64r1"); | 103 | seq_printf(m, "%s", " mips64r1"); |
100 | if (cpu_has_mips64r2) | 104 | if (cpu_has_mips64r2) |
101 | seq_printf(m, "%s", " mips64r2"); | 105 | seq_printf(m, "%s", " mips64r2"); |
106 | if (cpu_has_mips64r6) | ||
107 | seq_printf(m, "%s", " mips64r6"); | ||
102 | seq_printf(m, "\n"); | 108 | seq_printf(m, "\n"); |
103 | 109 | ||
104 | seq_printf(m, "ASEs implemented\t:"); | 110 | seq_printf(m, "ASEs implemented\t:"); |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 85bff5d513e5..bf85cc180d91 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/completion.h> | 25 | #include <linux/completion.h> |
26 | #include <linux/kallsyms.h> | 26 | #include <linux/kallsyms.h> |
27 | #include <linux/random.h> | 27 | #include <linux/random.h> |
28 | #include <linux/prctl.h> | ||
28 | 29 | ||
29 | #include <asm/asm.h> | 30 | #include <asm/asm.h> |
30 | #include <asm/bootinfo.h> | 31 | #include <asm/bootinfo.h> |
@@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self) | |||
562 | { | 563 | { |
563 | smp_call_function(arch_dump_stack, NULL, 1); | 564 | smp_call_function(arch_dump_stack, NULL, 1); |
564 | } | 565 | } |
566 | |||
567 | int mips_get_process_fp_mode(struct task_struct *task) | ||
568 | { | ||
569 | int value = 0; | ||
570 | |||
571 | if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) | ||
572 | value |= PR_FP_MODE_FR; | ||
573 | if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) | ||
574 | value |= PR_FP_MODE_FRE; | ||
575 | |||
576 | return value; | ||
577 | } | ||
578 | |||
579 | int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) | ||
580 | { | ||
581 | const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; | ||
582 | unsigned long switch_count; | ||
583 | struct task_struct *t; | ||
584 | |||
585 | /* Check the value is valid */ | ||
586 | if (value & ~known_bits) | ||
587 | return -EOPNOTSUPP; | ||
588 | |||
589 | /* Avoid inadvertently triggering emulation */ | ||
590 | if ((value & PR_FP_MODE_FR) && cpu_has_fpu && | ||
591 | !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
592 | return -EOPNOTSUPP; | ||
593 | if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) | ||
594 | return -EOPNOTSUPP; | ||
595 | |||
596 | /* FR = 0 not supported in MIPS R6 */ | ||
597 | if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) | ||
598 | return -EOPNOTSUPP; | ||
599 | |||
600 | /* Save FP & vector context, then disable FPU & MSA */ | ||
601 | if (task->signal == current->signal) | ||
602 | lose_fpu(1); | ||
603 | |||
604 | /* Prevent any threads from obtaining live FP context */ | ||
605 | atomic_set(&task->mm->context.fp_mode_switching, 1); | ||
606 | smp_mb__after_atomic(); | ||
607 | |||
608 | /* | ||
609 | * If there are multiple online CPUs then wait until all threads whose | ||
610 | * FP mode is about to change have been context switched. This approach | ||
611 | * allows us to only worry about whether an FP mode switch is in | ||
612 | * progress when FP is first used in a task's time slice. Pretty much all | ||
613 | * of the mode switch overhead can thus be confined to cases where mode | ||
614 | * switches are actually occurring. That is, to here. However, for the | ||
615 | * thread performing the mode switch, it may take a while... | ||
616 | */ | ||
617 | if (num_online_cpus() > 1) { | ||
618 | spin_lock_irq(&task->sighand->siglock); | ||
619 | |||
620 | for_each_thread(task, t) { | ||
621 | if (t == current) | ||
622 | continue; | ||
623 | |||
624 | switch_count = t->nvcsw + t->nivcsw; | ||
625 | |||
626 | do { | ||
627 | spin_unlock_irq(&task->sighand->siglock); | ||
628 | cond_resched(); | ||
629 | spin_lock_irq(&task->sighand->siglock); | ||
630 | } while ((t->nvcsw + t->nivcsw) == switch_count); | ||
631 | } | ||
632 | |||
633 | spin_unlock_irq(&task->sighand->siglock); | ||
634 | } | ||
635 | |||
636 | /* | ||
637 | * There are now no threads of the process with live FP context, so it | ||
638 | * is safe to proceed with the FP mode switch. | ||
639 | */ | ||
640 | for_each_thread(task, t) { | ||
641 | /* Update desired FP register width */ | ||
642 | if (value & PR_FP_MODE_FR) { | ||
643 | clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); | ||
644 | } else { | ||
645 | set_tsk_thread_flag(t, TIF_32BIT_FPREGS); | ||
646 | clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); | ||
647 | } | ||
648 | |||
649 | /* Update desired FP single layout */ | ||
650 | if (value & PR_FP_MODE_FRE) | ||
651 | set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); | ||
652 | else | ||
653 | clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); | ||
654 | } | ||
655 | |||
656 | /* Allow threads to use FP again */ | ||
657 | atomic_set(&task->mm->context.fp_mode_switching, 0); | ||
658 | |||
659 | return 0; | ||
660 | } | ||
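mips_get_process_fp_mode() and mips_set_process_fp_mode() are the arch back ends for the PR_GET_FP_MODE/PR_SET_FP_MODE prctl() operations. The user-space sketch below is not part of the patch; the fallback PR_* values are an assumption matching the uapi <linux/prctl.h> additions that accompany this series.

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_FP_MODE                  /* assumed uapi values */
    # define PR_SET_FP_MODE  45
    # define PR_GET_FP_MODE  46
    # define PR_FP_MODE_FR   (1 << 0)       /* 64-bit FP registers (FR=1) */
    # define PR_FP_MODE_FRE  (1 << 1)       /* 32-bit compatibility via FRE */
    #endif

    int main(void)
    {
            int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

            if (mode < 0) {
                    perror("PR_GET_FP_MODE");
                    return 1;
            }
            printf("FR=%d FRE=%d\n", !!(mode & PR_FP_MODE_FR),
                   !!(mode & PR_FP_MODE_FRE));

            /*
             * Request FR=1.  Requesting FR=0 instead would be rejected
             * with EOPNOTSUPP on an R6 FPU by the kernel code above.
             */
            if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0)
                    perror("PR_SET_FP_MODE");

            return 0;
    }
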
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 6c160c67984c..676c5030a953 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -34,7 +34,7 @@ | |||
34 | .endm | 34 | .endm |
35 | 35 | ||
36 | .set noreorder | 36 | .set noreorder |
37 | .set arch=r4000 | 37 | .set MIPS_ISA_ARCH_LEVEL_RAW |
38 | 38 | ||
39 | LEAF(_save_fp_context) | 39 | LEAF(_save_fp_context) |
40 | .set push | 40 | .set push |
@@ -42,7 +42,8 @@ LEAF(_save_fp_context) | |||
42 | cfc1 t1, fcr31 | 42 | cfc1 t1, fcr31 |
43 | .set pop | 43 | .set pop |
44 | 44 | ||
45 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 45 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
46 | defined(CONFIG_CPU_MIPS32_R6) | ||
46 | .set push | 47 | .set push |
47 | SET_HARDFLOAT | 48 | SET_HARDFLOAT |
48 | #ifdef CONFIG_CPU_MIPS32_R2 | 49 | #ifdef CONFIG_CPU_MIPS32_R2 |
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32) | |||
105 | SET_HARDFLOAT | 106 | SET_HARDFLOAT |
106 | cfc1 t1, fcr31 | 107 | cfc1 t1, fcr31 |
107 | 108 | ||
109 | #ifndef CONFIG_CPU_MIPS64_R6 | ||
108 | mfc0 t0, CP0_STATUS | 110 | mfc0 t0, CP0_STATUS |
109 | sll t0, t0, 5 | 111 | sll t0, t0, 5 |
110 | bgez t0, 1f # skip storing odd if FR=0 | 112 | bgez t0, 1f # skip storing odd if FR=0 |
111 | nop | 113 | nop |
114 | #endif | ||
112 | 115 | ||
113 | /* Store the 16 odd double precision registers */ | 116 | /* Store the 16 odd double precision registers */ |
114 | EX sdc1 $f1, SC32_FPREGS+8(a0) | 117 | EX sdc1 $f1, SC32_FPREGS+8(a0) |
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32) | |||
163 | LEAF(_restore_fp_context) | 166 | LEAF(_restore_fp_context) |
164 | EX lw t1, SC_FPC_CSR(a0) | 167 | EX lw t1, SC_FPC_CSR(a0) |
165 | 168 | ||
166 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 169 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
170 | defined(CONFIG_CPU_MIPS32_R6) | ||
167 | .set push | 171 | .set push |
168 | SET_HARDFLOAT | 172 | SET_HARDFLOAT |
169 | #ifdef CONFIG_CPU_MIPS32_R2 | 173 | #ifdef CONFIG_CPU_MIPS32_R2 |
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32) | |||
223 | SET_HARDFLOAT | 227 | SET_HARDFLOAT |
224 | EX lw t1, SC32_FPC_CSR(a0) | 228 | EX lw t1, SC32_FPC_CSR(a0) |
225 | 229 | ||
230 | #ifndef CONFIG_CPU_MIPS64_R6 | ||
226 | mfc0 t0, CP0_STATUS | 231 | mfc0 t0, CP0_STATUS |
227 | sll t0, t0, 5 | 232 | sll t0, t0, 5 |
228 | bgez t0, 1f # skip loading odd if FR=0 | 233 | bgez t0, 1f # skip loading odd if FR=0 |
229 | nop | 234 | nop |
235 | #endif | ||
230 | 236 | ||
231 | EX ldc1 $f1, SC32_FPREGS+8(a0) | 237 | EX ldc1 $f1, SC32_FPREGS+8(a0) |
232 | EX ldc1 $f3, SC32_FPREGS+24(a0) | 238 | EX ldc1 $f3, SC32_FPREGS+24(a0) |
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 64591e671878..3b1a36f13a7d 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S | |||
@@ -115,7 +115,8 @@ | |||
115 | * Save a thread's fp context. | 115 | * Save a thread's fp context. |
116 | */ | 116 | */ |
117 | LEAF(_save_fp) | 117 | LEAF(_save_fp) |
118 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 118 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
119 | defined(CONFIG_CPU_MIPS32_R6) | ||
119 | mfc0 t0, CP0_STATUS | 120 | mfc0 t0, CP0_STATUS |
120 | #endif | 121 | #endif |
121 | fpu_save_double a0 t0 t1 # clobbers t1 | 122 | fpu_save_double a0 t0 t1 # clobbers t1 |
@@ -126,7 +127,8 @@ LEAF(_save_fp) | |||
126 | * Restore a thread's fp context. | 127 | * Restore a thread's fp context. |
127 | */ | 128 | */ |
128 | LEAF(_restore_fp) | 129 | LEAF(_restore_fp) |
129 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) | 130 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ |
131 | defined(CONFIG_CPU_MIPS32_R6) | ||
130 | mfc0 t0, CP0_STATUS | 132 | mfc0 t0, CP0_STATUS |
131 | #endif | 133 | #endif |
132 | fpu_restore_double a0 t0 t1 # clobbers t1 | 134 | fpu_restore_double a0 t0 t1 # clobbers t1 |
@@ -240,9 +242,9 @@ LEAF(_init_fpu) | |||
240 | mtc1 t1, $f30 | 242 | mtc1 t1, $f30 |
241 | mtc1 t1, $f31 | 243 | mtc1 t1, $f31 |
242 | 244 | ||
243 | #ifdef CONFIG_CPU_MIPS32_R2 | 245 | #if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) |
244 | .set push | 246 | .set push |
245 | .set mips32r2 | 247 | .set MIPS_ISA_LEVEL_RAW |
246 | .set fp=64 | 248 | .set fp=64 |
247 | sll t0, t0, 5 # is Status.FR set? | 249 | sll t0, t0, 5 # is Status.FR set? |
248 | bgez t0, 1f # no: skip setting upper 32b | 250 | bgez t0, 1f # no: skip setting upper 32b |
@@ -280,9 +282,9 @@ LEAF(_init_fpu) | |||
280 | mthc1 t1, $f30 | 282 | mthc1 t1, $f30 |
281 | mthc1 t1, $f31 | 283 | mthc1 t1, $f31 |
282 | 1: .set pop | 284 | 1: .set pop |
283 | #endif /* CONFIG_CPU_MIPS32_R2 */ | 285 | #endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ |
284 | #else | 286 | #else |
285 | .set arch=r4000 | 287 | .set MIPS_ISA_ARCH_LEVEL_RAW |
286 | dmtc1 t1, $f0 | 288 | dmtc1 t1, $f0 |
287 | dmtc1 t1, $f2 | 289 | dmtc1 t1, $f2 |
288 | dmtc1 t1, $f4 | 290 | dmtc1 t1, $f4 |
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 67f2495def1c..d1168d7c31e8 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c | |||
@@ -208,6 +208,7 @@ void spram_config(void) | |||
208 | case CPU_INTERAPTIV: | 208 | case CPU_INTERAPTIV: |
209 | case CPU_PROAPTIV: | 209 | case CPU_PROAPTIV: |
210 | case CPU_P5600: | 210 | case CPU_P5600: |
211 | case CPU_QEMU_GENERIC: | ||
211 | config0 = read_c0_config(); | 212 | config0 = read_c0_config(); |
212 | /* FIXME: addresses are Malta specific */ | 213 | /* FIXME: addresses are Malta specific */ |
213 | if (config0 & (1<<24)) { | 214 | if (config0 & (1<<24)) { |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 604b558809c4..53a7ef9a8f32 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) | |||
136 | : "memory"); | 136 | : "memory"); |
137 | } else if (cpu_has_llsc) { | 137 | } else if (cpu_has_llsc) { |
138 | __asm__ __volatile__ ( | 138 | __asm__ __volatile__ ( |
139 | " .set arch=r4000 \n" | 139 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
140 | " li %[err], 0 \n" | 140 | " li %[err], 0 \n" |
141 | "1: ll %[old], (%[addr]) \n" | 141 | "1: ll %[old], (%[addr]) \n" |
142 | " move %[tmp], %[new] \n" | 142 | " move %[tmp], %[new] \n" |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c3b41e24c05a..33984c04b60b 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <asm/fpu.h> | 46 | #include <asm/fpu.h> |
47 | #include <asm/fpu_emulator.h> | 47 | #include <asm/fpu_emulator.h> |
48 | #include <asm/idle.h> | 48 | #include <asm/idle.h> |
49 | #include <asm/mips-r2-to-r6-emul.h> | ||
49 | #include <asm/mipsregs.h> | 50 | #include <asm/mipsregs.h> |
50 | #include <asm/mipsmtregs.h> | 51 | #include <asm/mipsmtregs.h> |
51 | #include <asm/module.h> | 52 | #include <asm/module.h> |
@@ -837,7 +838,7 @@ out: | |||
837 | exception_exit(prev_state); | 838 | exception_exit(prev_state); |
838 | } | 839 | } |
839 | 840 | ||
840 | static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, | 841 | void do_trap_or_bp(struct pt_regs *regs, unsigned int code, |
841 | const char *str) | 842 | const char *str) |
842 | { | 843 | { |
843 | siginfo_t info; | 844 | siginfo_t info; |
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs) | |||
1027 | unsigned int opcode = 0; | 1028 | unsigned int opcode = 0; |
1028 | int status = -1; | 1029 | int status = -1; |
1029 | 1030 | ||
1031 | /* | ||
1032 | * Avoid any kernel code. Just emulate the R2 instruction | ||
1033 | * as quickly as possible. | ||
1034 | */ | ||
1035 | if (mipsr2_emulation && cpu_has_mips_r6 && | ||
1036 | likely(user_mode(regs))) { | ||
1037 | if (likely(get_user(opcode, epc) >= 0)) { | ||
1038 | status = mipsr2_decoder(regs, opcode); | ||
1039 | switch (status) { | ||
1040 | case 0: | ||
1041 | case SIGEMT: | ||
1042 | task_thread_info(current)->r2_emul_return = 1; | ||
1043 | return; | ||
1044 | case SIGILL: | ||
1045 | goto no_r2_instr; | ||
1046 | default: | ||
1047 | process_fpemu_return(status, | ||
1048 | &current->thread.cp0_baduaddr); | ||
1049 | task_thread_info(current)->r2_emul_return = 1; | ||
1050 | return; | ||
1051 | } | ||
1052 | } | ||
1053 | } | ||
1054 | |||
1055 | no_r2_instr: | ||
1056 | |||
1030 | prev_state = exception_enter(); | 1057 | prev_state = exception_enter(); |
1058 | |||
1031 | if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), | 1059 | if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), |
1032 | SIGILL) == NOTIFY_STOP) | 1060 | SIGILL) == NOTIFY_STOP) |
1033 | goto out; | 1061 | goto out; |
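
The do_ri() hunk above gives the R2-to-R6 emulator the first look at a Reserved Instruction trap: the opcode is read from the faulting EPC, passed to mipsr2_decoder(), and the returned status decides whether the task simply resumes, gets a signal delivered, or falls through to the normal RI handling. The C sketch below shows only that dispatch shape; decode_r2() and handle_ri() are invented stand-ins rather than kernel symbols, and the SIGEMT case is folded into the default arm for brevity.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>

/* Invented stand-in for mipsr2_decoder(): 0 = fully emulated, SIGILL = not
 * a recognised R2 instruction, anything else = emulate + deliver a signal. */
static int decode_r2(uint32_t opcode)
{
        return (opcode >> 26) == 0 ? 0 : SIGILL;    /* toy rule for the sketch */
}

/* Dispatch shape of do_ri(): try the R2 decoder first, and only fall back
 * to the ordinary Reserved Instruction path when it reports SIGILL. */
static void handle_ri(uint32_t opcode)
{
        int status = decode_r2(opcode);

        switch (status) {
        case 0:                         /* emulated, just resume the task */
                puts("emulated, returning to user");
                return;
        case SIGILL:                    /* unknown even to the R2 decoder */
                puts("not an R2 instruction, taking the slow RI path");
                break;
        default:                        /* emulated, but a signal is pending */
                printf("emulated with signal %d\n", status);
                return;
        }
        /* ... the normal notify_die()/simulate_*() handling continues here ... */
}

int main(void)
{
        handle_ri(0x00000020);          /* opcode field 0: emulated in this toy */
        handle_ri(0xfc000000);          /* unrecognised: falls through */
        return 0;
}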
@@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | |||
1134 | return NOTIFY_OK; | 1162 | return NOTIFY_OK; |
1135 | } | 1163 | } |
1136 | 1164 | ||
1165 | static int wait_on_fp_mode_switch(atomic_t *p) | ||
1166 | { | ||
1167 | /* | ||
1168 | * The FP mode for this task is currently being switched. That may | ||
1169 | * involve modifications to the format of this task's FP context which | ||
1170 | * make it unsafe to proceed with execution for the moment. Instead, | ||
1171 | * schedule some other task. | ||
1172 | */ | ||
1173 | schedule(); | ||
1174 | return 0; | ||
1175 | } | ||
1176 | |||
1137 | static int enable_restore_fp_context(int msa) | 1177 | static int enable_restore_fp_context(int msa) |
1138 | { | 1178 | { |
1139 | int err, was_fpu_owner, prior_msa; | 1179 | int err, was_fpu_owner, prior_msa; |
1140 | 1180 | ||
1181 | /* | ||
1182 | * If an FP mode switch is currently underway, wait for it to | ||
1183 | * complete before proceeding. | ||
1184 | */ | ||
1185 | wait_on_atomic_t(&current->mm->context.fp_mode_switching, | ||
1186 | wait_on_fp_mode_switch, TASK_KILLABLE); | ||
1187 | |||
1141 | if (!used_math()) { | 1188 | if (!used_math()) { |
1142 | /* First time FP context user. */ | 1189 | /* First time FP context user. */ |
1143 | preempt_disable(); | 1190 | preempt_disable(); |
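
enable_restore_fp_context() now parks the task on mm->context.fp_mode_switching until any in-flight FP mode switch has finished, yielding the CPU via schedule() instead of spinning. The userspace analogue below uses C11 atomics and sched_yield() in place of wait_on_atomic_t(); it is only meant to show the wait-then-proceed pattern, not the kernel wait-queue machinery.

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

/* Analogue of mm->context.fp_mode_switching: non-zero while another thread
 * is rewriting the FP context layout for this address space. */
static atomic_int fp_mode_switching;

/* Same shape as wait_on_atomic_t() + wait_on_fp_mode_switch(): give up the
 * CPU whenever the flag is still set, proceed once it drops to zero. */
static void wait_for_fp_mode_switch(void)
{
        while (atomic_load(&fp_mode_switching) != 0)
                sched_yield();
}

int main(void)
{
        wait_for_fp_mode_switch();      /* returns at once: no switch pending */
        puts("safe to restore the FP context now");
        return 0;
}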
@@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void) | |||
1541 | case CPU_INTERAPTIV: | 1588 | case CPU_INTERAPTIV: |
1542 | case CPU_PROAPTIV: | 1589 | case CPU_PROAPTIV: |
1543 | case CPU_P5600: | 1590 | case CPU_P5600: |
1591 | case CPU_QEMU_GENERIC: | ||
1544 | { | 1592 | { |
1545 | #define ERRCTL_PE 0x80000000 | 1593 | #define ERRCTL_PE 0x80000000 |
1546 | #define ERRCTL_L2P 0x00800000 | 1594 | #define ERRCTL_L2P 0x00800000 |
@@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void) | |||
1630 | printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", | 1678 | printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", |
1631 | reg_val & (1<<30) ? "secondary" : "primary", | 1679 | reg_val & (1<<30) ? "secondary" : "primary", |
1632 | reg_val & (1<<31) ? "data" : "insn"); | 1680 | reg_val & (1<<31) ? "data" : "insn"); |
1633 | if (cpu_has_mips_r2 && | 1681 | if ((cpu_has_mips_r2_r6) && |
1634 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { | 1682 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { |
1635 | pr_err("Error bits: %s%s%s%s%s%s%s%s\n", | 1683 | pr_err("Error bits: %s%s%s%s%s%s%s%s\n", |
1636 | reg_val & (1<<29) ? "ED " : "", | 1684 | reg_val & (1<<29) ? "ED " : "", |
@@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void) | |||
1670 | unsigned int reg_val; | 1718 | unsigned int reg_val; |
1671 | 1719 | ||
1672 | /* For the moment, report the problem and hang. */ | 1720 | /* For the moment, report the problem and hang. */ |
1673 | if (cpu_has_mips_r2 && | 1721 | if ((cpu_has_mips_r2_r6) && |
1674 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { | 1722 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { |
1675 | pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", | 1723 | pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", |
1676 | read_c0_ecc()); | 1724 | read_c0_ecc()); |
@@ -1959,7 +2007,7 @@ static void configure_hwrena(void) | |||
1959 | { | 2007 | { |
1960 | unsigned int hwrena = cpu_hwrena_impl_bits; | 2008 | unsigned int hwrena = cpu_hwrena_impl_bits; |
1961 | 2009 | ||
1962 | if (cpu_has_mips_r2) | 2010 | if (cpu_has_mips_r2_r6) |
1963 | hwrena |= 0x0000000f; | 2011 | hwrena |= 0x0000000f; |
1964 | 2012 | ||
1965 | if (!noulri && cpu_has_userlocal) | 2013 | if (!noulri && cpu_has_userlocal) |
@@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
2003 | * o read IntCtl.IPTI to determine the timer interrupt | 2051 | * o read IntCtl.IPTI to determine the timer interrupt |
2004 | * o read IntCtl.IPPCI to determine the performance counter interrupt | 2052 | * o read IntCtl.IPPCI to determine the performance counter interrupt |
2005 | */ | 2053 | */ |
2006 | if (cpu_has_mips_r2) { | 2054 | if (cpu_has_mips_r2_r6) { |
2007 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; | 2055 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; |
2008 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; | 2056 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; |
2009 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; | 2057 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; |
@@ -2094,7 +2142,7 @@ void __init trap_init(void) | |||
2094 | #else | 2142 | #else |
2095 | ebase = CKSEG0; | 2143 | ebase = CKSEG0; |
2096 | #endif | 2144 | #endif |
2097 | if (cpu_has_mips_r2) | 2145 | if (cpu_has_mips_r2_r6) |
2098 | ebase += (read_c0_ebase() & 0x3ffff000); | 2146 | ebase += (read_c0_ebase() & 0x3ffff000); |
2099 | } | 2147 | } |
2100 | 2148 | ||
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index e11906dff885..bbb69695a0a1 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c | |||
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs); | |||
129 | : "=&r" (value), "=r" (res) \ | 129 | : "=&r" (value), "=r" (res) \ |
130 | : "r" (addr), "i" (-EFAULT)); | 130 | : "r" (addr), "i" (-EFAULT)); |
131 | 131 | ||
132 | #ifndef CONFIG_CPU_MIPSR6 | ||
132 | #define LoadW(addr, value, res) \ | 133 | #define LoadW(addr, value, res) \ |
133 | __asm__ __volatile__ ( \ | 134 | __asm__ __volatile__ ( \ |
134 | "1:\t"user_lwl("%0", "(%2)")"\n" \ | 135 | "1:\t"user_lwl("%0", "(%2)")"\n" \ |
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs); | |||
146 | ".previous" \ | 147 | ".previous" \ |
147 | : "=&r" (value), "=r" (res) \ | 148 | : "=&r" (value), "=r" (res) \ |
148 | : "r" (addr), "i" (-EFAULT)); | 149 | : "r" (addr), "i" (-EFAULT)); |
150 | #else | ||
151 | /* MIPSR6 has no lwl instruction */ | ||
152 | #define LoadW(addr, value, res) \ | ||
153 | __asm__ __volatile__ ( \ | ||
154 | ".set\tpush\n" \ | ||
155 | ".set\tnoat\n\t" \ | ||
156 | "1:"user_lb("%0", "0(%2)")"\n\t" \ | ||
157 | "2:"user_lbu("$1", "1(%2)")"\n\t" \ | ||
158 | "sll\t%0, 0x8\n\t" \ | ||
159 | "or\t%0, $1\n\t" \ | ||
160 | "3:"user_lbu("$1", "2(%2)")"\n\t" \ | ||
161 | "sll\t%0, 0x8\n\t" \ | ||
162 | "or\t%0, $1\n\t" \ | ||
163 | "4:"user_lbu("$1", "3(%2)")"\n\t" \ | ||
164 | "sll\t%0, 0x8\n\t" \ | ||
165 | "or\t%0, $1\n\t" \ | ||
166 | "li\t%1, 0\n" \ | ||
167 | ".set\tpop\n" \ | ||
168 | "10:\n\t" \ | ||
169 | ".insn\n\t" \ | ||
170 | ".section\t.fixup,\"ax\"\n\t" \ | ||
171 | "11:\tli\t%1, %3\n\t" \ | ||
172 | "j\t10b\n\t" \ | ||
173 | ".previous\n\t" \ | ||
174 | ".section\t__ex_table,\"a\"\n\t" \ | ||
175 | STR(PTR)"\t1b, 11b\n\t" \ | ||
176 | STR(PTR)"\t2b, 11b\n\t" \ | ||
177 | STR(PTR)"\t3b, 11b\n\t" \ | ||
178 | STR(PTR)"\t4b, 11b\n\t" \ | ||
179 | ".previous" \ | ||
180 | : "=&r" (value), "=r" (res) \ | ||
181 | : "r" (addr), "i" (-EFAULT)); | ||
182 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
149 | 183 | ||
150 | #define LoadHWU(addr, value, res) \ | 184 | #define LoadHWU(addr, value, res) \ |
151 | __asm__ __volatile__ ( \ | 185 | __asm__ __volatile__ ( \ |
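
The R6 LoadW replacement above rebuilds a 32-bit word from four byte loads because lwl/lwr no longer exist: each lbu result is shifted into the accumulator so the most significant byte ends up on top. The same shift-and-or sequence in plain C, for the big-endian layout and with an ordinary pointer instead of the user-access and __ex_table fixup machinery, looks like this.

#include <stdint.h>
#include <stdio.h>

/* Big-endian reassembly of a possibly misaligned 32-bit word, mirroring the
 * lb/lbu + sll + or sequence emitted by the R6 LoadW macro (no exception
 * table handling here). */
static uint32_t load_w_be(const uint8_t *p)
{
        uint32_t v;

        v = p[0];                       /* 1: lb   %0, 0(%2) */
        v = (v << 8) | p[1];            /* 2: lbu $1, 1(%2); sll; or */
        v = (v << 8) | p[2];            /* 3: lbu $1, 2(%2); sll; or */
        v = (v << 8) | p[3];            /* 4: lbu $1, 3(%2); sll; or */
        return v;
}

int main(void)
{
        uint8_t buf[] = { 0x12, 0x34, 0x56, 0x78 };

        printf("0x%08x\n", load_w_be(buf));     /* prints 0x12345678 */
        return 0;
}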
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs); | |||
169 | : "=&r" (value), "=r" (res) \ | 203 | : "=&r" (value), "=r" (res) \ |
170 | : "r" (addr), "i" (-EFAULT)); | 204 | : "r" (addr), "i" (-EFAULT)); |
171 | 205 | ||
206 | #ifndef CONFIG_CPU_MIPSR6 | ||
172 | #define LoadWU(addr, value, res) \ | 207 | #define LoadWU(addr, value, res) \ |
173 | __asm__ __volatile__ ( \ | 208 | __asm__ __volatile__ ( \ |
174 | "1:\t"user_lwl("%0", "(%2)")"\n" \ | 209 | "1:\t"user_lwl("%0", "(%2)")"\n" \ |
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs); | |||
206 | ".previous" \ | 241 | ".previous" \ |
207 | : "=&r" (value), "=r" (res) \ | 242 | : "=&r" (value), "=r" (res) \ |
208 | : "r" (addr), "i" (-EFAULT)); | 243 | : "r" (addr), "i" (-EFAULT)); |
244 | #else | ||
245 | /* MIPSR6 has no lwl and ldl instructions */ | ||
246 | #define LoadWU(addr, value, res) \ | ||
247 | __asm__ __volatile__ ( \ | ||
248 | ".set\tpush\n\t" \ | ||
249 | ".set\tnoat\n\t" \ | ||
250 | "1:"user_lbu("%0", "0(%2)")"\n\t" \ | ||
251 | "2:"user_lbu("$1", "1(%2)")"\n\t" \ | ||
252 | "sll\t%0, 0x8\n\t" \ | ||
253 | "or\t%0, $1\n\t" \ | ||
254 | "3:"user_lbu("$1", "2(%2)")"\n\t" \ | ||
255 | "sll\t%0, 0x8\n\t" \ | ||
256 | "or\t%0, $1\n\t" \ | ||
257 | "4:"user_lbu("$1", "3(%2)")"\n\t" \ | ||
258 | "sll\t%0, 0x8\n\t" \ | ||
259 | "or\t%0, $1\n\t" \ | ||
260 | "li\t%1, 0\n" \ | ||
261 | ".set\tpop\n" \ | ||
262 | "10:\n\t" \ | ||
263 | ".insn\n\t" \ | ||
264 | ".section\t.fixup,\"ax\"\n\t" \ | ||
265 | "11:\tli\t%1, %3\n\t" \ | ||
266 | "j\t10b\n\t" \ | ||
267 | ".previous\n\t" \ | ||
268 | ".section\t__ex_table,\"a\"\n\t" \ | ||
269 | STR(PTR)"\t1b, 11b\n\t" \ | ||
270 | STR(PTR)"\t2b, 11b\n\t" \ | ||
271 | STR(PTR)"\t3b, 11b\n\t" \ | ||
272 | STR(PTR)"\t4b, 11b\n\t" \ | ||
273 | ".previous" \ | ||
274 | : "=&r" (value), "=r" (res) \ | ||
275 | : "r" (addr), "i" (-EFAULT)); | ||
276 | |||
277 | #define LoadDW(addr, value, res) \ | ||
278 | __asm__ __volatile__ ( \ | ||
279 | ".set\tpush\n\t" \ | ||
280 | ".set\tnoat\n\t" \ | ||
281 | "1:lb\t%0, 0(%2)\n\t" \ | ||
282 | "2:lbu\t $1, 1(%2)\n\t" \ | ||
283 | "dsll\t%0, 0x8\n\t" \ | ||
284 | "or\t%0, $1\n\t" \ | ||
285 | "3:lbu\t$1, 2(%2)\n\t" \ | ||
286 | "dsll\t%0, 0x8\n\t" \ | ||
287 | "or\t%0, $1\n\t" \ | ||
288 | "4:lbu\t$1, 3(%2)\n\t" \ | ||
289 | "dsll\t%0, 0x8\n\t" \ | ||
290 | "or\t%0, $1\n\t" \ | ||
291 | "5:lbu\t$1, 4(%2)\n\t" \ | ||
292 | "dsll\t%0, 0x8\n\t" \ | ||
293 | "or\t%0, $1\n\t" \ | ||
294 | "6:lbu\t$1, 5(%2)\n\t" \ | ||
295 | "dsll\t%0, 0x8\n\t" \ | ||
296 | "or\t%0, $1\n\t" \ | ||
297 | "7:lbu\t$1, 6(%2)\n\t" \ | ||
298 | "dsll\t%0, 0x8\n\t" \ | ||
299 | "or\t%0, $1\n\t" \ | ||
300 | "8:lbu\t$1, 7(%2)\n\t" \ | ||
301 | "dsll\t%0, 0x8\n\t" \ | ||
302 | "or\t%0, $1\n\t" \ | ||
303 | "li\t%1, 0\n" \ | ||
304 | ".set\tpop\n\t" \ | ||
305 | "10:\n\t" \ | ||
306 | ".insn\n\t" \ | ||
307 | ".section\t.fixup,\"ax\"\n\t" \ | ||
308 | "11:\tli\t%1, %3\n\t" \ | ||
309 | "j\t10b\n\t" \ | ||
310 | ".previous\n\t" \ | ||
311 | ".section\t__ex_table,\"a\"\n\t" \ | ||
312 | STR(PTR)"\t1b, 11b\n\t" \ | ||
313 | STR(PTR)"\t2b, 11b\n\t" \ | ||
314 | STR(PTR)"\t3b, 11b\n\t" \ | ||
315 | STR(PTR)"\t4b, 11b\n\t" \ | ||
316 | STR(PTR)"\t5b, 11b\n\t" \ | ||
317 | STR(PTR)"\t6b, 11b\n\t" \ | ||
318 | STR(PTR)"\t7b, 11b\n\t" \ | ||
319 | STR(PTR)"\t8b, 11b\n\t" \ | ||
320 | ".previous" \ | ||
321 | : "=&r" (value), "=r" (res) \ | ||
322 | : "r" (addr), "i" (-EFAULT)); | ||
323 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
324 | |||
209 | 325 | ||
210 | #define StoreHW(addr, value, res) \ | 326 | #define StoreHW(addr, value, res) \ |
211 | __asm__ __volatile__ ( \ | 327 | __asm__ __volatile__ ( \ |
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs); | |||
228 | : "=r" (res) \ | 344 | : "=r" (res) \ |
229 | : "r" (value), "r" (addr), "i" (-EFAULT)); | 345 | : "r" (value), "r" (addr), "i" (-EFAULT)); |
230 | 346 | ||
347 | #ifndef CONFIG_CPU_MIPSR6 | ||
231 | #define StoreW(addr, value, res) \ | 348 | #define StoreW(addr, value, res) \ |
232 | __asm__ __volatile__ ( \ | 349 | __asm__ __volatile__ ( \ |
233 | "1:\t"user_swl("%1", "(%2)")"\n" \ | 350 | "1:\t"user_swl("%1", "(%2)")"\n" \ |
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs); | |||
263 | ".previous" \ | 380 | ".previous" \ |
264 | : "=r" (res) \ | 381 | : "=r" (res) \ |
265 | : "r" (value), "r" (addr), "i" (-EFAULT)); | 382 | : "r" (value), "r" (addr), "i" (-EFAULT)); |
266 | #endif | 383 | #else |
384 | /* MIPSR6 has no swl and sdl instructions */ | ||
385 | #define StoreW(addr, value, res) \ | ||
386 | __asm__ __volatile__ ( \ | ||
387 | ".set\tpush\n\t" \ | ||
388 | ".set\tnoat\n\t" \ | ||
389 | "1:"user_sb("%1", "3(%2)")"\n\t" \ | ||
390 | "srl\t$1, %1, 0x8\n\t" \ | ||
391 | "2:"user_sb("$1", "2(%2)")"\n\t" \ | ||
392 | "srl\t$1, $1, 0x8\n\t" \ | ||
393 | "3:"user_sb("$1", "1(%2)")"\n\t" \ | ||
394 | "srl\t$1, $1, 0x8\n\t" \ | ||
395 | "4:"user_sb("$1", "0(%2)")"\n\t" \ | ||
396 | ".set\tpop\n\t" \ | ||
397 | "li\t%0, 0\n" \ | ||
398 | "10:\n\t" \ | ||
399 | ".insn\n\t" \ | ||
400 | ".section\t.fixup,\"ax\"\n\t" \ | ||
401 | "11:\tli\t%0, %3\n\t" \ | ||
402 | "j\t10b\n\t" \ | ||
403 | ".previous\n\t" \ | ||
404 | ".section\t__ex_table,\"a\"\n\t" \ | ||
405 | STR(PTR)"\t1b, 11b\n\t" \ | ||
406 | STR(PTR)"\t2b, 11b\n\t" \ | ||
407 | STR(PTR)"\t3b, 11b\n\t" \ | ||
408 | STR(PTR)"\t4b, 11b\n\t" \ | ||
409 | ".previous" \ | ||
410 | : "=&r" (res) \ | ||
411 | : "r" (value), "r" (addr), "i" (-EFAULT) \ | ||
412 | : "memory"); | ||
413 | |||
414 | #define StoreDW(addr, value, res) \ | ||
415 | __asm__ __volatile__ ( \ | ||
416 | ".set\tpush\n\t" \ | ||
417 | ".set\tnoat\n\t" \ | ||
418 | "1:sb\t%1, 7(%2)\n\t" \ | ||
419 | "dsrl\t$1, %1, 0x8\n\t" \ | ||
420 | "2:sb\t$1, 6(%2)\n\t" \ | ||
421 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
422 | "3:sb\t$1, 5(%2)\n\t" \ | ||
423 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
424 | "4:sb\t$1, 4(%2)\n\t" \ | ||
425 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
426 | "5:sb\t$1, 3(%2)\n\t" \ | ||
427 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
428 | "6:sb\t$1, 2(%2)\n\t" \ | ||
429 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
430 | "7:sb\t$1, 1(%2)\n\t" \ | ||
431 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
432 | "8:sb\t$1, 0(%2)\n\t" \ | ||
433 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
434 | ".set\tpop\n\t" \ | ||
435 | "li\t%0, 0\n" \ | ||
436 | "10:\n\t" \ | ||
437 | ".insn\n\t" \ | ||
438 | ".section\t.fixup,\"ax\"\n\t" \ | ||
439 | "11:\tli\t%0, %3\n\t" \ | ||
440 | "j\t10b\n\t" \ | ||
441 | ".previous\n\t" \ | ||
442 | ".section\t__ex_table,\"a\"\n\t" \ | ||
443 | STR(PTR)"\t1b, 11b\n\t" \ | ||
444 | STR(PTR)"\t2b, 11b\n\t" \ | ||
445 | STR(PTR)"\t3b, 11b\n\t" \ | ||
446 | STR(PTR)"\t4b, 11b\n\t" \ | ||
447 | STR(PTR)"\t5b, 11b\n\t" \ | ||
448 | STR(PTR)"\t6b, 11b\n\t" \ | ||
449 | STR(PTR)"\t7b, 11b\n\t" \ | ||
450 | STR(PTR)"\t8b, 11b\n\t" \ | ||
451 | ".previous" \ | ||
452 | : "=&r" (res) \ | ||
453 | : "r" (value), "r" (addr), "i" (-EFAULT) \ | ||
454 | : "memory"); | ||
455 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
456 | |||
457 | #else /* __BIG_ENDIAN */ | ||
267 | 458 | ||
268 | #ifdef __LITTLE_ENDIAN | ||
269 | #define LoadHW(addr, value, res) \ | 459 | #define LoadHW(addr, value, res) \ |
270 | __asm__ __volatile__ (".set\tnoat\n" \ | 460 | __asm__ __volatile__ (".set\tnoat\n" \ |
271 | "1:\t"user_lb("%0", "1(%2)")"\n" \ | 461 | "1:\t"user_lb("%0", "1(%2)")"\n" \ |
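
The StoreW/StoreDW replacements are the mirror image: the register is shifted right by eight bits between individual sb stores so that each byte lands at its offset, and the asm now lists a "memory" clobber. A C rendering of the big-endian StoreW ordering (offset 3 down to offset 0), again without the fixup tables, follows.

#include <stdint.h>
#include <stdio.h>

/* Big-endian byte-wise store matching the R6 StoreW macro: write the least
 * significant byte at offset 3 first and shift the value down before each
 * following sb, so the most significant byte ends up at offset 0. */
static void store_w_be(uint8_t *p, uint32_t v)
{
        p[3] = (uint8_t)v;              /* 1: sb %1, 3(%2) */
        v >>= 8;                        /*    srl $1, %1, 0x8 */
        p[2] = (uint8_t)v;              /* 2: sb $1, 2(%2) */
        v >>= 8;
        p[1] = (uint8_t)v;              /* 3: sb $1, 1(%2) */
        v >>= 8;
        p[0] = (uint8_t)v;              /* 4: sb $1, 0(%2) */
}

int main(void)
{
        uint8_t buf[4];

        store_w_be(buf, 0x12345678);
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;                       /* prints 12 34 56 78 */
}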
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs); | |||
286 | : "=&r" (value), "=r" (res) \ | 476 | : "=&r" (value), "=r" (res) \ |
287 | : "r" (addr), "i" (-EFAULT)); | 477 | : "r" (addr), "i" (-EFAULT)); |
288 | 478 | ||
479 | #ifndef CONFIG_CPU_MIPSR6 | ||
289 | #define LoadW(addr, value, res) \ | 480 | #define LoadW(addr, value, res) \ |
290 | __asm__ __volatile__ ( \ | 481 | __asm__ __volatile__ ( \ |
291 | "1:\t"user_lwl("%0", "3(%2)")"\n" \ | 482 | "1:\t"user_lwl("%0", "3(%2)")"\n" \ |
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs); | |||
303 | ".previous" \ | 494 | ".previous" \ |
304 | : "=&r" (value), "=r" (res) \ | 495 | : "=&r" (value), "=r" (res) \ |
305 | : "r" (addr), "i" (-EFAULT)); | 496 | : "r" (addr), "i" (-EFAULT)); |
497 | #else | ||
498 | /* MIPSR6 has no lwl instruction */ | ||
499 | #define LoadW(addr, value, res) \ | ||
500 | __asm__ __volatile__ ( \ | ||
501 | ".set\tpush\n" \ | ||
502 | ".set\tnoat\n\t" \ | ||
503 | "1:"user_lb("%0", "3(%2)")"\n\t" \ | ||
504 | "2:"user_lbu("$1", "2(%2)")"\n\t" \ | ||
505 | "sll\t%0, 0x8\n\t" \ | ||
506 | "or\t%0, $1\n\t" \ | ||
507 | "3:"user_lbu("$1", "1(%2)")"\n\t" \ | ||
508 | "sll\t%0, 0x8\n\t" \ | ||
509 | "or\t%0, $1\n\t" \ | ||
510 | "4:"user_lbu("$1", "0(%2)")"\n\t" \ | ||
511 | "sll\t%0, 0x8\n\t" \ | ||
512 | "or\t%0, $1\n\t" \ | ||
513 | "li\t%1, 0\n" \ | ||
514 | ".set\tpop\n" \ | ||
515 | "10:\n\t" \ | ||
516 | ".insn\n\t" \ | ||
517 | ".section\t.fixup,\"ax\"\n\t" \ | ||
518 | "11:\tli\t%1, %3\n\t" \ | ||
519 | "j\t10b\n\t" \ | ||
520 | ".previous\n\t" \ | ||
521 | ".section\t__ex_table,\"a\"\n\t" \ | ||
522 | STR(PTR)"\t1b, 11b\n\t" \ | ||
523 | STR(PTR)"\t2b, 11b\n\t" \ | ||
524 | STR(PTR)"\t3b, 11b\n\t" \ | ||
525 | STR(PTR)"\t4b, 11b\n\t" \ | ||
526 | ".previous" \ | ||
527 | : "=&r" (value), "=r" (res) \ | ||
528 | : "r" (addr), "i" (-EFAULT)); | ||
529 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
530 | |||
306 | 531 | ||
307 | #define LoadHWU(addr, value, res) \ | 532 | #define LoadHWU(addr, value, res) \ |
308 | __asm__ __volatile__ ( \ | 533 | __asm__ __volatile__ ( \ |
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs); | |||
326 | : "=&r" (value), "=r" (res) \ | 551 | : "=&r" (value), "=r" (res) \ |
327 | : "r" (addr), "i" (-EFAULT)); | 552 | : "r" (addr), "i" (-EFAULT)); |
328 | 553 | ||
554 | #ifndef CONFIG_CPU_MIPSR6 | ||
329 | #define LoadWU(addr, value, res) \ | 555 | #define LoadWU(addr, value, res) \ |
330 | __asm__ __volatile__ ( \ | 556 | __asm__ __volatile__ ( \ |
331 | "1:\t"user_lwl("%0", "3(%2)")"\n" \ | 557 | "1:\t"user_lwl("%0", "3(%2)")"\n" \ |
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs); | |||
363 | ".previous" \ | 589 | ".previous" \ |
364 | : "=&r" (value), "=r" (res) \ | 590 | : "=&r" (value), "=r" (res) \ |
365 | : "r" (addr), "i" (-EFAULT)); | 591 | : "r" (addr), "i" (-EFAULT)); |
592 | #else | ||
593 | /* MIPSR6 has no lwl and ldl instructions */ | ||
594 | #define LoadWU(addr, value, res) \ | ||
595 | __asm__ __volatile__ ( \ | ||
596 | ".set\tpush\n\t" \ | ||
597 | ".set\tnoat\n\t" \ | ||
598 | "1:"user_lbu("%0", "3(%2)")"\n\t" \ | ||
599 | "2:"user_lbu("$1", "2(%2)")"\n\t" \ | ||
600 | "sll\t%0, 0x8\n\t" \ | ||
601 | "or\t%0, $1\n\t" \ | ||
602 | "3:"user_lbu("$1", "1(%2)")"\n\t" \ | ||
603 | "sll\t%0, 0x8\n\t" \ | ||
604 | "or\t%0, $1\n\t" \ | ||
605 | "4:"user_lbu("$1", "0(%2)")"\n\t" \ | ||
606 | "sll\t%0, 0x8\n\t" \ | ||
607 | "or\t%0, $1\n\t" \ | ||
608 | "li\t%1, 0\n" \ | ||
609 | ".set\tpop\n" \ | ||
610 | "10:\n\t" \ | ||
611 | ".insn\n\t" \ | ||
612 | ".section\t.fixup,\"ax\"\n\t" \ | ||
613 | "11:\tli\t%1, %3\n\t" \ | ||
614 | "j\t10b\n\t" \ | ||
615 | ".previous\n\t" \ | ||
616 | ".section\t__ex_table,\"a\"\n\t" \ | ||
617 | STR(PTR)"\t1b, 11b\n\t" \ | ||
618 | STR(PTR)"\t2b, 11b\n\t" \ | ||
619 | STR(PTR)"\t3b, 11b\n\t" \ | ||
620 | STR(PTR)"\t4b, 11b\n\t" \ | ||
621 | ".previous" \ | ||
622 | : "=&r" (value), "=r" (res) \ | ||
623 | : "r" (addr), "i" (-EFAULT)); | ||
624 | |||
625 | #define LoadDW(addr, value, res) \ | ||
626 | __asm__ __volatile__ ( \ | ||
627 | ".set\tpush\n\t" \ | ||
628 | ".set\tnoat\n\t" \ | ||
629 | "1:lb\t%0, 7(%2)\n\t" \ | ||
630 | "2:lbu\t$1, 6(%2)\n\t" \ | ||
631 | "dsll\t%0, 0x8\n\t" \ | ||
632 | "or\t%0, $1\n\t" \ | ||
633 | "3:lbu\t$1, 5(%2)\n\t" \ | ||
634 | "dsll\t%0, 0x8\n\t" \ | ||
635 | "or\t%0, $1\n\t" \ | ||
636 | "4:lbu\t$1, 4(%2)\n\t" \ | ||
637 | "dsll\t%0, 0x8\n\t" \ | ||
638 | "or\t%0, $1\n\t" \ | ||
639 | "5:lbu\t$1, 3(%2)\n\t" \ | ||
640 | "dsll\t%0, 0x8\n\t" \ | ||
641 | "or\t%0, $1\n\t" \ | ||
642 | "6:lbu\t$1, 2(%2)\n\t" \ | ||
643 | "dsll\t%0, 0x8\n\t" \ | ||
644 | "or\t%0, $1\n\t" \ | ||
645 | "7:lbu\t$1, 1(%2)\n\t" \ | ||
646 | "dsll\t%0, 0x8\n\t" \ | ||
647 | "or\t%0, $1\n\t" \ | ||
648 | "8:lbu\t$1, 0(%2)\n\t" \ | ||
649 | "dsll\t%0, 0x8\n\t" \ | ||
650 | "or\t%0, $1\n\t" \ | ||
651 | "li\t%1, 0\n" \ | ||
652 | ".set\tpop\n\t" \ | ||
653 | "10:\n\t" \ | ||
654 | ".insn\n\t" \ | ||
655 | ".section\t.fixup,\"ax\"\n\t" \ | ||
656 | "11:\tli\t%1, %3\n\t" \ | ||
657 | "j\t10b\n\t" \ | ||
658 | ".previous\n\t" \ | ||
659 | ".section\t__ex_table,\"a\"\n\t" \ | ||
660 | STR(PTR)"\t1b, 11b\n\t" \ | ||
661 | STR(PTR)"\t2b, 11b\n\t" \ | ||
662 | STR(PTR)"\t3b, 11b\n\t" \ | ||
663 | STR(PTR)"\t4b, 11b\n\t" \ | ||
664 | STR(PTR)"\t5b, 11b\n\t" \ | ||
665 | STR(PTR)"\t6b, 11b\n\t" \ | ||
666 | STR(PTR)"\t7b, 11b\n\t" \ | ||
667 | STR(PTR)"\t8b, 11b\n\t" \ | ||
668 | ".previous" \ | ||
669 | : "=&r" (value), "=r" (res) \ | ||
670 | : "r" (addr), "i" (-EFAULT)); | ||
671 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
366 | 672 | ||
367 | #define StoreHW(addr, value, res) \ | 673 | #define StoreHW(addr, value, res) \ |
368 | __asm__ __volatile__ ( \ | 674 | __asm__ __volatile__ ( \ |
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs); | |||
384 | ".previous" \ | 690 | ".previous" \ |
385 | : "=r" (res) \ | 691 | : "=r" (res) \ |
386 | : "r" (value), "r" (addr), "i" (-EFAULT)); | 692 | : "r" (value), "r" (addr), "i" (-EFAULT)); |
387 | 693 | #ifndef CONFIG_CPU_MIPSR6 | |
388 | #define StoreW(addr, value, res) \ | 694 | #define StoreW(addr, value, res) \ |
389 | __asm__ __volatile__ ( \ | 695 | __asm__ __volatile__ ( \ |
390 | "1:\t"user_swl("%1", "3(%2)")"\n" \ | 696 | "1:\t"user_swl("%1", "3(%2)")"\n" \ |
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs); | |||
420 | ".previous" \ | 726 | ".previous" \ |
421 | : "=r" (res) \ | 727 | : "=r" (res) \ |
422 | : "r" (value), "r" (addr), "i" (-EFAULT)); | 728 | : "r" (value), "r" (addr), "i" (-EFAULT)); |
729 | #else | ||
730 | /* MIPSR6 has no swl and sdl instructions */ | ||
731 | #define StoreW(addr, value, res) \ | ||
732 | __asm__ __volatile__ ( \ | ||
733 | ".set\tpush\n\t" \ | ||
734 | ".set\tnoat\n\t" \ | ||
735 | "1:"user_sb("%1", "0(%2)")"\n\t" \ | ||
736 | "srl\t$1, %1, 0x8\n\t" \ | ||
737 | "2:"user_sb("$1", "1(%2)")"\n\t" \ | ||
738 | "srl\t$1, $1, 0x8\n\t" \ | ||
739 | "3:"user_sb("$1", "2(%2)")"\n\t" \ | ||
740 | "srl\t$1, $1, 0x8\n\t" \ | ||
741 | "4:"user_sb("$1", "3(%2)")"\n\t" \ | ||
742 | ".set\tpop\n\t" \ | ||
743 | "li\t%0, 0\n" \ | ||
744 | "10:\n\t" \ | ||
745 | ".insn\n\t" \ | ||
746 | ".section\t.fixup,\"ax\"\n\t" \ | ||
747 | "11:\tli\t%0, %3\n\t" \ | ||
748 | "j\t10b\n\t" \ | ||
749 | ".previous\n\t" \ | ||
750 | ".section\t__ex_table,\"a\"\n\t" \ | ||
751 | STR(PTR)"\t1b, 11b\n\t" \ | ||
752 | STR(PTR)"\t2b, 11b\n\t" \ | ||
753 | STR(PTR)"\t3b, 11b\n\t" \ | ||
754 | STR(PTR)"\t4b, 11b\n\t" \ | ||
755 | ".previous" \ | ||
756 | : "=&r" (res) \ | ||
757 | : "r" (value), "r" (addr), "i" (-EFAULT) \ | ||
758 | : "memory"); | ||
759 | |||
760 | #define StoreDW(addr, value, res) \ | ||
761 | __asm__ __volatile__ ( \ | ||
762 | ".set\tpush\n\t" \ | ||
763 | ".set\tnoat\n\t" \ | ||
764 | "1:sb\t%1, 0(%2)\n\t" \ | ||
765 | "dsrl\t$1, %1, 0x8\n\t" \ | ||
766 | "2:sb\t$1, 1(%2)\n\t" \ | ||
767 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
768 | "3:sb\t$1, 2(%2)\n\t" \ | ||
769 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
770 | "4:sb\t$1, 3(%2)\n\t" \ | ||
771 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
772 | "5:sb\t$1, 4(%2)\n\t" \ | ||
773 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
774 | "6:sb\t$1, 5(%2)\n\t" \ | ||
775 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
776 | "7:sb\t$1, 6(%2)\n\t" \ | ||
777 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
778 | "8:sb\t$1, 7(%2)\n\t" \ | ||
779 | "dsrl\t$1, $1, 0x8\n\t" \ | ||
780 | ".set\tpop\n\t" \ | ||
781 | "li\t%0, 0\n" \ | ||
782 | "10:\n\t" \ | ||
783 | ".insn\n\t" \ | ||
784 | ".section\t.fixup,\"ax\"\n\t" \ | ||
785 | "11:\tli\t%0, %3\n\t" \ | ||
786 | "j\t10b\n\t" \ | ||
787 | ".previous\n\t" \ | ||
788 | ".section\t__ex_table,\"a\"\n\t" \ | ||
789 | STR(PTR)"\t1b, 11b\n\t" \ | ||
790 | STR(PTR)"\t2b, 11b\n\t" \ | ||
791 | STR(PTR)"\t3b, 11b\n\t" \ | ||
792 | STR(PTR)"\t4b, 11b\n\t" \ | ||
793 | STR(PTR)"\t5b, 11b\n\t" \ | ||
794 | STR(PTR)"\t6b, 11b\n\t" \ | ||
795 | STR(PTR)"\t7b, 11b\n\t" \ | ||
796 | STR(PTR)"\t8b, 11b\n\t" \ | ||
797 | ".previous" \ | ||
798 | : "=&r" (res) \ | ||
799 | : "r" (value), "r" (addr), "i" (-EFAULT) \ | ||
800 | : "memory"); | ||
801 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
423 | #endif | 802 | #endif |
424 | 803 | ||
425 | static void emulate_load_store_insn(struct pt_regs *regs, | 804 | static void emulate_load_store_insn(struct pt_regs *regs, |
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
703 | break; | 1082 | break; |
704 | return; | 1083 | return; |
705 | 1084 | ||
1085 | #ifndef CONFIG_CPU_MIPSR6 | ||
706 | /* | 1086 | /* |
707 | * COP2 is available to implementor for application specific use. | 1087 | * COP2 is available to implementor for application specific use. |
708 | * It's up to applications to register a notifier chain and do | 1088 | * It's up to applications to register a notifier chain and do |
709 | * whatever they have to do, including possible sending of signals. | 1089 | * whatever they have to do, including possible sending of signals. |
1090 | * | ||
1091 | * This instruction has been reallocated in Release 6 | ||
710 | */ | 1092 | */ |
711 | case lwc2_op: | 1093 | case lwc2_op: |
712 | cu2_notifier_call_chain(CU2_LWC2_OP, regs); | 1094 | cu2_notifier_call_chain(CU2_LWC2_OP, regs); |
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, | |||
723 | case sdc2_op: | 1105 | case sdc2_op: |
724 | cu2_notifier_call_chain(CU2_SDC2_OP, regs); | 1106 | cu2_notifier_call_chain(CU2_SDC2_OP, regs); |
725 | break; | 1107 | break; |
726 | 1108 | #endif | |
727 | default: | 1109 | default: |
728 | /* | 1110 | /* |
729 | Pheeee... We encountered a yet unknown instruction or | 1111 | Pheeee... We encountered a yet unknown instruction or |
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index eeddc58802e1..1e9e900cd3c3 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile | |||
@@ -8,6 +8,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ | |||
8 | 8 | ||
9 | obj-y += iomap.o | 9 | obj-y += iomap.o |
10 | obj-$(CONFIG_PCI) += iomap-pci.o | 10 | obj-$(CONFIG_PCI) += iomap-pci.o |
11 | lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) | ||
11 | 12 | ||
12 | obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o | 13 | obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o |
13 | obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o | 14 | obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o |
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 5d3238af9b5c..9245e1705e69 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S | |||
@@ -293,9 +293,14 @@ | |||
293 | and t0, src, ADDRMASK | 293 | and t0, src, ADDRMASK |
294 | PREFS( 0, 2*32(src) ) | 294 | PREFS( 0, 2*32(src) ) |
295 | PREFD( 1, 2*32(dst) ) | 295 | PREFD( 1, 2*32(dst) ) |
296 | #ifndef CONFIG_CPU_MIPSR6 | ||
296 | bnez t1, .Ldst_unaligned\@ | 297 | bnez t1, .Ldst_unaligned\@ |
297 | nop | 298 | nop |
298 | bnez t0, .Lsrc_unaligned_dst_aligned\@ | 299 | bnez t0, .Lsrc_unaligned_dst_aligned\@ |
300 | #else | ||
301 | or t0, t0, t1 | ||
302 | bnez t0, .Lcopy_unaligned_bytes\@ | ||
303 | #endif | ||
299 | /* | 304 | /* |
300 | * use delay slot for fall-through | 305 | * use delay slot for fall-through |
301 | * src and dst are aligned; need to compute rem | 306 | * src and dst are aligned; need to compute rem |
@@ -376,6 +381,7 @@ | |||
376 | bne rem, len, 1b | 381 | bne rem, len, 1b |
377 | .set noreorder | 382 | .set noreorder |
378 | 383 | ||
384 | #ifndef CONFIG_CPU_MIPSR6 | ||
379 | /* | 385 | /* |
380 | * src and dst are aligned, need to copy rem bytes (rem < NBYTES) | 386 | * src and dst are aligned, need to copy rem bytes (rem < NBYTES) |
381 | * A loop would do only a byte at a time with possible branch | 387 | * A loop would do only a byte at a time with possible branch |
@@ -477,6 +483,7 @@ | |||
477 | bne len, rem, 1b | 483 | bne len, rem, 1b |
478 | .set noreorder | 484 | .set noreorder |
479 | 485 | ||
486 | #endif /* !CONFIG_CPU_MIPSR6 */ | ||
480 | .Lcopy_bytes_checklen\@: | 487 | .Lcopy_bytes_checklen\@: |
481 | beqz len, .Ldone\@ | 488 | beqz len, .Ldone\@ |
482 | nop | 489 | nop |
@@ -504,6 +511,22 @@ | |||
504 | .Ldone\@: | 511 | .Ldone\@: |
505 | jr ra | 512 | jr ra |
506 | nop | 513 | nop |
514 | |||
515 | #ifdef CONFIG_CPU_MIPSR6 | ||
516 | .Lcopy_unaligned_bytes\@: | ||
517 | 1: | ||
518 | COPY_BYTE(0) | ||
519 | COPY_BYTE(1) | ||
520 | COPY_BYTE(2) | ||
521 | COPY_BYTE(3) | ||
522 | COPY_BYTE(4) | ||
523 | COPY_BYTE(5) | ||
524 | COPY_BYTE(6) | ||
525 | COPY_BYTE(7) | ||
526 | ADD src, src, 8 | ||
527 | b 1b | ||
528 | ADD dst, dst, 8 | ||
529 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
507 | .if __memcpy == 1 | 530 | .if __memcpy == 1 |
508 | END(memcpy) | 531 | END(memcpy) |
509 | .set __memcpy, 0 | 532 | .set __memcpy, 0 |
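
With lwl/ldl/swl/sdl gone, the memcpy.S change ORs the source and destination alignment masks together and, on R6, branches straight to the new .Lcopy_unaligned_bytes loop (a byte copy unrolled by eight) whenever either side is misaligned, instead of taking the old partial-word streaming paths. Reduced to C, the decision is roughly the following; copy_r6_style() is an illustrative name, not a kernel symbol.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ADDRMASK (sizeof(long) - 1)     /* same role as ADDRMASK in memcpy.S */

/* Illustrative R6-style dispatch: any misalignment on either pointer sends
 * the whole copy through the byte loop; fully aligned copies may still use
 * the word/doubleword fast path. */
static void copy_r6_style(void *dst, const void *src, size_t len)
{
        uintptr_t t0 = (uintptr_t)src & ADDRMASK;
        uintptr_t t1 = (uintptr_t)dst & ADDRMASK;
        unsigned char *d = dst;
        const unsigned char *s = src;

        if (t0 | t1) {                  /* or t0, t0, t1; bnez t0, ... */
                while (len--)
                        *d++ = *s++;    /* .Lcopy_unaligned_bytes */
                return;
        }
        memcpy(dst, src, len);          /* stand-in for the aligned fast path */
}

int main(void)
{
        char src[] = "alignment test";
        char dst[sizeof(src)];

        copy_r6_style(dst, src + 1, sizeof(src) - 1);   /* misaligned source */
        puts(dst);
        return 0;
}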
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index c8fe6b1968fb..b8e63fd00375 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S | |||
@@ -111,6 +111,7 @@ | |||
111 | .set at | 111 | .set at |
112 | #endif | 112 | #endif |
113 | 113 | ||
114 | #ifndef CONFIG_CPU_MIPSR6 | ||
114 | R10KCBARRIER(0(ra)) | 115 | R10KCBARRIER(0(ra)) |
115 | #ifdef __MIPSEB__ | 116 | #ifdef __MIPSEB__ |
116 | EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ | 117 | EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ |
@@ -120,6 +121,30 @@ | |||
120 | PTR_SUBU a0, t0 /* long align ptr */ | 121 | PTR_SUBU a0, t0 /* long align ptr */ |
121 | PTR_ADDU a2, t0 /* correct size */ | 122 | PTR_ADDU a2, t0 /* correct size */ |
122 | 123 | ||
124 | #else /* CONFIG_CPU_MIPSR6 */ | ||
125 | #define STORE_BYTE(N) \ | ||
126 | EX(sb, a1, N(a0), .Lbyte_fixup\@); \ | ||
127 | beqz t0, 0f; \ | ||
128 | PTR_ADDU t0, 1; | ||
129 | |||
130 | PTR_ADDU a2, t0 /* correct size */ | ||
131 | PTR_ADDU t0, 1 | ||
132 | STORE_BYTE(0) | ||
133 | STORE_BYTE(1) | ||
134 | #if LONGSIZE == 4 | ||
135 | EX(sb, a1, 2(a0), .Lbyte_fixup\@) | ||
136 | #else | ||
137 | STORE_BYTE(2) | ||
138 | STORE_BYTE(3) | ||
139 | STORE_BYTE(4) | ||
140 | STORE_BYTE(5) | ||
141 | EX(sb, a1, 6(a0), .Lbyte_fixup\@) | ||
142 | #endif | ||
143 | 0: | ||
144 | ori a0, STORMASK | ||
145 | xori a0, STORMASK | ||
146 | PTR_ADDIU a0, STORSIZE | ||
147 | #endif /* CONFIG_CPU_MIPSR6 */ | ||
123 | 1: ori t1, a2, 0x3f /* # of full blocks */ | 148 | 1: ori t1, a2, 0x3f /* # of full blocks */ |
124 | xori t1, 0x3f | 149 | xori t1, 0x3f |
125 | beqz t1, .Lmemset_partial\@ /* no block to fill */ | 150 | beqz t1, .Lmemset_partial\@ /* no block to fill */ |
@@ -159,6 +184,7 @@ | |||
159 | andi a2, STORMASK /* At most one long to go */ | 184 | andi a2, STORMASK /* At most one long to go */ |
160 | 185 | ||
161 | beqz a2, 1f | 186 | beqz a2, 1f |
187 | #ifndef CONFIG_CPU_MIPSR6 | ||
162 | PTR_ADDU a0, a2 /* What's left */ | 188 | PTR_ADDU a0, a2 /* What's left */ |
163 | R10KCBARRIER(0(ra)) | 189 | R10KCBARRIER(0(ra)) |
164 | #ifdef __MIPSEB__ | 190 | #ifdef __MIPSEB__ |
@@ -166,6 +192,22 @@ | |||
166 | #else | 192 | #else |
167 | EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) | 193 | EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) |
168 | #endif | 194 | #endif |
195 | #else | ||
196 | PTR_SUBU t0, $0, a2 | ||
197 | PTR_ADDIU t0, 1 | ||
198 | STORE_BYTE(0) | ||
199 | STORE_BYTE(1) | ||
200 | #if LONGSIZE == 4 | ||
201 | EX(sb, a1, 2(a0), .Lbyte_fixup\@) | ||
202 | #else | ||
203 | STORE_BYTE(2) | ||
204 | STORE_BYTE(3) | ||
205 | STORE_BYTE(4) | ||
206 | STORE_BYTE(5) | ||
207 | EX(sb, a1, 6(a0), .Lbyte_fixup\@) | ||
208 | #endif | ||
209 | 0: | ||
210 | #endif | ||
169 | 1: jr ra | 211 | 1: jr ra |
170 | move a2, zero | 212 | move a2, zero |
171 | 213 | ||
@@ -186,6 +228,11 @@ | |||
186 | .hidden __memset | 228 | .hidden __memset |
187 | .endif | 229 | .endif |
188 | 230 | ||
231 | .Lbyte_fixup\@: | ||
232 | PTR_SUBU a2, $0, t0 | ||
233 | jr ra | ||
234 | PTR_ADDIU a2, 1 | ||
235 | |||
189 | .Lfirst_fixup\@: | 236 | .Lfirst_fixup\@: |
190 | jr ra | 237 | jr ra |
191 | nop | 238 | nop |
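
On R6 the memset.S prologue can no longer use a single swl/sdl partial store to align the buffer, so the new STORE_BYTE sequence writes just the bytes up to the next long boundary one sb at a time (with per-byte faults landing in .Lbyte_fixup), then rounds the pointer up. The C below sketches only the net effect of that head-alignment step, not the register-level bookkeeping; align_head() is an invented name.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STORSIZE sizeof(long)           /* LONGSIZE/STORSIZE in memset.S */
#define STORMASK (STORSIZE - 1)

/* Fill only the bytes up to the next long boundary, as the unrolled R6
 * STORE_BYTE stores do, then hand back an aligned pointer plus the length
 * that remains for the block-fill loop. */
static void *align_head(void *dst, int c, size_t *len)
{
        uintptr_t addr = (uintptr_t)dst;
        size_t head = (0 - addr) & STORMASK;    /* bytes to the boundary */

        if (head > *len)
                head = *len;
        memset(dst, c, head);                   /* the individual sb stores */
        *len -= head;
        return (char *)dst + head;
}

int main(void)
{
        char buf[32];
        size_t len = 20;
        void *p = align_head(buf + 3, 0xaa, &len);

        printf("p mod %zu = %zu, %zu bytes left for the aligned loop\n",
               STORSIZE, (size_t)((uintptr_t)p % STORSIZE), len);
        return 0;
}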
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index be777d9a3f85..272af8ac2425 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/export.h> | 15 | #include <linux/export.h> |
16 | #include <linux/stringify.h> | 16 | #include <linux/stringify.h> |
17 | 17 | ||
18 | #ifndef CONFIG_CPU_MIPSR2 | 18 | #if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6) |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * For cli() we have to insert nops to make sure that the new value | 21 | * For cli() we have to insert nops to make sure that the new value |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 9dfcd7fc1bc3..b30bf65c7d7d 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <asm/processor.h> | 48 | #include <asm/processor.h> |
49 | #include <asm/fpu_emulator.h> | 49 | #include <asm/fpu_emulator.h> |
50 | #include <asm/fpu.h> | 50 | #include <asm/fpu.h> |
51 | #include <asm/mips-r2-to-r6-emul.h> | ||
51 | 52 | ||
52 | #include "ieee754.h" | 53 | #include "ieee754.h" |
53 | 54 | ||
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *, | |||
68 | #define modeindex(v) ((v) & FPU_CSR_RM) | 69 | #define modeindex(v) ((v) & FPU_CSR_RM) |
69 | 70 | ||
70 | /* convert condition code register number to csr bit */ | 71 | /* convert condition code register number to csr bit */ |
71 | static const unsigned int fpucondbit[8] = { | 72 | const unsigned int fpucondbit[8] = { |
72 | FPU_CSR_COND0, | 73 | FPU_CSR_COND0, |
73 | FPU_CSR_COND1, | 74 | FPU_CSR_COND1, |
74 | FPU_CSR_COND2, | 75 | FPU_CSR_COND2, |
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
448 | dec_insn.next_pc_inc; | 449 | dec_insn.next_pc_inc; |
449 | /* Fall through */ | 450 | /* Fall through */ |
450 | case jr_op: | 451 | case jr_op: |
452 | /* For R6, JR already emulated in jalr_op */ | ||
453 | if (NO_R6EMU && insn.r_format.opcode == jr_op) | ||
454 | break; | ||
451 | *contpc = regs->regs[insn.r_format.rs]; | 455 | *contpc = regs->regs[insn.r_format.rs]; |
452 | return 1; | 456 | return 1; |
453 | } | 457 | } |
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
456 | switch (insn.i_format.rt) { | 460 | switch (insn.i_format.rt) { |
457 | case bltzal_op: | 461 | case bltzal_op: |
458 | case bltzall_op: | 462 | case bltzall_op: |
463 | if (NO_R6EMU && (insn.i_format.rs || | ||
464 | insn.i_format.rt == bltzall_op)) | ||
465 | break; | ||
466 | |||
459 | regs->regs[31] = regs->cp0_epc + | 467 | regs->regs[31] = regs->cp0_epc + |
460 | dec_insn.pc_inc + | 468 | dec_insn.pc_inc + |
461 | dec_insn.next_pc_inc; | 469 | dec_insn.next_pc_inc; |
462 | /* Fall through */ | 470 | /* Fall through */ |
463 | case bltz_op: | ||
464 | case bltzl_op: | 471 | case bltzl_op: |
472 | if (NO_R6EMU) | ||
473 | break; | ||
474 | case bltz_op: | ||
465 | if ((long)regs->regs[insn.i_format.rs] < 0) | 475 | if ((long)regs->regs[insn.i_format.rs] < 0) |
466 | *contpc = regs->cp0_epc + | 476 | *contpc = regs->cp0_epc + |
467 | dec_insn.pc_inc + | 477 | dec_insn.pc_inc + |
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
473 | return 1; | 483 | return 1; |
474 | case bgezal_op: | 484 | case bgezal_op: |
475 | case bgezall_op: | 485 | case bgezall_op: |
486 | if (NO_R6EMU && (insn.i_format.rs || | ||
487 | insn.i_format.rt == bgezall_op)) | ||
488 | break; | ||
489 | |||
476 | regs->regs[31] = regs->cp0_epc + | 490 | regs->regs[31] = regs->cp0_epc + |
477 | dec_insn.pc_inc + | 491 | dec_insn.pc_inc + |
478 | dec_insn.next_pc_inc; | 492 | dec_insn.next_pc_inc; |
479 | /* Fall through */ | 493 | /* Fall through */ |
480 | case bgez_op: | ||
481 | case bgezl_op: | 494 | case bgezl_op: |
495 | if (NO_R6EMU) | ||
496 | break; | ||
497 | case bgez_op: | ||
482 | if ((long)regs->regs[insn.i_format.rs] >= 0) | 498 | if ((long)regs->regs[insn.i_format.rs] >= 0) |
483 | *contpc = regs->cp0_epc + | 499 | *contpc = regs->cp0_epc + |
484 | dec_insn.pc_inc + | 500 | dec_insn.pc_inc + |
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
505 | /* Set microMIPS mode bit: XOR for jalx. */ | 521 | /* Set microMIPS mode bit: XOR for jalx. */ |
506 | *contpc ^= bit; | 522 | *contpc ^= bit; |
507 | return 1; | 523 | return 1; |
508 | case beq_op: | ||
509 | case beql_op: | 524 | case beql_op: |
525 | if (NO_R6EMU) | ||
526 | break; | ||
527 | case beq_op: | ||
510 | if (regs->regs[insn.i_format.rs] == | 528 | if (regs->regs[insn.i_format.rs] == |
511 | regs->regs[insn.i_format.rt]) | 529 | regs->regs[insn.i_format.rt]) |
512 | *contpc = regs->cp0_epc + | 530 | *contpc = regs->cp0_epc + |
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
517 | dec_insn.pc_inc + | 535 | dec_insn.pc_inc + |
518 | dec_insn.next_pc_inc; | 536 | dec_insn.next_pc_inc; |
519 | return 1; | 537 | return 1; |
520 | case bne_op: | ||
521 | case bnel_op: | 538 | case bnel_op: |
539 | if (NO_R6EMU) | ||
540 | break; | ||
541 | case bne_op: | ||
522 | if (regs->regs[insn.i_format.rs] != | 542 | if (regs->regs[insn.i_format.rs] != |
523 | regs->regs[insn.i_format.rt]) | 543 | regs->regs[insn.i_format.rt]) |
524 | *contpc = regs->cp0_epc + | 544 | *contpc = regs->cp0_epc + |
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
529 | dec_insn.pc_inc + | 549 | dec_insn.pc_inc + |
530 | dec_insn.next_pc_inc; | 550 | dec_insn.next_pc_inc; |
531 | return 1; | 551 | return 1; |
532 | case blez_op: | ||
533 | case blezl_op: | 552 | case blezl_op: |
553 | if (NO_R6EMU) | ||
554 | break; | ||
555 | case blez_op: | ||
556 | |||
557 | /* | ||
558 | * Compact branches for R6 for the | ||
559 | * blez and blezl opcodes. | ||
560 | * BLEZ | rs = 0 | rt != 0 == BLEZALC | ||
561 | * BLEZ | rs = rt != 0 == BGEZALC | ||
562 | * BLEZ | rs != 0 | rt != 0 == BGEUC | ||
563 | * BLEZL | rs = 0 | rt != 0 == BLEZC | ||
564 | * BLEZL | rs = rt != 0 == BGEZC | ||
565 | * BLEZL | rs != 0 | rt != 0 == BGEC | ||
566 | * | ||
567 | * For real BLEZ{,L}, rt is always 0. | ||
568 | */ | ||
569 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
570 | if ((insn.i_format.opcode == blez_op) && | ||
571 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
572 | (insn.i_format.rs == insn.i_format.rt))) | ||
573 | regs->regs[31] = regs->cp0_epc + | ||
574 | dec_insn.pc_inc; | ||
575 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
576 | dec_insn.next_pc_inc; | ||
577 | |||
578 | return 1; | ||
579 | } | ||
534 | if ((long)regs->regs[insn.i_format.rs] <= 0) | 580 | if ((long)regs->regs[insn.i_format.rs] <= 0) |
535 | *contpc = regs->cp0_epc + | 581 | *contpc = regs->cp0_epc + |
536 | dec_insn.pc_inc + | 582 | dec_insn.pc_inc + |
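
The comment table in the blez/blezl case maps rs/rt combinations onto the R6 compact branches that reuse these opcodes. The emulator does not evaluate the compact condition at all: it sets $31 for the two linking forms (BLEZALC and BGEZALC, blez opcode only) and simply points contpc past the instruction. The field test, pulled out into standalone C with a simplified stand-in for union mips_instruction, is shown below; the bgtz/bgtzl hunk that follows applies the same shape of test to its own opcode.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the rs/rt fields of union mips_instruction. */
struct i_format {
        unsigned int rs;
        unsigned int rt;
};

/* Does an R6 reinterpretation of the blez opcode link $31?  Only BLEZALC
 * (rs == 0, rt != 0) and BGEZALC (rs == rt != 0) do; other non-zero rt
 * combinations are non-linking compact branches, and rt == 0 is plain BLEZ. */
static bool blez_encoding_links(struct i_format i)
{
        if (!i.rt)
                return false;           /* real BLEZ{,L}: rt is always 0 */
        return (!i.rs && i.rt) || (i.rs == i.rt);       /* the kernel's test */
}

int main(void)
{
        struct i_format blezalc = { .rs = 0, .rt = 5 }; /* links */
        struct i_format bgeuc   = { .rs = 3, .rt = 5 }; /* does not link */

        printf("BLEZALC links: %d\n", blez_encoding_links(blezalc));
        printf("BGEUC   links: %d\n", blez_encoding_links(bgeuc));
        return 0;
}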
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
540 | dec_insn.pc_inc + | 586 | dec_insn.pc_inc + |
541 | dec_insn.next_pc_inc; | 587 | dec_insn.next_pc_inc; |
542 | return 1; | 588 | return 1; |
543 | case bgtz_op: | ||
544 | case bgtzl_op: | 589 | case bgtzl_op: |
590 | if (NO_R6EMU) | ||
591 | break; | ||
592 | case bgtz_op: | ||
593 | /* | ||
594 | * Compact branches for R6 for the | ||
595 | * bgtz and bgtzl opcodes. | ||
596 | * BGTZ | rs = 0 | rt != 0 == BGTZALC | ||
597 | * BGTZ | rs = rt != 0 == BLTZALC | ||
598 | * BGTZ | rs != 0 | rt != 0 == BLTUC | ||
599 | * BGTZL | rs = 0 | rt != 0 == BGTZC | ||
600 | * BGTZL | rs = rt != 0 == BLTZC | ||
601 | * BGTZL | rs != 0 | rt != 0 == BLTC | ||
602 | * | ||
603 | * *ZALC variant for BGTZ && rt != 0 | ||
604 | * For real BGTZ{,L}, rt is always 0. | ||
605 | */ | ||
606 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
607 | if ((insn.i_format.opcode == bgtz_op) && | ||
608 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
609 | (insn.i_format.rs == insn.i_format.rt))) | ||
610 | regs->regs[31] = regs->cp0_epc + | ||
611 | dec_insn.pc_inc; | ||
612 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
613 | dec_insn.next_pc_inc; | ||
614 | |||
615 | return 1; | ||
616 | } | ||
617 | |||
545 | if ((long)regs->regs[insn.i_format.rs] > 0) | 618 | if ((long)regs->regs[insn.i_format.rs] > 0) |
546 | *contpc = regs->cp0_epc + | 619 | *contpc = regs->cp0_epc + |
547 | dec_insn.pc_inc + | 620 | dec_insn.pc_inc + |
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
551 | dec_insn.pc_inc + | 624 | dec_insn.pc_inc + |
552 | dec_insn.next_pc_inc; | 625 | dec_insn.next_pc_inc; |
553 | return 1; | 626 | return 1; |
627 | case cbcond0_op: | ||
628 | case cbcond1_op: | ||
629 | if (!cpu_has_mips_r6) | ||
630 | break; | ||
631 | if (insn.i_format.rt && !insn.i_format.rs) | ||
632 | regs->regs[31] = regs->cp0_epc + 4; | ||
633 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
634 | dec_insn.next_pc_inc; | ||
635 | |||
636 | return 1; | ||
554 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | 637 | #ifdef CONFIG_CPU_CAVIUM_OCTEON |
555 | case lwc2_op: /* This is bbit0 on Octeon */ | 638 | case lwc2_op: /* This is bbit0 on Octeon */ |
556 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) | 639 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) |
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
576 | else | 659 | else |
577 | *contpc = regs->cp0_epc + 8; | 660 | *contpc = regs->cp0_epc + 8; |
578 | return 1; | 661 | return 1; |
662 | #else | ||
663 | case bc6_op: | ||
664 | /* | ||
665 | * Only valid for MIPS R6 but we can still end up | ||
666 | * here from a broken userland so just tell the emulator | ||
667 | * this is not a branch and let it break later on. | ||
668 | */ | ||
669 | if (!cpu_has_mips_r6) | ||
670 | break; | ||
671 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
672 | dec_insn.next_pc_inc; | ||
673 | |||
674 | return 1; | ||
675 | case balc6_op: | ||
676 | if (!cpu_has_mips_r6) | ||
677 | break; | ||
678 | regs->regs[31] = regs->cp0_epc + 4; | ||
679 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
680 | dec_insn.next_pc_inc; | ||
681 | |||
682 | return 1; | ||
683 | case beqzcjic_op: | ||
684 | if (!cpu_has_mips_r6) | ||
685 | break; | ||
686 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
687 | dec_insn.next_pc_inc; | ||
688 | |||
689 | return 1; | ||
690 | case bnezcjialc_op: | ||
691 | if (!cpu_has_mips_r6) | ||
692 | break; | ||
693 | if (!insn.i_format.rs) | ||
694 | regs->regs[31] = regs->cp0_epc + 4; | ||
695 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
696 | dec_insn.next_pc_inc; | ||
697 | |||
698 | return 1; | ||
579 | #endif | 699 | #endif |
580 | case cop0_op: | 700 | case cop0_op: |
581 | case cop1_op: | 701 | case cop1_op: |
702 | /* Need to check for R6 bc1nez and bc1eqz branches */ | ||
703 | if (cpu_has_mips_r6 && | ||
704 | ((insn.i_format.rs == bc1eqz_op) || | ||
705 | (insn.i_format.rs == bc1nez_op))) { | ||
706 | bit = 0; | ||
707 | switch (insn.i_format.rs) { | ||
708 | case bc1eqz_op: | ||
709 | if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) | ||
710 | bit = 1; | ||
711 | break; | ||
712 | case bc1nez_op: | ||
713 | if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) | ||
714 | bit = 1; | ||
715 | break; | ||
716 | } | ||
717 | if (bit) | ||
718 | *contpc = regs->cp0_epc + | ||
719 | dec_insn.pc_inc + | ||
720 | (insn.i_format.simmediate << 2); | ||
721 | else | ||
722 | *contpc = regs->cp0_epc + | ||
723 | dec_insn.pc_inc + | ||
724 | dec_insn.next_pc_inc; | ||
725 | |||
726 | return 1; | ||
727 | } | ||
728 | /* R2/R6 compatible cop1 instruction. Fall through */ | ||
582 | case cop2_op: | 729 | case cop2_op: |
583 | case cop1x_op: | 730 | case cop1x_op: |
584 | if (insn.i_format.rs == bc_op) { | 731 | if (insn.i_format.rs == bc_op) { |
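
For the R6 FP branches the emulator inspects bit 0 of the FPR selected by the rt field: architecturally BC1EQZ is taken when that bit is zero and BC1NEZ when it is non-zero, and the taken case adds the shifted immediate to the PC while the not-taken case falls through. The sketch below condenses that test; bc1_taken() is an invented helper and the raw register value stands in for get_fpr32().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* R6 FP branches test bit 0 of the named FPR instead of a condition-code
 * field: BC1EQZ is taken when the bit is clear, BC1NEZ when it is set. */
static bool bc1_taken(bool is_eqz, uint32_t fpr32)
{
        bool bit0 = fpr32 & 0x1;

        return is_eqz ? !bit0 : bit0;
}

int main(void)
{
        printf("bc1eqz on 0x0: %d\n", bc1_taken(true, 0x0));    /* taken */
        printf("bc1nez on 0x1: %d\n", bc1_taken(false, 0x1));   /* taken */
        printf("bc1eqz on 0x1: %d\n", bc1_taken(true, 0x1));    /* not taken */
        return 0;
}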
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, | |||
1414 | * achieve full IEEE-754 accuracy - however this emulator does. | 1561 | * achieve full IEEE-754 accuracy - however this emulator does. |
1415 | */ | 1562 | */ |
1416 | case frsqrt_op: | 1563 | case frsqrt_op: |
1417 | if (!cpu_has_mips_4_5_r2) | 1564 | if (!cpu_has_mips_4_5_r2_r6) |
1418 | return SIGILL; | 1565 | return SIGILL; |
1419 | 1566 | ||
1420 | handler.u = fpemu_sp_rsqrt; | 1567 | handler.u = fpemu_sp_rsqrt; |
1421 | goto scopuop; | 1568 | goto scopuop; |
1422 | 1569 | ||
1423 | case frecip_op: | 1570 | case frecip_op: |
1424 | if (!cpu_has_mips_4_5_r2) | 1571 | if (!cpu_has_mips_4_5_r2_r6) |
1425 | return SIGILL; | 1572 | return SIGILL; |
1426 | 1573 | ||
1427 | handler.u = fpemu_sp_recip; | 1574 | handler.u = fpemu_sp_recip; |
@@ -1616,13 +1763,13 @@ copcsr: | |||
1616 | * achieve full IEEE-754 accuracy - however this emulator does. | 1763 | * achieve full IEEE-754 accuracy - however this emulator does. |
1617 | */ | 1764 | */ |
1618 | case frsqrt_op: | 1765 | case frsqrt_op: |
1619 | if (!cpu_has_mips_4_5_r2) | 1766 | if (!cpu_has_mips_4_5_r2_r6) |
1620 | return SIGILL; | 1767 | return SIGILL; |
1621 | 1768 | ||
1622 | handler.u = fpemu_dp_rsqrt; | 1769 | handler.u = fpemu_dp_rsqrt; |
1623 | goto dcopuop; | 1770 | goto dcopuop; |
1624 | case frecip_op: | 1771 | case frecip_op: |
1625 | if (!cpu_has_mips_4_5_r2) | 1772 | if (!cpu_has_mips_4_5_r2_r6) |
1626 | return SIGILL; | 1773 | return SIGILL; |
1627 | 1774 | ||
1628 | handler.u = fpemu_dp_recip; | 1775 | handler.u = fpemu_dp_recip; |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index dd261df005c2..3f8059602765 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) | |||
794 | __asm__ __volatile__ ( | 794 | __asm__ __volatile__ ( |
795 | ".set push\n\t" | 795 | ".set push\n\t" |
796 | ".set noat\n\t" | 796 | ".set noat\n\t" |
797 | ".set mips3\n\t" | 797 | ".set "MIPS_ISA_LEVEL"\n\t" |
798 | #ifdef CONFIG_32BIT | 798 | #ifdef CONFIG_32BIT |
799 | "la $at,1f\n\t" | 799 | "la $at,1f\n\t" |
800 | #endif | 800 | #endif |
@@ -1255,6 +1255,7 @@ static void probe_pcache(void) | |||
1255 | case CPU_P5600: | 1255 | case CPU_P5600: |
1256 | case CPU_PROAPTIV: | 1256 | case CPU_PROAPTIV: |
1257 | case CPU_M5150: | 1257 | case CPU_M5150: |
1258 | case CPU_QEMU_GENERIC: | ||
1258 | if (!(read_c0_config7() & MIPS_CONF7_IAR) && | 1259 | if (!(read_c0_config7() & MIPS_CONF7_IAR) && |
1259 | (c->icache.waysize > PAGE_SIZE)) | 1260 | (c->icache.waysize > PAGE_SIZE)) |
1260 | c->icache.flags |= MIPS_CACHE_ALIASES; | 1261 | c->icache.flags |= MIPS_CACHE_ALIASES; |
@@ -1472,7 +1473,8 @@ static void setup_scache(void) | |||
1472 | 1473 | ||
1473 | default: | 1474 | default: |
1474 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | | 1475 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
1475 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { | 1476 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | |
1477 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { | ||
1476 | #ifdef CONFIG_MIPS_CPU_SCACHE | 1478 | #ifdef CONFIG_MIPS_CPU_SCACHE |
1477 | if (mips_sc_init ()) { | 1479 | if (mips_sc_init ()) { |
1478 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; | 1480 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; |
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 70ab5d664332..7ff8637e530d 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/ptrace.h> | 16 | #include <linux/ptrace.h> |
17 | #include <linux/ratelimit.h> | ||
17 | #include <linux/mman.h> | 18 | #include <linux/mman.h> |
18 | #include <linux/mm.h> | 19 | #include <linux/mm.h> |
19 | #include <linux/smp.h> | 20 | #include <linux/smp.h> |
@@ -28,6 +29,8 @@ | |||
28 | #include <asm/highmem.h> /* For VMALLOC_END */ | 29 | #include <asm/highmem.h> /* For VMALLOC_END */ |
29 | #include <linux/kdebug.h> | 30 | #include <linux/kdebug.h> |
30 | 31 | ||
32 | int show_unhandled_signals = 1; | ||
33 | |||
31 | /* | 34 | /* |
32 | * This routine handles page faults. It determines the address, | 35 | * This routine handles page faults. It determines the address, |
33 | * and the problem, and then passes it off to one of the appropriate | 36 | * and the problem, and then passes it off to one of the appropriate |
@@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, | |||
44 | int fault; | 47 | int fault; |
45 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 48 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
46 | 49 | ||
50 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | ||
51 | |||
47 | #if 0 | 52 | #if 0 |
48 | printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), | 53 | printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), |
49 | current->comm, current->pid, field, address, write, | 54 | current->comm, current->pid, field, address, write, |
@@ -203,15 +208,21 @@ bad_area_nosemaphore: | |||
203 | if (user_mode(regs)) { | 208 | if (user_mode(regs)) { |
204 | tsk->thread.cp0_badvaddr = address; | 209 | tsk->thread.cp0_badvaddr = address; |
205 | tsk->thread.error_code = write; | 210 | tsk->thread.error_code = write; |
206 | #if 0 | 211 | if (show_unhandled_signals && |
207 | printk("do_page_fault() #2: sending SIGSEGV to %s for " | 212 | unhandled_signal(tsk, SIGSEGV) && |
208 | "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", | 213 | __ratelimit(&ratelimit_state)) { |
209 | tsk->comm, | 214 | pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", |
210 | write ? "write access to" : "read access from", | 215 | tsk->comm, |
211 | field, address, | 216 | write ? "write access to" : "read access from", |
212 | field, (unsigned long) regs->cp0_epc, | 217 | field, address); |
213 | field, (unsigned long) regs->regs[31]); | 218 | pr_info("epc = %0*lx in", field, |
214 | #endif | 219 | (unsigned long) regs->cp0_epc); |
220 | print_vma_addr(" ", regs->cp0_epc); | ||
221 | pr_info("ra = %0*lx in", field, | ||
222 | (unsigned long) regs->regs[31]); | ||
223 | print_vma_addr(" ", regs->regs[31]); | ||
224 | pr_info("\n"); | ||
225 | } | ||
215 | info.si_signo = SIGSEGV; | 226 | info.si_signo = SIGSEGV; |
216 | info.si_errno = 0; | 227 | info.si_errno = 0; |
217 | /* info.si_code has been set above */ | 228 | /* info.si_code has been set above */ |
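
The fault.c change turns the old #if 0 debug printk into a live diagnostic gated both by show_unhandled_signals and by a DEFINE_RATELIMIT_STATE window of ten messages per five seconds, so a task that faults in a loop cannot flood the log. The fixed-window limiter below is only a loose userspace analogue of that gate; the kernel's ___ratelimit() works on jiffies and also reports suppressed callbacks, which this sketch does not.

#include <stdio.h>
#include <time.h>

/* Crude fixed-window rate limiter: at most `burst` messages per `interval`
 * seconds, a loose analogue of DEFINE_RATELIMIT_STATE(..., 5 * HZ, 10). */
struct ratelimit {
        time_t window_start;
        int interval;                   /* seconds */
        int burst;
        int printed;
};

static int ratelimit_ok(struct ratelimit *rl)
{
        time_t now = time(NULL);

        if (now - rl->window_start >= rl->interval) {
                rl->window_start = now;         /* open a new window */
                rl->printed = 0;
        }
        if (rl->printed >= rl->burst)
                return 0;                       /* suppress this message */
        rl->printed++;
        return 1;
}

int main(void)
{
        struct ratelimit rl = { .interval = 5, .burst = 10 };
        int i, shown = 0;

        for (i = 0; i < 100; i++)
                if (ratelimit_ok(&rl))
                        shown++;
        printf("printed %d of 100 fault reports\n", shown);     /* 10 */
        return 0;
}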
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index b611102e23b5..3f85f921801b 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c | |||
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5]; | |||
72 | #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) | 72 | #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) |
73 | #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) | 73 | #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) |
74 | 74 | ||
75 | /* | ||
76 | * R6 has a limited offset field in the pref instruction. | ||
77 | * Skip the prefetch if the offset does not fit in a signed 9-bit field. | ||
78 | */ | ||
79 | #define _uasm_i_pref(a, b, c, d) \ | ||
80 | do { \ | ||
81 | if (cpu_has_mips_r6) { \ | ||
82 | if (c <= 0xff && c >= -0x100) \ | ||
83 | uasm_i_pref(a, b, c, d);\ | ||
84 | } else { \ | ||
85 | uasm_i_pref(a, b, c, d); \ | ||
86 | } \ | ||
87 | } while(0) | ||
88 | |||
75 | static int pref_bias_clear_store; | 89 | static int pref_bias_clear_store; |
76 | static int pref_bias_copy_load; | 90 | static int pref_bias_copy_load; |
77 | static int pref_bias_copy_store; | 91 | static int pref_bias_copy_store; |
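
The _uasm_i_pref() wrapper only emits a prefetch on R6 when the offset fits the instruction's signed 9-bit immediate, i.e. -256 through 255; larger biases are silently dropped rather than mis-encoded. The range check on its own is just this (pref_offset_ok_r6() is a name made up for the sketch).

#include <stdbool.h>
#include <stdio.h>

/* R6 pref carries a signed 9-bit offset, so only -256..255 is encodable;
 * the wrapper in page.c skips the prefetch for anything outside that range. */
static bool pref_offset_ok_r6(int off)
{
        return off >= -0x100 && off <= 0xff;
}

int main(void)
{
        printf("offset  128: %s\n", pref_offset_ok_r6(128)  ? "emit" : "skip");
        printf("offset  384: %s\n", pref_offset_ok_r6(384)  ? "emit" : "skip");
        printf("offset -256: %s\n", pref_offset_ok_r6(-256) ? "emit" : "skip");
        return 0;
}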
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void) | |||
178 | pref_bias_copy_load = 256; | 192 | pref_bias_copy_load = 256; |
179 | pref_bias_copy_store = 128; | 193 | pref_bias_copy_store = 128; |
180 | pref_src_mode = Pref_LoadStreamed; | 194 | pref_src_mode = Pref_LoadStreamed; |
181 | pref_dst_mode = Pref_PrepareForStore; | 195 | if (cpu_has_mips_r6) |
196 | /* | ||
197 | * Bit 30 (Pref_PrepareForStore) has been | ||
198 | * removed from MIPS R6. Use bit 5 | ||
199 | * (Pref_StoreStreamed). | ||
200 | */ | ||
201 | pref_dst_mode = Pref_StoreStreamed; | ||
202 | else | ||
203 | pref_dst_mode = Pref_PrepareForStore; | ||
182 | break; | 204 | break; |
183 | } | 205 | } |
184 | } else { | 206 | } else { |
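On R6 the PrepareForStore prefetch hint (hint 30) is no longer architected, so the copy/clear generator falls back to StoreStreamed (hint 5) for the destination side. Condensed into one expression the selection is roughly as below (names as used in the hunk above; shown only as a sketch):

    /* Illustrative condensation of the destination-hint selection. */
    pref_dst_mode = cpu_has_mips_r6 ? Pref_StoreStreamed
                                    : Pref_PrepareForStore;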
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off) | |||
214 | return; | 236 | return; |
215 | 237 | ||
216 | if (pref_bias_clear_store) { | 238 | if (pref_bias_clear_store) { |
217 | uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, | 239 | _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, |
218 | A0); | 240 | A0); |
219 | } else if (cache_line_size == (half_clear_loop_size << 1)) { | 241 | } else if (cache_line_size == (half_clear_loop_size << 1)) { |
220 | if (cpu_has_cache_cdex_s) { | 242 | if (cpu_has_cache_cdex_s) { |
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off) | |||
357 | return; | 379 | return; |
358 | 380 | ||
359 | if (pref_bias_copy_load) | 381 | if (pref_bias_copy_load) |
360 | uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); | 382 | _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); |
361 | } | 383 | } |
362 | 384 | ||
363 | static inline void build_copy_store_pref(u32 **buf, int off) | 385 | static inline void build_copy_store_pref(u32 **buf, int off) |
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) | |||
366 | return; | 388 | return; |
367 | 389 | ||
368 | if (pref_bias_copy_store) { | 390 | if (pref_bias_copy_store) { |
369 | uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, | 391 | _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, |
370 | A0); | 392 | A0); |
371 | } else if (cache_line_size == (half_copy_loop_size << 1)) { | 393 | } else if (cache_line_size == (half_copy_loop_size << 1)) { |
372 | if (cpu_has_cache_cdex_s) { | 394 | if (cpu_has_cache_cdex_s) { |
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 99eb8fabab60..4ceafd13870c 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c | |||
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c) | |||
81 | case CPU_PROAPTIV: | 81 | case CPU_PROAPTIV: |
82 | case CPU_P5600: | 82 | case CPU_P5600: |
83 | case CPU_BMIPS5000: | 83 | case CPU_BMIPS5000: |
84 | case CPU_QEMU_GENERIC: | ||
84 | if (config2 & (1 << 12)) | 85 | if (config2 & (1 << 12)) |
85 | return 0; | 86 | return 0; |
86 | } | 87 | } |
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void) | |||
104 | 105 | ||
105 | /* Ignore anything but MIPSxx processors */ | 106 | /* Ignore anything but MIPSxx processors */ |
106 | if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | | 107 | if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | |
107 | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) | 108 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | |
109 | MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) | ||
108 | return 0; | 110 | return 0; |
109 | 111 | ||
110 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ | 112 | /* Does this MIPS32/MIPS64 CPU have a config2 register? */ |
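mips_sc_probe() now also accepts the two R6 ISA-level bits when deciding whether to look for an L2 cache via Config2. Folded into a single mask, the test reads as below (the mask macro is illustrative, not from the patch):

    /* Illustrative: same check with the ISA bits gathered in one mask. */
    #define MIPS_SC_ISA_MASK (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | \
                              MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | \
                              MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)

    if (!(c->isa_level & MIPS_SC_ISA_MASK))
            return 0;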
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 30639a6e9b8c..b2afa49beab0 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -485,13 +485,11 @@ static void r4k_tlb_configure(void) | |||
485 | * Enable the no read, no exec bits, and enable large virtual | 485 | * Enable the no read, no exec bits, and enable large virtual |
486 | * address. | 486 | * address. |
487 | */ | 487 | */ |
488 | u32 pg = PG_RIE | PG_XIE; | ||
489 | #ifdef CONFIG_64BIT | 488 | #ifdef CONFIG_64BIT |
490 | pg |= PG_ELPA; | 489 | set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA); |
490 | #else | ||
491 | set_c0_pagegrain(PG_RIE | PG_XIE); | ||
491 | #endif | 492 | #endif |
492 | if (cpu_has_rixiex) | ||
493 | pg |= PG_IEC; | ||
494 | write_c0_pagegrain(pg); | ||
495 | } | 493 | } |
496 | 494 | ||
497 | temp_tlb_entry = current_cpu_data.tlbsize - 1; | 495 | temp_tlb_entry = current_cpu_data.tlbsize - 1; |
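r4k_tlb_configure() now sets the RIE/XIE bits (plus ELPA on 64-bit) through the set_c0_pagegrain() accessor instead of accumulating a value and writing it wholesale; the old cpu_has_rixiex/PG_IEC handling is dropped. The set_c0_*() accessors follow the usual read-modify-write convention, roughly as sketched below (a sketch of the mipsregs.h pattern, not the literal generated definition):

    /* Sketch of the generated set_c0_pagegrain() accessor. */
    static inline unsigned int set_c0_pagegrain(unsigned int set)
    {
            unsigned int res = read_c0_pagegrain();

            write_c0_pagegrain(res | set);
            return res;
    }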
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 3978a3d81366..d75ff73a2012 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
501 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; | 501 | case tlb_indexed: tlbw = uasm_i_tlbwi; break; |
502 | } | 502 | } |
503 | 503 | ||
504 | if (cpu_has_mips_r2) { | 504 | if (cpu_has_mips_r2_exec_hazard) { |
505 | /* | 505 | /* |
506 | * The architecture spec says an ehb is required here, | 506 | * The architecture spec says an ehb is required here, |
507 | * but a number of cores do not have the hazard and | 507 | * but a number of cores do not have the hazard and |
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, | |||
514 | case CPU_PROAPTIV: | 514 | case CPU_PROAPTIV: |
515 | case CPU_P5600: | 515 | case CPU_P5600: |
516 | case CPU_M5150: | 516 | case CPU_M5150: |
517 | case CPU_QEMU_GENERIC: | ||
517 | break; | 518 | break; |
518 | 519 | ||
519 | default: | 520 | default: |
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void) | |||
1952 | 1953 | ||
1953 | switch (current_cpu_type()) { | 1954 | switch (current_cpu_type()) { |
1954 | default: | 1955 | default: |
1955 | if (cpu_has_mips_r2) { | 1956 | if (cpu_has_mips_r2_exec_hazard) { |
1956 | uasm_i_ehb(&p); | 1957 | uasm_i_ehb(&p); |
1957 | 1958 | ||
1958 | case CPU_CAVIUM_OCTEON: | 1959 | case CPU_CAVIUM_OCTEON: |
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void) | |||
2019 | 2020 | ||
2020 | switch (current_cpu_type()) { | 2021 | switch (current_cpu_type()) { |
2021 | default: | 2022 | default: |
2022 | if (cpu_has_mips_r2) { | 2023 | if (cpu_has_mips_r2_exec_hazard) { |
2023 | uasm_i_ehb(&p); | 2024 | uasm_i_ehb(&p); |
2024 | 2025 | ||
2025 | case CPU_CAVIUM_OCTEON: | 2026 | case CPU_CAVIUM_OCTEON: |
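The tlbex changes key the ehb emission off cpu_has_mips_r2_exec_hazard instead of cpu_has_mips_r2, so R6 cores, which share the R2-style execution-hazard model, also get the barrier unless they are on the known-hazard-free list. One plausible shape of that feature test, stated purely as an assumption (the real definition lives in asm/cpu-features.h and may differ):

    /* Hypothetical sketch only - not the actual cpu-features.h text. */
    #ifndef cpu_has_mips_r2_exec_hazard
    #define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
    #endif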
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 8399ddf03a02..d78178daea4b 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c | |||
@@ -38,14 +38,6 @@ | |||
38 | | (e) << RE_SH \ | 38 | | (e) << RE_SH \ |
39 | | (f) << FUNC_SH) | 39 | | (f) << FUNC_SH) |
40 | 40 | ||
41 | /* Define these when we are not the ISA the kernel is being compiled with. */ | ||
42 | #ifndef CONFIG_CPU_MICROMIPS | ||
43 | #define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) | ||
44 | #define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) | ||
45 | #define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) | ||
46 | #define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) | ||
47 | #endif | ||
48 | |||
49 | #include "uasm.c" | 41 | #include "uasm.c" |
50 | 42 | ||
51 | static struct insn insn_table_MM[] = { | 43 | static struct insn insn_table_MM[] = { |
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 8e02291cfc0c..b4a837893562 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c | |||
@@ -38,13 +38,13 @@ | |||
38 | | (e) << RE_SH \ | 38 | | (e) << RE_SH \ |
39 | | (f) << FUNC_SH) | 39 | | (f) << FUNC_SH) |
40 | 40 | ||
41 | /* Define these when we are not the ISA the kernel is being compiled with. */ | 41 | /* This macro sets the non-variable bits of an R6 instruction. */ |
42 | #ifdef CONFIG_CPU_MICROMIPS | 42 | #define M6(a, b, c, d, e) \ |
43 | #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) | 43 | ((a) << OP_SH \ |
44 | #define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) | 44 | | (b) << RS_SH \ |
45 | #define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) | 45 | | (c) << RT_SH \ |
46 | #define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) | 46 | | (d) << SIMM9_SH \ |
47 | #endif | 47 | | (e) << FUNC_SH) |
48 | 48 | ||
49 | #include "uasm.c" | 49 | #include "uasm.c" |
50 | 50 | ||
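The new M6() macro packs an R6-style instruction from a major opcode, rs, rt, a 9-bit signed immediate and a function code. With SIMM9_SH (7) and FUNC_SH (0) as added to uasm.c further down, and the conventional MIPS positions for op/rs/rt (26/21/16, assumed here), a stand-alone encoder would look roughly like this:

    /*
     * Sketch of the field layout M6() produces; the op/rs/rt shifts are
     * the standard MIPS positions and are an assumption of this sketch.
     */
    static inline u32 encode_r6_simm9(u32 op, u32 rs, u32 rt,
                                      s32 simm9, u32 func)
    {
            return (op << 26) | (rs << 21) | (rt << 16) |
                   ((simm9 & 0x1ff) << 7) | func;
    }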
@@ -62,7 +62,11 @@ static struct insn insn_table[] = { | |||
62 | { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, | 62 | { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, |
63 | { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, | 63 | { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, |
64 | { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, | 64 | { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, |
65 | #ifndef CONFIG_CPU_MIPSR6 | ||
65 | { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 66 | { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
67 | #else | ||
68 | { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, | ||
69 | #endif | ||
66 | { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 70 | { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
67 | { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, | 71 | { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, |
68 | { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, | 72 | { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, |
@@ -85,13 +89,22 @@ static struct insn insn_table[] = { | |||
85 | { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, | 89 | { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, |
86 | { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, | 90 | { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, |
87 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, | 91 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, |
92 | #ifndef CONFIG_CPU_MIPSR6 | ||
88 | { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, | 93 | { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, |
94 | #else | ||
95 | { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS }, | ||
96 | #endif | ||
89 | { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 97 | { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
90 | { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 98 | { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
91 | { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, | 99 | { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, |
92 | { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 100 | { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
101 | #ifndef CONFIG_CPU_MIPSR6 | ||
93 | { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 102 | { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
94 | { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 103 | { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
104 | #else | ||
105 | { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, | ||
106 | { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, | ||
107 | #endif | ||
95 | { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, | 108 | { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, |
96 | { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 109 | { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
97 | { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, | 110 | { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, |
@@ -104,11 +117,20 @@ static struct insn insn_table[] = { | |||
104 | { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, | 117 | { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, |
105 | { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, | 118 | { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, |
106 | { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, | 119 | { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, |
120 | #ifndef CONFIG_CPU_MIPSR6 | ||
107 | { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 121 | { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
122 | #else | ||
123 | { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, | ||
124 | #endif | ||
108 | { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, | 125 | { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, |
109 | { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, | 126 | { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, |
127 | #ifndef CONFIG_CPU_MIPSR6 | ||
110 | { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 128 | { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
111 | { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 129 | { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
130 | #else | ||
131 | { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, | ||
132 | { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, | ||
133 | #endif | ||
112 | { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 134 | { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
113 | { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, | 135 | { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, |
114 | { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, | 136 | { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, |
@@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...) | |||
198 | op |= build_set(va_arg(ap, u32)); | 220 | op |= build_set(va_arg(ap, u32)); |
199 | if (ip->fields & SCIMM) | 221 | if (ip->fields & SCIMM) |
200 | op |= build_scimm(va_arg(ap, u32)); | 222 | op |= build_scimm(va_arg(ap, u32)); |
223 | if (ip->fields & SIMM9) | ||
224 | op |= build_scimm9(va_arg(ap, u32)); | ||
201 | va_end(ap); | 225 | va_end(ap); |
202 | 226 | ||
203 | **buf = op; | 227 | **buf = op; |
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 4adf30284813..319051c34343 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c | |||
@@ -24,7 +24,8 @@ enum fields { | |||
24 | JIMM = 0x080, | 24 | JIMM = 0x080, |
25 | FUNC = 0x100, | 25 | FUNC = 0x100, |
26 | SET = 0x200, | 26 | SET = 0x200, |
27 | SCIMM = 0x400 | 27 | SCIMM = 0x400, |
28 | SIMM9 = 0x800, | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | #define OP_MASK 0x3f | 31 | #define OP_MASK 0x3f |
@@ -41,6 +42,8 @@ enum fields { | |||
41 | #define FUNC_SH 0 | 42 | #define FUNC_SH 0 |
42 | #define SET_MASK 0x7 | 43 | #define SET_MASK 0x7 |
43 | #define SET_SH 0 | 44 | #define SET_SH 0 |
45 | #define SIMM9_SH 7 | ||
46 | #define SIMM9_MASK 0x1ff | ||
44 | 47 | ||
45 | enum opcode { | 48 | enum opcode { |
46 | insn_invalid, | 49 | insn_invalid, |
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg) | |||
116 | return (arg & SCIMM_MASK) << SCIMM_SH; | 119 | return (arg & SCIMM_MASK) << SCIMM_SH; |
117 | } | 120 | } |
118 | 121 | ||
122 | static inline u32 build_scimm9(s32 arg) | ||
123 | { | ||
124 | WARN((arg > 0xff || arg < -0x100), | ||
125 | KERN_WARNING "Micro-assembler field overflow\n"); | ||
126 | |||
127 | return (arg & SIMM9_MASK) << SIMM9_SH; | ||
128 | } | ||
129 | |||
119 | static inline u32 build_func(u32 arg) | 130 | static inline u32 build_func(u32 arg) |
120 | { | 131 | { |
121 | WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); | 132 | WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); |
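build_scimm9() masks the signed offset to nine bits and places it at bit 7, warning when the value falls outside -256..255. A worked example of the packing (illustrative, not part of the patch):

    /* SIMM9 packing: (off & 0x1ff) << 7 */
    #define SIMM9_FIELD(off)        (((u32)(off) & 0x1ff) << 7)

    /*
     * SIMM9_FIELD(-32) == 0xf000  (-32 masks to 0x1e0)
     * SIMM9_FIELD(64)  == 0x2000  ( 64 masks to 0x040)
     */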
@@ -330,7 +341,7 @@ I_u3u1u2(_ldx) | |||
330 | void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, | 341 | void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, |
331 | unsigned int c) | 342 | unsigned int c) |
332 | { | 343 | { |
333 | if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) | 344 | if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) |
334 | /* | 345 | /* |
335 | * As per erratum Core-14449, replace prefetches 0-4, | 346 | * As per erratum Core-14449, replace prefetches 0-4, |
336 | * 6-24 with 'pref 28'. | 347 | * 6-24 with 'pref 28'. |
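The Octeon prefetch erratum (Core-14449) check is now expressed through the CAVIUM_OCTEON_DCACHE_PREFETCH_WAR workaround macro rather than an inline model test, so non-Octeon configurations can compile the branch away. The war.h convention is, roughly, that affected platforms define the macro to a model test and everyone else to 0; the fallback below is a hypothetical sketch, not the actual header text:

    /* Hypothetical fallback; the real war.h headers may differ. */
    #ifndef CAVIUM_OCTEON_DCACHE_PREFETCH_WAR
    #define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR       0
    #endif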
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index ec1dd2491f96..e1d69895fb1d 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c | |||
@@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts) | |||
72 | int get_c0_perfcount_int(void) | 72 | int get_c0_perfcount_int(void) |
73 | { | 73 | { |
74 | if (gic_present) | 74 | if (gic_present) |
75 | return gic_get_c0_compare_int(); | 75 | return gic_get_c0_perfcount_int(); |
76 | if (cp0_perfcount_irq >= 0) | 76 | if (cp0_perfcount_irq >= 0) |
77 | return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; | 77 | return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; |
78 | return -1; | 78 | return -1; |
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c index f2355e3e65a1..f97e169393bc 100644 --- a/arch/mips/pci/pci-bcm1480.c +++ b/arch/mips/pci/pci-bcm1480.c | |||
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn, | |||
173 | } | 173 | } |
174 | 174 | ||
175 | struct pci_ops bcm1480_pci_ops = { | 175 | struct pci_ops bcm1480_pci_ops = { |
176 | .read = bcm1480_pcibios_read, | 176 | .read = bcm1480_pcibios_read, |
177 | .write = bcm1480_pcibios_write, | 177 | .write = bcm1480_pcibios_write, |
178 | }; | 178 | }; |
179 | 179 | ||
180 | static struct resource bcm1480_mem_resource = { | 180 | static struct resource bcm1480_mem_resource = { |
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index bedb72bd3a27..a04af55d89f1 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c | |||
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, | |||
327 | 327 | ||
328 | 328 | ||
329 | static struct pci_ops octeon_pci_ops = { | 329 | static struct pci_ops octeon_pci_ops = { |
330 | .read = octeon_read_config, | 330 | .read = octeon_read_config, |
331 | .write = octeon_write_config, | 331 | .write = octeon_write_config, |
332 | }; | 332 | }; |
333 | 333 | ||
334 | static struct resource octeon_pci_mem_resource = { | 334 | static struct resource octeon_pci_mem_resource = { |
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index eb4a17ba4a53..1bb0b2bf8d6e 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c | |||
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, | |||
1792 | } | 1792 | } |
1793 | 1793 | ||
1794 | static struct pci_ops octeon_pcie0_ops = { | 1794 | static struct pci_ops octeon_pcie0_ops = { |
1795 | .read = octeon_pcie0_read_config, | 1795 | .read = octeon_pcie0_read_config, |
1796 | .write = octeon_pcie0_write_config, | 1796 | .write = octeon_pcie0_write_config, |
1797 | }; | 1797 | }; |
1798 | 1798 | ||
1799 | static struct resource octeon_pcie0_mem_resource = { | 1799 | static struct resource octeon_pcie0_mem_resource = { |
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = { | |||
1813 | }; | 1813 | }; |
1814 | 1814 | ||
1815 | static struct pci_ops octeon_pcie1_ops = { | 1815 | static struct pci_ops octeon_pcie1_ops = { |
1816 | .read = octeon_pcie1_read_config, | 1816 | .read = octeon_pcie1_read_config, |
1817 | .write = octeon_pcie1_write_config, | 1817 | .write = octeon_pcie1_write_config, |
1818 | }; | 1818 | }; |
1819 | 1819 | ||
1820 | static struct resource octeon_pcie1_mem_resource = { | 1820 | static struct resource octeon_pcie1_mem_resource = { |
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = { | |||
1834 | }; | 1834 | }; |
1835 | 1835 | ||
1836 | static struct pci_ops octeon_dummy_ops = { | 1836 | static struct pci_ops octeon_dummy_ops = { |
1837 | .read = octeon_dummy_read_config, | 1837 | .read = octeon_dummy_read_config, |
1838 | .write = octeon_dummy_write_config, | 1838 | .write = octeon_dummy_write_config, |
1839 | }; | 1839 | }; |
1840 | 1840 | ||
1841 | static struct resource octeon_dummy_mem_resource = { | 1841 | static struct resource octeon_dummy_mem_resource = { |
diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig index 6073ca456d11..4190093d3053 100644 --- a/arch/mips/pmcs-msp71xx/Kconfig +++ b/arch/mips/pmcs-msp71xx/Kconfig | |||
@@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA | |||
36 | endchoice | 36 | endchoice |
37 | 37 | ||
38 | config MSP_HAS_USB | 38 | config MSP_HAS_USB |
39 | boolean | 39 | bool |
40 | depends on PMC_MSP | 40 | depends on PMC_MSP |
41 | 41 | ||
42 | config MSP_ETH | 42 | config MSP_ETH |
43 | boolean | 43 | bool |
44 | select MSP_HAS_MAC | 44 | select MSP_HAS_MAC |
45 | depends on PMC_MSP | 45 | depends on PMC_MSP |
46 | 46 | ||
47 | config MSP_HAS_MAC | 47 | config MSP_HAS_MAC |
48 | boolean | 48 | bool |
49 | depends on PMC_MSP | 49 | depends on PMC_MSP |
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index 8f1b86d4da84..cdf187600010 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c | |||
@@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev) | |||
152 | return 0; | 152 | return 0; |
153 | } | 153 | } |
154 | 154 | ||
155 | static int gio_device_suspend(struct device *dev, pm_message_t state) | ||
156 | { | ||
157 | struct gio_device *gio_dev = to_gio_device(dev); | ||
158 | struct gio_driver *drv = to_gio_driver(dev->driver); | ||
159 | int error = 0; | ||
160 | |||
161 | if (dev->driver && drv->suspend) | ||
162 | error = drv->suspend(gio_dev, state); | ||
163 | return error; | ||
164 | } | ||
165 | |||
166 | static int gio_device_resume(struct device *dev) | ||
167 | { | ||
168 | struct gio_device *gio_dev = to_gio_device(dev); | ||
169 | struct gio_driver *drv = to_gio_driver(dev->driver); | ||
170 | int error = 0; | ||
171 | |||
172 | if (dev->driver && drv->resume) | ||
173 | error = drv->resume(gio_dev); | ||
174 | return error; | ||
175 | } | ||
176 | |||
177 | static void gio_device_shutdown(struct device *dev) | 155 | static void gio_device_shutdown(struct device *dev) |
178 | { | 156 | { |
179 | struct gio_device *gio_dev = to_gio_device(dev); | 157 | struct gio_device *gio_dev = to_gio_device(dev); |
@@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = { | |||
400 | .match = gio_bus_match, | 378 | .match = gio_bus_match, |
401 | .probe = gio_device_probe, | 379 | .probe = gio_device_probe, |
402 | .remove = gio_device_remove, | 380 | .remove = gio_device_remove, |
403 | .suspend = gio_device_suspend, | ||
404 | .resume = gio_device_resume, | ||
405 | .shutdown = gio_device_shutdown, | 381 | .shutdown = gio_device_shutdown, |
406 | .uevent = gio_device_uevent, | 382 | .uevent = gio_device_uevent, |
407 | }; | 383 | }; |
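With the legacy suspend/resume hooks gone from gio_bus_type, a GIO driver that still needs power-management callbacks would supply them through dev_pm_ops on its device_driver instead. A hedged sketch of that pattern (driver and callback names are illustrative, not from the patch):

    /* Illustrative driver-side PM hookup via dev_pm_ops. */
    static int mygio_suspend(struct device *dev)
    {
            /* quiesce the device */
            return 0;
    }

    static int mygio_resume(struct device *dev)
    {
            /* bring the device back up */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(mygio_pm_ops, mygio_suspend, mygio_resume);
    /* then: .driver.pm = &mygio_pm_ops in the gio_driver definition */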
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c index ac37e54b3d5e..e44a15d4f573 100644 --- a/arch/mips/sgi-ip27/ip27-reset.c +++ b/arch/mips/sgi-ip27/ip27-reset.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle | 8 | * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle |
9 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 9 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
10 | */ | 10 | */ |
11 | #include <linux/compiler.h> | ||
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
12 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
13 | #include <linux/timer.h> | 14 | #include <linux/timer.h> |
@@ -25,9 +26,9 @@ | |||
25 | #include <asm/sn/gda.h> | 26 | #include <asm/sn/gda.h> |
26 | #include <asm/sn/sn0/hub.h> | 27 | #include <asm/sn/sn0/hub.h> |
27 | 28 | ||
28 | void machine_restart(char *command) __attribute__((noreturn)); | 29 | void machine_restart(char *command) __noreturn; |
29 | void machine_halt(void) __attribute__((noreturn)); | 30 | void machine_halt(void) __noreturn; |
30 | void machine_power_off(void) __attribute__((noreturn)); | 31 | void machine_power_off(void) __noreturn; |
31 | 32 | ||
32 | #define noreturn while(1); /* Silence gcc. */ | 33 | #define noreturn while(1); /* Silence gcc. */ |
33 | 34 | ||
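Here and in ip32-reset.c below, the open-coded __attribute__((noreturn)) annotations are replaced with the __noreturn shorthand pulled in via linux/compiler.h; the macro is simply the attribute spelled once in the compiler headers:

    /* From the compiler headers (compiler-gcc.h at the time): */
    #define __noreturn      __attribute__((noreturn))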
diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 1f823da4c77b..44b3470a0bbb 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> | 8 | * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/compiler.h> | ||
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
@@ -35,9 +36,9 @@ | |||
35 | static struct timer_list power_timer, blink_timer, debounce_timer; | 36 | static struct timer_list power_timer, blink_timer, debounce_timer; |
36 | static int has_panicked, shuting_down; | 37 | static int has_panicked, shuting_down; |
37 | 38 | ||
38 | static void ip32_machine_restart(char *command) __attribute__((noreturn)); | 39 | static void ip32_machine_restart(char *command) __noreturn; |
39 | static void ip32_machine_halt(void) __attribute__((noreturn)); | 40 | static void ip32_machine_halt(void) __noreturn; |
40 | static void ip32_machine_power_off(void) __attribute__((noreturn)); | 41 | static void ip32_machine_power_off(void) __noreturn; |
41 | 42 | ||
42 | static void ip32_machine_restart(char *cmd) | 43 | static void ip32_machine_restart(char *cmd) |
43 | { | 44 | { |