author     Ingo Molnar <mingo@kernel.org>  2014-04-14 10:44:42 -0400
committer  Ingo Molnar <mingo@kernel.org>  2014-04-14 10:44:42 -0400
commit     740c699a8d316c8bf8593f19e2ca47795e690622 (patch)
tree       a78886955770a477945c5d84e06b2e7678733b54 /arch/mips
parent     e69af4657e7764d03ad555f0b583d9c4217bcefa (diff)
parent     c9eaa447e77efe77b7fa4c953bd62de8297fd6c5 (diff)

Merge tag 'v3.15-rc1' into perf/urgent

Pick up the latest fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig | 146
-rw-r--r--  arch/mips/Kconfig.debug | 10
-rw-r--r--  arch/mips/Makefile | 5
-rw-r--r--  arch/mips/alchemy/Kconfig | 24
-rw-r--r--  arch/mips/alchemy/Platform | 16
-rw-r--r--  arch/mips/alchemy/common/setup.c | 10
-rw-r--r--  arch/mips/alchemy/common/sleeper.S | 6
-rw-r--r--  arch/mips/alchemy/devboards/Makefile | 4
-rw-r--r--  arch/mips/alchemy/devboards/db1000.c | 47
-rw-r--r--  arch/mips/alchemy/devboards/db1200.c | 78
-rw-r--r--  arch/mips/alchemy/devboards/db1300.c | 40
-rw-r--r--  arch/mips/alchemy/devboards/db1550.c | 10
-rw-r--r--  arch/mips/alchemy/devboards/db1xxx.c (renamed from arch/mips/alchemy/devboards/db1235.c) | 41
-rw-r--r--  arch/mips/ar7/time.c | 1
-rw-r--r--  arch/mips/ath79/Kconfig | 8
-rw-r--r--  arch/mips/bcm47xx/Makefile | 2
-rw-r--r--  arch/mips/bcm47xx/bcm47xx_private.h | 3
-rw-r--r--  arch/mips/bcm47xx/board.c | 25
-rw-r--r--  arch/mips/bcm47xx/buttons.c | 31
-rw-r--r--  arch/mips/bcm47xx/leds.c | 49
-rw-r--r--  arch/mips/bcm47xx/setup.c | 3
-rw-r--r--  arch/mips/bcm47xx/workarounds.c | 31
-rw-r--r--  arch/mips/bcm63xx/cpu.c | 3
-rw-r--r--  arch/mips/configs/db1000_defconfig | 359
-rw-r--r--  arch/mips/configs/db1235_defconfig | 434
-rw-r--r--  arch/mips/configs/db1xxx_defconfig | 245
-rw-r--r--  arch/mips/configs/loongson3_defconfig | 362
-rw-r--r--  arch/mips/configs/malta_defconfig | 9
-rw-r--r--  arch/mips/configs/malta_kvm_defconfig | 10
-rw-r--r--  arch/mips/configs/malta_kvm_guest_defconfig | 7
-rw-r--r--  arch/mips/configs/maltaaprp_defconfig | 3
-rw-r--r--  arch/mips/configs/maltasmtc_defconfig | 4
-rw-r--r--  arch/mips/configs/maltasmvp_defconfig | 5
-rw-r--r--  arch/mips/configs/maltasmvp_eva_defconfig | 200
-rw-r--r--  arch/mips/configs/maltaup_defconfig | 3
-rw-r--r--  arch/mips/include/asm/asm-eva.h | 135
-rw-r--r--  arch/mips/include/asm/asm.h | 13
-rw-r--r--  arch/mips/include/asm/asmmacro-32.h | 128
-rw-r--r--  arch/mips/include/asm/asmmacro.h | 330
-rw-r--r--  arch/mips/include/asm/atomic.h | 40
-rw-r--r--  arch/mips/include/asm/bitops.h | 28
-rw-r--r--  arch/mips/include/asm/bootinfo.h | 26
-rw-r--r--  arch/mips/include/asm/checksum.h | 44
-rw-r--r--  arch/mips/include/asm/cmpxchg.h | 20
-rw-r--r--  arch/mips/include/asm/cpu-features.h | 10
-rw-r--r--  arch/mips/include/asm/cpu-info.h | 28
-rw-r--r--  arch/mips/include/asm/cpu-type.h | 6
-rw-r--r--  arch/mips/include/asm/cpu.h | 15
-rw-r--r--  arch/mips/include/asm/dma-mapping.h | 5
-rw-r--r--  arch/mips/include/asm/fpu.h | 2
-rw-r--r--  arch/mips/include/asm/futex.h | 25
-rw-r--r--  arch/mips/include/asm/fw/fw.h | 2
-rw-r--r--  arch/mips/include/asm/gcmpregs.h | 125
-rw-r--r--  arch/mips/include/asm/gic.h | 3
-rw-r--r--  arch/mips/include/asm/io.h | 8
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 417
-rw-r--r--  arch/mips/include/asm/local.h | 8
-rw-r--r--  arch/mips/include/asm/mach-au1x00/au1000.h | 12
-rw-r--r--  arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h | 8
-rw-r--r--  arch/mips/include/asm/mach-db1x00/db1200.h | 91
-rw-r--r--  arch/mips/include/asm/mach-db1x00/db1300.h | 40
-rw-r--r--  arch/mips/include/asm/mach-loongson/boot_param.h | 163
-rw-r--r--  arch/mips/include/asm/mach-loongson/dma-coherence.h | 22
-rw-r--r--  arch/mips/include/asm/mach-loongson/irq.h | 44
-rw-r--r--  arch/mips/include/asm/mach-loongson/loongson.h | 28
-rw-r--r--  arch/mips/include/asm/mach-loongson/machine.h | 6
-rw-r--r--  arch/mips/include/asm/mach-loongson/pci.h | 5
-rw-r--r--  arch/mips/include/asm/mach-loongson/spaces.h | 9
-rw-r--r--  arch/mips/include/asm/mach-malta/kernel-entry-init.h | 115
-rw-r--r--  arch/mips/include/asm/mach-malta/spaces.h | 46
-rw-r--r--  arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h | 12
-rw-r--r--  arch/mips/include/asm/mips-boards/malta.h | 5
-rw-r--r--  arch/mips/include/asm/mips-boards/piix4.h | 5
-rw-r--r--  arch/mips/include/asm/mips-cm.h | 322
-rw-r--r--  arch/mips/include/asm/mips-cpc.h | 150
-rw-r--r--  arch/mips/include/asm/mips_mt.h | 5
-rw-r--r--  arch/mips/include/asm/mipsmtregs.h | 11
-rw-r--r--  arch/mips/include/asm/mipsregs.h | 22
-rw-r--r--  arch/mips/include/asm/module.h | 2
-rw-r--r--  arch/mips/include/asm/msa.h | 203
-rw-r--r--  arch/mips/include/asm/page.h | 2
-rw-r--r--  arch/mips/include/asm/pgtable-bits.h | 9
-rw-r--r--  arch/mips/include/asm/processor.h | 45
-rw-r--r--  arch/mips/include/asm/ptrace.h | 2
-rw-r--r--  arch/mips/include/asm/r4kcache.h | 175
-rw-r--r--  arch/mips/include/asm/sigcontext.h | 2
-rw-r--r--  arch/mips/include/asm/smp-cps.h | 33
-rw-r--r--  arch/mips/include/asm/smp-ops.h | 17
-rw-r--r--  arch/mips/include/asm/smp.h | 1
-rw-r--r--  arch/mips/include/asm/stackframe.h | 2
-rw-r--r--  arch/mips/include/asm/switch_to.h | 22
-rw-r--r--  arch/mips/include/asm/syscall.h | 33
-rw-r--r--  arch/mips/include/asm/thread_info.h | 7
-rw-r--r--  arch/mips/include/asm/uaccess.h | 559
-rw-r--r--  arch/mips/include/uapi/asm/inst.h | 25
-rw-r--r--  arch/mips/include/uapi/asm/sigcontext.h | 8
-rw-r--r--  arch/mips/kernel/Makefile | 5
-rw-r--r--  arch/mips/kernel/asm-offsets.c | 82
-rw-r--r--  arch/mips/kernel/bmips_vec.S | 2
-rw-r--r--  arch/mips/kernel/cps-vec.S | 191
-rw-r--r--  arch/mips/kernel/cpu-probe.c | 74
-rw-r--r--  arch/mips/kernel/ftrace.c | 9
-rw-r--r--  arch/mips/kernel/genex.S | 8
-rw-r--r--  arch/mips/kernel/head.S | 2
-rw-r--r--  arch/mips/kernel/idle.c | 9
-rw-r--r--  arch/mips/kernel/irq-gic.c | 1
-rw-r--r--  arch/mips/kernel/kgdb.c | 18
-rw-r--r--  arch/mips/kernel/mips-cm.c | 121
-rw-r--r--  arch/mips/kernel/mips-cpc.c | 52
-rw-r--r--  arch/mips/kernel/mips_ksyms.c | 24
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 80
-rw-r--r--  arch/mips/kernel/proc.c | 25
-rw-r--r--  arch/mips/kernel/process.c | 23
-rw-r--r--  arch/mips/kernel/ptrace.c | 161
-rw-r--r--  arch/mips/kernel/ptrace32.c | 67
-rw-r--r--  arch/mips/kernel/r4k_fpu.S | 215
-rw-r--r--  arch/mips/kernel/r4k_switch.S | 60
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 24
-rw-r--r--  arch/mips/kernel/scall64-64.S | 5
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 5
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 17
-rw-r--r--  arch/mips/kernel/signal.c | 170
-rw-r--r--  arch/mips/kernel/signal32.c | 137
-rw-r--r--  arch/mips/kernel/smp-cmp.c | 55
-rw-r--r--  arch/mips/kernel/smp-cps.c | 335
-rw-r--r--  arch/mips/kernel/smp-gic.c | 53
-rw-r--r--  arch/mips/kernel/smp-mt.c | 45
-rw-r--r--  arch/mips/kernel/smtc-proc.c | 23
-rw-r--r--  arch/mips/kernel/spram.c | 5
-rw-r--r--  arch/mips/kernel/syscall.c | 4
-rw-r--r--  arch/mips/kernel/traps.c | 144
-rw-r--r--  arch/mips/kernel/unaligned.c | 135
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c | 40
-rw-r--r--  arch/mips/lasat/picvue_proc.c | 2
-rw-r--r--  arch/mips/lib/csum_partial.S | 282
-rw-r--r--  arch/mips/lib/memcpy.S | 416
-rw-r--r--  arch/mips/lib/memset.S | 146
-rw-r--r--  arch/mips/lib/strlen_user.S | 36
-rw-r--r--  arch/mips/lib/strncpy_user.S | 40
-rw-r--r--  arch/mips/lib/strnlen_user.S | 36
-rw-r--r--  arch/mips/loongson/Kconfig | 47
-rw-r--r--  arch/mips/loongson/Makefile | 6
-rw-r--r--  arch/mips/loongson/Platform | 1
-rw-r--r--  arch/mips/loongson/common/Makefile | 5
-rw-r--r--  arch/mips/loongson/common/dma-swiotlb.c | 136
-rw-r--r--  arch/mips/loongson/common/env.c | 67
-rw-r--r--  arch/mips/loongson/common/init.c | 11
-rw-r--r--  arch/mips/loongson/common/machtype.c | 4
-rw-r--r--  arch/mips/loongson/common/mem.c | 42
-rw-r--r--  arch/mips/loongson/common/pci.c | 6
-rw-r--r--  arch/mips/loongson/common/reset.c | 21
-rw-r--r--  arch/mips/loongson/common/serial.c | 26
-rw-r--r--  arch/mips/loongson/common/setup.c | 8
-rw-r--r--  arch/mips/loongson/common/uart_base.c | 9
-rw-r--r--  arch/mips/loongson/lemote-2f/clock.c | 20
-rw-r--r--  arch/mips/loongson/loongson-3/Makefile | 6
-rw-r--r--  arch/mips/loongson/loongson-3/irq.c | 126
-rw-r--r--  arch/mips/loongson/loongson-3/smp.c | 443
-rw-r--r--  arch/mips/loongson/loongson-3/smp.h | 29
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 58
-rw-r--r--  arch/mips/math-emu/kernel_linkage.c | 76
-rw-r--r--  arch/mips/mm/c-r4k.c | 147
-rw-r--r--  arch/mips/mm/cache.c | 4
-rw-r--r--  arch/mips/mm/init.c | 12
-rw-r--r--  arch/mips/mm/sc-mips.c | 2
-rw-r--r--  arch/mips/mm/tlb-r4k.c | 5
-rw-r--r--  arch/mips/mm/tlbex.c | 6
-rw-r--r--  arch/mips/mti-malta/malta-init.c | 30
-rw-r--r--  arch/mips/mti-malta/malta-int.c | 89
-rw-r--r--  arch/mips/mti-malta/malta-memory.c | 58
-rw-r--r--  arch/mips/mti-malta/malta-setup.c | 30
-rw-r--r--  arch/mips/mti-sead3/sead3-mtd.c | 3
-rw-r--r--  arch/mips/oprofile/common.c | 3
-rw-r--r--  arch/mips/oprofile/op_model_mipsxx.c | 9
-rw-r--r--  arch/mips/pci/Makefile | 1
-rw-r--r--  arch/mips/pci/fixup-loongson3.c | 66
-rw-r--r--  arch/mips/pci/fixup-malta.c | 13
-rw-r--r--  arch/mips/pci/ops-loongson3.c | 101
-rw-r--r--  arch/mips/pci/pci-alchemy.c | 5
-rw-r--r--  arch/mips/pci/pci-malta.c | 22
-rw-r--r--  arch/mips/pmcs-msp71xx/msp_setup.c | 2
-rw-r--r--  arch/mips/power/hibernate.S | 1
-rw-r--r--  arch/mips/ralink/Kconfig | 6
183 files changed, 8279 insertions(+), 2787 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 95fa1f1d5c8b..5cd695f905a1 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -10,6 +10,7 @@ config MIPS
 	select HAVE_PERF_EVENTS
 	select PERF_USE_VMALLOC
 	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select HAVE_FUNCTION_TRACER
@@ -62,13 +63,12 @@ config MIPS_ALCHEMY
 	select CEVT_R4K
 	select CSRC_R4K
 	select IRQ_CPU
+	select DMA_MAYBE_COHERENT	# Au1000,1500,1100 aren't, rest is
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_APM_EMULATION
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_ZBOOT
-	select USB_ARCH_HAS_OHCI
-	select USB_ARCH_HAS_EHCI
 
 config AR7
 	bool "Texas Instruments AR7"
@@ -123,7 +123,7 @@ config BCM47XX
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
-	select EARLY_PRINTK_8250 if EARLY_PRINTK
+	select USE_GENERIC_EARLY_PRINTK_8250
 	help
 	 Support for BCM47XX based boards
 
@@ -150,7 +150,6 @@ config MIPS_COBALT
 	select CSRC_R4K
 	select CEVT_GT641XX
 	select DMA_NONCOHERENT
-	select EARLY_PRINTK_8250 if EARLY_PRINTK
 	select HW_HAS_PCI
 	select I8253
 	select I8259
@@ -163,6 +162,7 @@ config MIPS_COBALT
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select USE_GENERIC_EARLY_PRINTK_8250
 
 config MACH_DECSTATION
 	bool "DECstations"
@@ -175,7 +175,7 @@ config MACH_DECSTATION
 	select CPU_R4000_WORKAROUNDS if 64BIT
 	select CPU_R4400_WORKAROUNDS if 64BIT
 	select DMA_NONCOHERENT
-	select NO_IOPORT
+	select NO_IOPORT_MAP
 	select IRQ_CPU
 	select SYS_HAS_CPU_R3000
 	select SYS_HAS_CPU_R4X00
@@ -235,7 +235,6 @@ config MACH_JZ4740
 	select IRQ_CPU
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_HAS_EARLY_PRINTK
-	select HAVE_PWM
 	select HAVE_CLK
 	select GENERIC_IRQ_CHIP
 
@@ -320,6 +319,7 @@ config MIPS_MALTA
 	select SWAP_IO_SPACE
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
+	select SYS_HAS_CPU_MIPS32_R3_5
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
 	select SYS_HAS_CPU_NEVADA
@@ -329,6 +329,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_MIPS_CMP
+	select SYS_SUPPORTS_MIPS_CPS
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
 	select SYS_SUPPORTS_ZBOOT
@@ -360,7 +361,6 @@ config MIPS_SEAD3
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_SUPPORTS_SMARTMIPS
 	select SYS_SUPPORTS_MICROMIPS
-	select USB_ARCH_HAS_EHCI
 	select USB_EHCI_BIG_ENDIAN_DESC
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select USE_OF
@@ -674,6 +674,7 @@ config SNI_RM
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select USE_GENERIC_EARLY_PRINTK_8250
 	help
 	  The SNI RM200/300/400 are MIPS-based machines manufactured by
 	  Siemens Nixdorf Informationssysteme (SNI), parent company of Pyramid
@@ -718,8 +719,6 @@ config CAVIUM_OCTEON_SOC
 	select SWAP_IO_SPACE
 	select HW_HAS_PCI
 	select ZONE_DMA32
-	select USB_ARCH_HAS_OHCI
-	select USB_ARCH_HAS_EHCI
 	select HOLES_IN_ZONE
 	select ARCH_REQUIRE_GPIOLIB
 	help
@@ -756,8 +755,6 @@ config NLM_XLR_BOARD
 	select ZONE_DMA32 if 64BIT
 	select SYNC_R4K
 	select SYS_HAS_EARLY_PRINTK
-	select USB_ARCH_HAS_OHCI if USB_SUPPORT
-	select USB_ARCH_HAS_EHCI if USB_SUPPORT
 	select SYS_SUPPORTS_ZBOOT
 	select SYS_SUPPORTS_ZBOOT_UART16550
 	help
@@ -782,7 +779,6 @@ config NLM_XLP_BOARD
 	select CEVT_R4K
 	select CSRC_R4K
 	select IRQ_CPU
-	select ARCH_SUPPORTS_MSI
 	select ZONE_DMA32 if 64BIT
 	select SYNC_R4K
 	select SYS_HAS_EARLY_PRINTK
@@ -868,6 +864,7 @@ config CEVT_R4K
 	bool
 
 config CEVT_GIC
+	select MIPS_CM
 	bool
 
 config CEVT_SB1250
@@ -886,6 +883,7 @@ config CSRC_R4K
 	bool
 
 config CSRC_GIC
+	select MIPS_CM
 	bool
 
 config CSRC_SB1250
@@ -949,7 +947,7 @@ config SYNC_R4K
 config MIPS_MACHINE
 	def_bool n
 
-config NO_IOPORT
+config NO_IOPORT_MAP
 	def_bool n
 
 config GENERIC_ISA_DMA
@@ -1030,6 +1028,7 @@ config IRQ_GT641XX
 	bool
 
 config IRQ_GIC
+	select MIPS_CM
 	bool
 
 config PCI_GT64XXX_PCI0
@@ -1148,6 +1147,18 @@ choice
 	prompt "CPU type"
 	default CPU_R4X00
 
+config CPU_LOONGSON3
+	bool "Loongson 3 CPU"
+	depends on SYS_HAS_CPU_LOONGSON3
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_HUGEPAGES
+	select WEAK_ORDERING
+	select WEAK_REORDERING_BEYOND_LLSC
+	help
+		The Loongson 3 processor implements the MIPS64R2 instruction
+		set with many extensions.
+
 config CPU_LOONGSON2E
 	bool "Loongson 2E"
 	depends on SYS_HAS_CPU_LOONGSON2E
@@ -1203,6 +1214,7 @@ config CPU_MIPS32_R2
 	select CPU_HAS_PREFETCH
 	select CPU_SUPPORTS_32BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
 	select HAVE_KVM
 	help
 	  Choose this option to build a kernel for release 2 or later of the
@@ -1238,6 +1250,7 @@ config CPU_MIPS64_R2
 	select CPU_SUPPORTS_64BIT_KERNEL
 	select CPU_SUPPORTS_HIGHMEM
 	select CPU_SUPPORTS_HUGEPAGES
+	select CPU_SUPPORTS_MSA
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS64 architecture.  Many modern embedded systems with a 64-bit
@@ -1396,7 +1409,6 @@ config CPU_CAVIUM_OCTEON
 	select LIBFDT
 	select USE_OF
 	select USB_EHCI_BIG_ENDIAN_MMIO
-	select SYS_HAS_DMA_OPS
 	select MIPS_L1_CACHE_SHIFT_7
 	help
 	  The Cavium Octeon processor is a highly integrated chip containing
@@ -1448,6 +1460,26 @@ config CPU_XLP
 	  Netlogic Microsystems XLP processors.
 endchoice
 
+config CPU_MIPS32_3_5_FEATURES
+	bool "MIPS32 Release 3.5 Features"
+	depends on SYS_HAS_CPU_MIPS32_R3_5
+	depends on CPU_MIPS32_R2
+	help
+	  Choose this option to build a kernel for release 2 or later of the
+	  MIPS32 architecture including features from the 3.5 release such as
+	  support for Enhanced Virtual Addressing (EVA).
+
+config CPU_MIPS32_3_5_EVA
+	bool "Enhanced Virtual Addressing (EVA)"
+	depends on CPU_MIPS32_3_5_FEATURES
+	select EVA
+	default y
+	help
+	  Choose this option if you want to enable the Enhanced Virtual
+	  Addressing (EVA) on your MIPS32 core (such as proAptiv).
+	  One of its primary benefits is an increase in the maximum size
+	  of lowmem (up to 3GB). If unsure, say 'N' here.
+
 if CPU_LOONGSON2F
 config CPU_NOP_WORKAROUNDS
 	bool
@@ -1523,6 +1555,10 @@ config CPU_BMIPS5000
 	select SYS_SUPPORTS_SMP
 	select SYS_SUPPORTS_HOTPLUG_CPU
 
+config SYS_HAS_CPU_LOONGSON3
+	bool
+	select CPU_SUPPORTS_CPUFREQ
+
 config SYS_HAS_CPU_LOONGSON2E
 	bool
 
@@ -1541,6 +1577,9 @@ config SYS_HAS_CPU_MIPS32_R1
 config SYS_HAS_CPU_MIPS32_R2
 	bool
 
+config SYS_HAS_CPU_MIPS32_R3_5
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool
 
@@ -1657,6 +1696,9 @@ config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
 
+config EVA
+	bool
+
 config SYS_SUPPORTS_32BIT_KERNEL
 	bool
 config SYS_SUPPORTS_64BIT_KERNEL
@@ -1729,7 +1771,7 @@ choice
 
 config PAGE_SIZE_4KB
 	bool "4kB"
-	depends on !CPU_LOONGSON2
+	depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
 	help
 	  This option select the standard 4kB Linux page size.  On some
 	  R3000-family processors this is the only available page size.  Using
@@ -1870,6 +1912,7 @@ config MIPS_MT_SMP
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select SYNC_R4K
+	select MIPS_GIC_IPI
 	select MIPS_MT
 	select SMP
 	select SMP_UP
@@ -1887,6 +1930,7 @@ config MIPS_MT_SMTC
 	bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
 	depends on CPU_MIPS32_R2
 	depends on SYS_SUPPORTS_MULTITHREADING
+	depends on !MIPS_CPS
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1994,13 +2038,45 @@ config MIPS_VPE_APSP_API_MT
 	depends on MIPS_VPE_APSP_API && !MIPS_CMP
 
 config MIPS_CMP
-	bool "MIPS CMP support"
-	depends on SYS_SUPPORTS_MIPS_CMP && MIPS_MT_SMP
+	bool "MIPS CMP framework support (DEPRECATED)"
+	depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC
+	select MIPS_GIC_IPI
 	select SYNC_R4K
 	select WEAK_ORDERING
 	default n
 	help
-	  Enable Coherency Manager processor (CMP) support.
+	  Select this if you are using a bootloader which implements the "CMP
+	  framework" protocol (ie. YAMON) and want your kernel to make use of
+	  its ability to start secondary CPUs.
+
+	  Unless you have a specific need, you should use CONFIG_MIPS_CPS
+	  instead of this.
+
+config MIPS_CPS
+	bool "MIPS Coherent Processing System support"
+	depends on SYS_SUPPORTS_MIPS_CPS
+	select MIPS_CM
+	select MIPS_CPC
+	select MIPS_GIC_IPI
+	select SMP
+	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+	select SYS_SUPPORTS_SMP
+	select WEAK_ORDERING
+	help
+	  Select this if you wish to run an SMP kernel across multiple cores
+	  within a MIPS Coherent Processing System. When this option is
+	  enabled the kernel will probe for other cores and boot them with
+	  no external assistance. It is safe to enable this when hardware
+	  support is unavailable.
+
+config MIPS_GIC_IPI
+	bool
+
+config MIPS_CM
+	bool
+
+config MIPS_CPC
+	bool
 
 config SB1_PASS_1_WORKAROUNDS
 	bool
@@ -2043,6 +2119,21 @@ config CPU_MICROMIPS
 	  When this option is enabled the kernel will be built using the
 	  microMIPS ISA
 
+config CPU_HAS_MSA
+	bool "Support for the MIPS SIMD Architecture"
+	depends on CPU_SUPPORTS_MSA
+	default y
+	help
+	  MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers
+	  and a set of SIMD instructions to operate on them. When this option
+	  is enabled the kernel will support allocating & switching MSA
+	  vector register contexts. If you know that your kernel will only be
+	  running on CPUs which do not support MSA or that your userland will
+	  not be making use of it then you may wish to say N here to reduce
+	  the size & complexity of your kernel.
+
+	  If unsure, say Y.
+
 config CPU_HAS_WB
 	bool
 
@@ -2094,7 +2185,7 @@ config CPU_R4400_WORKAROUNDS
 #
 config HIGHMEM
 	bool "High Memory Support"
-	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
+	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
 
 config CPU_SUPPORTS_HIGHMEM
 	bool
@@ -2108,6 +2199,9 @@ config SYS_SUPPORTS_SMARTMIPS
 config SYS_SUPPORTS_MICROMIPS
 	bool
 
+config CPU_SUPPORTS_MSA
+	bool
+
 config ARCH_FLATMEM_ENABLE
 	def_bool y
 	depends on !NUMA && !CPU_LOONGSON2
@@ -2181,6 +2275,9 @@ config SMP_UP
 config SYS_SUPPORTS_MIPS_CMP
 	bool
 
+config SYS_SUPPORTS_MIPS_CPS
+	bool
+
 config SYS_SUPPORTS_SMP
 	bool
 
@@ -2413,6 +2510,17 @@ config PCI
 	  your box. Other bus systems are ISA, EISA, or VESA. If you have PCI,
 	  say Y, otherwise N.
 
+config HT_PCI
+	bool "Support for HT-linked PCI"
+	default y
+	depends on CPU_LOONGSON3
+	select PCI
+	select PCI_DOMAINS
+	help
+	  Loongson family machines use Hyper-Transport bus for inter-core
+	  connection and device connection. The PCI bus is a subordinate
+	  linked at HT. Choose Y for Loongson-3 based machines.
+
 config PCI_DOMAINS
 	bool
 
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index b147e7038ff0..25de29211d76 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -21,13 +21,17 @@ config EARLY_PRINTK
 	  unless you want to debug such a crash.
 
 config EARLY_PRINTK_8250
-	bool "8250/16550 and compatible serial early printk driver"
-	depends on EARLY_PRINTK
-	default n
+	bool
+	depends on EARLY_PRINTK && USE_GENERIC_EARLY_PRINTK_8250
+	default y
 	help
+	  "8250/16550 and compatible serial early printk driver"
 	  If you say Y here, it will be possible to use a 8250/16550 serial
 	  port as the boot console.
 
+config USE_GENERIC_EARLY_PRINTK_8250
+	bool
+
 config CMDLINE_BOOL
 	bool "Built-in kernel command line"
 	default n
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 9b8556de9993..1a5b4032cb66 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -119,6 +119,11 @@ cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
 cflags-$(CONFIG_SB1XXX_CORELIS)	+= $(call cc-option,-mno-sched-prolog) \
 		   -fno-omit-frame-pointer
 
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa			:= $(call cc-option-yn,-mhard-float -mfp64 -mmsa)
+cflags-$(toolchain-msa)		+= -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
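Note: the Makefile hunk above asks the compiler, via cc-option-yn, whether it accepts -mhard-float -mfp64 -mmsa, and only then adds -DTOOLCHAIN_SUPPORTS_MSA to the kernel cflags, so MSA support degrades gracefully on older toolchains. A minimal illustrative sketch (not part of this patch; the initcall name is hypothetical) of guarding code on that define:

    /* Hypothetical sketch: note at boot whether the toolchain could build MSA support. */
    #include <linux/init.h>
    #include <linux/printk.h>

    static int __init example_msa_toolchain_note(void)
    {
    #ifdef TOOLCHAIN_SUPPORTS_MSA
    	pr_info("kernel built by an MSA-capable toolchain\n");
    #else
    	pr_info("toolchain lacks MSA support; MSA save/restore paths are stubbed out\n");
    #endif
    	return 0;
    }
    late_initcall(example_msa_toolchain_note);
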
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index 7032ac7ecd1b..b9628983d620 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -16,36 +16,29 @@ config ALCHEMY_GPIO_INDIRECT
 choice
 	prompt "Machine type"
 	depends on MIPS_ALCHEMY
-	default MIPS_DB1000
+	default MIPS_DB1XXX
 
 config MIPS_MTX1
 	bool "4G Systems MTX-1 board"
-	select DMA_NONCOHERENT
 	select HW_HAS_PCI
 	select ALCHEMY_GPIOINT_AU1000
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
-config MIPS_DB1000
-	bool "Alchemy DB1000/DB1500/DB1100 PB1500/1100 boards"
-	select ALCHEMY_GPIOINT_AU1000
-	select DMA_NONCOHERENT
-	select HW_HAS_PCI
-	select SYS_SUPPORTS_BIG_ENDIAN
-	select SYS_SUPPORTS_LITTLE_ENDIAN
-	select SYS_HAS_EARLY_PRINTK
-
-config MIPS_DB1235
-	bool "Alchemy DB1200/PB1200/DB1300/DB1550/PB1550 boards"
+config MIPS_DB1XXX
+	bool "Alchemy DB1XXX / PB1XXX boards"
 	select ARCH_REQUIRE_GPIOLIB
 	select HW_HAS_PCI
-	select DMA_COHERENT
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
+	help
+	  Select this option if you have one of the following Alchemy
+	  development boards:  DB1000 DB1500 DB1100 DB1550 DB1200 DB1300
+	  PB1500 PB1100 PB1550 PB1200
+	  Board type is autodetected during boot.
 
 config MIPS_XXS1500
 	bool "MyCable XXS1500 board"
-	select DMA_NONCOHERENT
 	select ALCHEMY_GPIOINT_AU1000
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
@@ -54,7 +47,6 @@ config MIPS_GPR
 	bool "Trapeze ITS GPR board"
 	select ALCHEMY_GPIOINT_AU1000
 	select HW_HAS_PCI
-	select DMA_NONCOHERENT
 	select SYS_SUPPORTS_LITTLE_ENDIAN
 	select SYS_HAS_EARLY_PRINTK
 
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index b3afcdd8d77a..33c9da3b077b 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -5,18 +5,12 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
 
 
 #
-# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards
+# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100
+# Db1550/Pb1550/Db1200/Pb1200/Db1300
 #
-platform-$(CONFIG_MIPS_DB1000)	+= alchemy/devboards/
-cflags-$(CONFIG_MIPS_DB1000)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
-load-$(CONFIG_MIPS_DB1000)	+= 0xffffffff80100000
-
-#
-# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards
-#
-platform-$(CONFIG_MIPS_DB1235)	+= alchemy/devboards/
-cflags-$(CONFIG_MIPS_DB1235)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
-load-$(CONFIG_MIPS_DB1235)	+= 0xffffffff80100000
+platform-$(CONFIG_MIPS_DB1XXX)	+= alchemy/devboards/
+cflags-$(CONFIG_MIPS_DB1XXX)	+= -I$(srctree)/arch/mips/include/asm/mach-db1x00
+load-$(CONFIG_MIPS_DB1XXX)	+= 0xffffffff80100000
 
 #
 # 4G-Systems MTX-1 "MeshCube" wireless router
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 62b4e7bbeab9..566a1743f685 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -30,6 +30,7 @@
 #include <linux/jiffies.h>
 #include <linux/module.h>
 
+#include <asm/dma-coherence.h>
 #include <asm/mipsregs.h>
 #include <asm/time.h>
 
@@ -59,6 +60,15 @@ void __init plat_mem_setup(void)
 	/* Clear to obtain best system bus performance */
 	clear_c0_config(1 << 19); /* Clear Config[OD] */
 
+	hw_coherentio = 0;
+	coherentio = 1;
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+	case ALCHEMY_CPU_AU1100:
+		coherentio = 0;
+	}
+
 	board_setup();  /* board specific setup */
 
 	/* IO/MEM resources. */
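Note: this pairs with the DMA_MAYBE_COHERENT selection added to config MIPS_ALCHEMY earlier in this diff. plat_mem_setup() now decides at runtime that Au1000/Au1500/Au1100 are DMA-noncoherent while later parts are coherent, recording that in the global coherentio flag consumed by the generic MIPS DMA/cache code. A simplified sketch (illustrative only, not from this patch) of how other code can consult that flag:

    /* Hypothetical helper: skip cache maintenance on parts flagged as coherent. */
    #include <linux/types.h>

    extern int coherentio;		/* set in plat_mem_setup() above */

    static inline bool example_dma_needs_cache_flush(void)
    {
    	return !coherentio;
    }
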
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index 706d933e0085..c73d81270b42 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -95,7 +95,7 @@ LEAF(alchemy_sleep_au1000)
 
 	/* cache following instructions, as memory gets put to sleep */
 	la	t0, 1f
-	.set	mips3
+	.set	arch=r4000
 	cache	0x14, 0(t0)
 	cache	0x14, 32(t0)
 	cache	0x14, 64(t0)
@@ -121,7 +121,7 @@ LEAF(alchemy_sleep_au1550)
 
 	/* cache following instructions, as memory gets put to sleep */
 	la	t0, 1f
-	.set	mips3
+	.set	arch=r4000
 	cache	0x14, 0(t0)
 	cache	0x14, 32(t0)
 	cache	0x14, 64(t0)
@@ -163,7 +163,7 @@ LEAF(alchemy_sleep_au1300)
 	la	t1, 4f
 	subu	t2, t1, t0
 
-	.set	mips3
+	.set	arch=r4000
 
 1:	cache	0x14, 0(t0)
 	subu	t2, t2, 32
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
index 15bf7306648b..9da3659a9d1c 100644
--- a/arch/mips/alchemy/devboards/Makefile
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -2,7 +2,5 @@
 # Alchemy Develboards
 #
 
-obj-y += bcsr.o platform.o
+obj-y += bcsr.o platform.o db1000.o db1200.o db1300.o db1550.o db1xxx.o
 obj-$(CONFIG_PM) += pm.o
-obj-$(CONFIG_MIPS_DB1000) += db1000.o
-obj-$(CONFIG_MIPS_DB1235) += db1235.o db1200.o db1300.o db1550.o
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 5483906e0f86..92dd929d4057 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -41,42 +41,27 @@
 
 #define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
 
-struct pci_dev;
+const char *get_system_type(void);
 
-static const char *board_type_str(void)
+int __init db1000_board_setup(void)
 {
+	/* initialize board register space */
+	bcsr_init(DB1000_BCSR_PHYS_ADDR,
+		  DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
+
 	switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
 	case BCSR_WHOAMI_DB1000:
-		return "DB1000";
 	case BCSR_WHOAMI_DB1500:
-		return "DB1500";
 	case BCSR_WHOAMI_DB1100:
-		return "DB1100";
 	case BCSR_WHOAMI_PB1500:
 	case BCSR_WHOAMI_PB1500R2:
-		return "PB1500";
 	case BCSR_WHOAMI_PB1100:
-		return "PB1100";
-	default:
-		return "(unknown)";
+		pr_info("AMD Alchemy %s Board\n", get_system_type());
+		return 0;
 	}
+	return -ENODEV;
 }
 
-const char *get_system_type(void)
-{
-	return board_type_str();
-}
-
-void __init board_setup(void)
-{
-	/* initialize board register space */
-	bcsr_init(DB1000_BCSR_PHYS_ADDR,
-		  DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
-
-	printk(KERN_INFO "AMD Alchemy %s Board\n", board_type_str());
-}
-
-
 static int db1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin)
 {
 	if ((slot < 12) || (slot > 13) || pin == 0)
@@ -114,17 +99,10 @@ static struct platform_device db1500_pci_host_dev = {
 	.resource	= alchemy_pci_host_res,
 };
 
-static int __init db1500_pci_init(void)
+int __init db1500_pci_setup(void)
 {
-	int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
-	if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
-	    (id == BCSR_WHOAMI_PB1500R2))
-		return platform_device_register(&db1500_pci_host_dev);
-	return 0;
+	return platform_device_register(&db1500_pci_host_dev);
 }
-/* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */
-arch_initcall(db1500_pci_init);
-
 
 static struct resource au1100_lcd_resources[] = {
 	[0] = {
@@ -513,7 +491,7 @@ static struct platform_device *db1100_devs[] = {
 	&db1000_irda_dev,
 };
 
-static int __init db1000_dev_init(void)
+int __init db1000_dev_setup(void)
 {
 	int board = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
 	int c0, c1, d0, d1, s0, s1, flashsize = 32, twosocks = 1;
@@ -623,4 +601,3 @@ static int __init db1000_dev_init(void)
 	db1x_register_norflash(flashsize << 20, 4 /* 32bit */, F_SWAPPED);
 	return 0;
 }
-device_initcall(db1000_dev_init);
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index a84d98b8f96e..9e46667f2597 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -35,16 +35,63 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/smc91x.h>
+#include <linux/ata_platform.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1100_mmc.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1550_spi.h>
 #include <asm/mach-db1x00/bcsr.h>
-#include <asm/mach-db1x00/db1200.h>
 
 #include "platform.h"
 
+#define BCSR_INT_IDE		0x0001
+#define BCSR_INT_ETH		0x0002
+#define BCSR_INT_PC0		0x0004
+#define BCSR_INT_PC0STSCHG	0x0008
+#define BCSR_INT_PC1		0x0010
+#define BCSR_INT_PC1STSCHG	0x0020
+#define BCSR_INT_DC		0x0040
+#define BCSR_INT_FLASHBUSY	0x0080
+#define BCSR_INT_PC0INSERT	0x0100
+#define BCSR_INT_PC0EJECT	0x0200
+#define BCSR_INT_PC1INSERT	0x0400
+#define BCSR_INT_PC1EJECT	0x0800
+#define BCSR_INT_SD0INSERT	0x1000
+#define BCSR_INT_SD0EJECT	0x2000
+#define BCSR_INT_SD1INSERT	0x4000
+#define BCSR_INT_SD1EJECT	0x8000
+
+#define DB1200_IDE_PHYS_ADDR	0x18800000
+#define DB1200_IDE_REG_SHIFT	5
+#define DB1200_IDE_PHYS_LEN	(16 << DB1200_IDE_REG_SHIFT)
+#define DB1200_ETH_PHYS_ADDR	0x19000300
+#define DB1200_NAND_PHYS_ADDR	0x20000000
+
+#define PB1200_IDE_PHYS_ADDR	0x0C800000
+#define PB1200_ETH_PHYS_ADDR	0x0D000300
+#define PB1200_NAND_PHYS_ADDR	0x1C000000
+
+#define DB1200_INT_BEGIN	(AU1000_MAX_INTR + 1)
+#define DB1200_IDE_INT		(DB1200_INT_BEGIN + 0)
+#define DB1200_ETH_INT		(DB1200_INT_BEGIN + 1)
+#define DB1200_PC0_INT		(DB1200_INT_BEGIN + 2)
+#define DB1200_PC0_STSCHG_INT	(DB1200_INT_BEGIN + 3)
+#define DB1200_PC1_INT		(DB1200_INT_BEGIN + 4)
+#define DB1200_PC1_STSCHG_INT	(DB1200_INT_BEGIN + 5)
+#define DB1200_DC_INT		(DB1200_INT_BEGIN + 6)
+#define DB1200_FLASHBUSY_INT	(DB1200_INT_BEGIN + 7)
+#define DB1200_PC0_INSERT_INT	(DB1200_INT_BEGIN + 8)
+#define DB1200_PC0_EJECT_INT	(DB1200_INT_BEGIN + 9)
+#define DB1200_PC1_INSERT_INT	(DB1200_INT_BEGIN + 10)
+#define DB1200_PC1_EJECT_INT	(DB1200_INT_BEGIN + 11)
+#define DB1200_SD0_INSERT_INT	(DB1200_INT_BEGIN + 12)
+#define DB1200_SD0_EJECT_INT	(DB1200_INT_BEGIN + 13)
+#define PB1200_SD1_INSERT_INT	(DB1200_INT_BEGIN + 14)
+#define PB1200_SD1_EJECT_INT	(DB1200_INT_BEGIN + 15)
+#define DB1200_INT_END		(DB1200_INT_BEGIN + 15)
+
 const char *get_system_type(void);
 
 static int __init db1200_detect_board(void)
@@ -89,6 +136,15 @@ int __init db1200_board_setup(void)
 		return -ENODEV;
 
 	whoami = bcsr_read(BCSR_WHOAMI);
+	switch (BCSR_WHOAMI_BOARD(whoami)) {
+	case BCSR_WHOAMI_PB1200_DDR1:
+	case BCSR_WHOAMI_PB1200_DDR2:
+	case BCSR_WHOAMI_DB1200:
+		break;
+	default:
+		return -ENODEV;
+	}
+
 	printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
 		" Board-ID %d  Daughtercard ID %d\n", get_system_type(),
 		(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
@@ -275,32 +331,38 @@ static struct platform_device db1200_eth_dev = {
 
 /**********************************************************************/
 
+static struct pata_platform_info db1200_ide_info = {
+	.ioport_shift	= DB1200_IDE_REG_SHIFT,
+};
+
+#define IDE_ALT_START	(14 << DB1200_IDE_REG_SHIFT)
 static struct resource db1200_ide_res[] = {
 	[0] = {
 		.start	= DB1200_IDE_PHYS_ADDR,
-		.end	= DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+		.end	= DB1200_IDE_PHYS_ADDR + IDE_ALT_START - 1,
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
+		.start	= DB1200_IDE_PHYS_ADDR + IDE_ALT_START,
+		.end	= DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[2] = {
		.start	= DB1200_IDE_INT,
 		.end	= DB1200_IDE_INT,
 		.flags	= IORESOURCE_IRQ,
 	},
-	[2] = {
-		.start	= AU1200_DSCR_CMD0_DMA_REQ1,
-		.end	= AU1200_DSCR_CMD0_DMA_REQ1,
-		.flags	= IORESOURCE_DMA,
-	},
 };
 
 static u64 au1200_ide_dmamask = DMA_BIT_MASK(32);
 
 static struct platform_device db1200_ide_dev = {
-	.name		= "au1200-ide",
+	.name		= "pata_platform",
 	.id		= 0,
 	.dev = {
 		.dma_mask		= &au1200_ide_dmamask,
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
+		.platform_data		= &db1200_ide_info,
 	},
 	.num_resources	= ARRAY_SIZE(db1200_ide_res),
 	.resource	= db1200_ide_res,
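Note: the IDE hunks above switch the DB1200/PB1200 from the DBDMA-based "au1200-ide" driver to the generic pata_platform driver. pata_platform expects the command block and the alternate-status/control block as two separate MEM resources plus an IRQ, with register spacing described by ioport_shift, which is why the single MEM resource is split at register 14 and the DMA resource is dropped. A rough usage sketch of the same convention (illustrative only; addresses, IRQ number and names are hypothetical):

    #include <linux/kernel.h>
    #include <linux/platform_device.h>
    #include <linux/ata_platform.h>
    #include <linux/ioport.h>

    #define EX_IDE_BASE	0x18800000		/* hypothetical chip-select address */
    #define EX_REG_SHIFT	5			/* registers spaced 32 bytes apart */
    #define EX_ALT_OFF	(14 << EX_REG_SHIFT)	/* alt-status/control at register 14 */

    static struct pata_platform_info ex_ide_info = {
    	.ioport_shift	= EX_REG_SHIFT,
    };

    static struct resource ex_ide_res[] = {
    	DEFINE_RES_MEM(EX_IDE_BASE, EX_ALT_OFF),			/* cmd block */
    	DEFINE_RES_MEM(EX_IDE_BASE + EX_ALT_OFF, 2 << EX_REG_SHIFT),	/* ctl block */
    	DEFINE_RES_IRQ(42),						/* hypothetical IRQ */
    };

    static struct platform_device ex_ide_dev = {
    	.name			= "pata_platform",
    	.id			= -1,
    	.dev.platform_data	= &ex_ide_info,
    	.num_resources		= ARRAY_SIZE(ex_ide_res),
    	.resource		= ex_ide_res,
    };
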
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 6167e73eef9c..1aed6be4de10 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -26,12 +26,44 @@
 #include <asm/mach-au1x00/au1200fb.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/mach-au1x00/au1xxx_psc.h>
-#include <asm/mach-db1x00/db1300.h>
 #include <asm/mach-db1x00/bcsr.h>
 #include <asm/mach-au1x00/prom.h>
 
 #include "platform.h"
 
+/* FPGA (external mux) interrupt sources */
+#define DB1300_FIRST_INT	(ALCHEMY_GPIC_INT_LAST + 1)
+#define DB1300_IDE_INT		(DB1300_FIRST_INT + 0)
+#define DB1300_ETH_INT		(DB1300_FIRST_INT + 1)
+#define DB1300_CF_INT		(DB1300_FIRST_INT + 2)
+#define DB1300_VIDEO_INT	(DB1300_FIRST_INT + 4)
+#define DB1300_HDMI_INT		(DB1300_FIRST_INT + 5)
+#define DB1300_DC_INT		(DB1300_FIRST_INT + 6)
+#define DB1300_FLASH_INT	(DB1300_FIRST_INT + 7)
+#define DB1300_CF_INSERT_INT	(DB1300_FIRST_INT + 8)
+#define DB1300_CF_EJECT_INT	(DB1300_FIRST_INT + 9)
+#define DB1300_AC97_INT		(DB1300_FIRST_INT + 10)
+#define DB1300_AC97_PEN_INT	(DB1300_FIRST_INT + 11)
+#define DB1300_SD1_INSERT_INT	(DB1300_FIRST_INT + 12)
+#define DB1300_SD1_EJECT_INT	(DB1300_FIRST_INT + 13)
+#define DB1300_OTG_VBUS_OC_INT	(DB1300_FIRST_INT + 14)
+#define DB1300_HOST_VBUS_OC_INT	(DB1300_FIRST_INT + 15)
+#define DB1300_LAST_INT		(DB1300_FIRST_INT + 15)
+
+/* SMSC9210 CS */
+#define DB1300_ETH_PHYS_ADDR	0x19000000
+#define DB1300_ETH_PHYS_END	0x197fffff
+
+/* ATA CS */
+#define DB1300_IDE_PHYS_ADDR	0x18800000
+#define DB1300_IDE_REG_SHIFT	5
+#define DB1300_IDE_PHYS_LEN	(16 << DB1300_IDE_REG_SHIFT)
+
+/* NAND CS */
+#define DB1300_NAND_PHYS_ADDR	0x20000000
+#define DB1300_NAND_PHYS_END	0x20000fff
+
+
 static struct i2c_board_info db1300_i2c_devs[] __initdata = {
 	{ I2C_BOARD_INFO("wm8731", 0x1b), },	/* I2S audio codec */
 	{ I2C_BOARD_INFO("ne1619", 0x2d), },	/* adm1025-compat hwmon */
@@ -759,11 +791,15 @@ int __init db1300_board_setup(void)
 {
 	unsigned short whoami;
 
-	db1300_gpio_config();
 	bcsr_init(DB1300_BCSR_PHYS_ADDR,
 		  DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);
 
 	whoami = bcsr_read(BCSR_WHOAMI);
+	if (BCSR_WHOAMI_BOARD(whoami) != BCSR_WHOAMI_DB1300)
+		return -ENODEV;
+
+	db1300_gpio_config();
+
 	printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
 		"BoardID %d   CPLD Rev %d   DaughtercardID %d\n",
 		BCSR_WHOAMI_BOARD(whoami), BCSR_WHOAMI_CPLD(whoami),
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 016cddacd7ea..bbd8d9884702 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -62,10 +62,16 @@ int __init db1550_board_setup(void)
 		  DB1550_BCSR_PHYS_ADDR + DB1550_BCSR_HEXLED_OFS);
 
 	whoami = bcsr_read(BCSR_WHOAMI); /* PB1550 hexled offset differs */
-	if ((BCSR_WHOAMI_BOARD(whoami) == BCSR_WHOAMI_PB1550_SDR) ||
-	    (BCSR_WHOAMI_BOARD(whoami) == BCSR_WHOAMI_PB1550_DDR))
+	switch (BCSR_WHOAMI_BOARD(whoami)) {
+	case BCSR_WHOAMI_PB1550_SDR:
+	case BCSR_WHOAMI_PB1550_DDR:
 		bcsr_init(PB1550_BCSR_PHYS_ADDR,
 			  PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
+	case BCSR_WHOAMI_DB1550:
+		break;
+	default:
+		return -ENODEV;
+	}
 
 	pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
 		"Daughtercard ID %d\n", get_system_type(),
diff --git a/arch/mips/alchemy/devboards/db1235.c b/arch/mips/alchemy/devboards/db1xxx.c
index bac19dc43d1d..2d47f951121a 100644
--- a/arch/mips/alchemy/devboards/db1235.c
+++ b/arch/mips/alchemy/devboards/db1xxx.c
@@ -1,12 +1,13 @@
 /*
- * DB1200/PB1200 / DB1550 / DB1300 board support.
- *
- * These 4 boards can reliably be supported in a single kernel image.
+ * Alchemy DB/PB1xxx board support.
  */
 
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-db1x00/bcsr.h>
 
+int __init db1000_board_setup(void);
+int __init db1000_dev_setup(void);
+int __init db1500_pci_setup(void);
 int __init db1200_board_setup(void);
 int __init db1200_dev_setup(void);
 int __init db1300_board_setup(void);
@@ -18,6 +19,17 @@ int __init db1550_pci_setup(int);
 static const char *board_type_str(void)
 {
 	switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+	case BCSR_WHOAMI_DB1000:
+		return "DB1000";
+	case BCSR_WHOAMI_DB1500:
+		return "DB1500";
+	case BCSR_WHOAMI_DB1100:
+		return "DB1100";
+	case BCSR_WHOAMI_PB1500:
+	case BCSR_WHOAMI_PB1500R2:
+		return "PB1500";
+	case BCSR_WHOAMI_PB1100:
+		return "PB1100";
 	case BCSR_WHOAMI_PB1200_DDR1:
 	case BCSR_WHOAMI_PB1200_DDR2:
 		return "PB1200";
@@ -45,6 +57,11 @@ void __init board_setup(void)
 	int ret;
 
 	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+	case ALCHEMY_CPU_AU1100:
+		ret = db1000_board_setup();
+		break;
 	case ALCHEMY_CPU_AU1550:
 		ret = db1550_board_setup();
 		break;
@@ -62,7 +79,7 @@ void __init board_setup(void)
 		panic("cannot initialize board support");
 }
 
-int __init db1235_arch_init(void)
+static int __init db1xxx_arch_init(void)
 {
 	int id = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
 	if (id == BCSR_WHOAMI_DB1550)
@@ -70,14 +87,24 @@ int __init db1235_arch_init(void)
 	else if ((id == BCSR_WHOAMI_PB1550_SDR) ||
 		 (id == BCSR_WHOAMI_PB1550_DDR))
 		return db1550_pci_setup(1);
+	else if ((id == BCSR_WHOAMI_DB1500) || (id == BCSR_WHOAMI_PB1500) ||
+		 (id == BCSR_WHOAMI_PB1500R2))
+		return db1500_pci_setup();
 
 	return 0;
 }
-arch_initcall(db1235_arch_init);
+arch_initcall(db1xxx_arch_init);
 
-int __init db1235_dev_init(void)
+static int __init db1xxx_dev_init(void)
 {
 	switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
+	case BCSR_WHOAMI_DB1000:
+	case BCSR_WHOAMI_DB1500:
+	case BCSR_WHOAMI_DB1100:
+	case BCSR_WHOAMI_PB1500:
+	case BCSR_WHOAMI_PB1500R2:
+	case BCSR_WHOAMI_PB1100:
+		return db1000_dev_setup();
 	case BCSR_WHOAMI_PB1200_DDR1:
 	case BCSR_WHOAMI_PB1200_DDR2:
 	case BCSR_WHOAMI_DB1200:
@@ -91,4 +118,4 @@ int __init db1235_dev_init(void)
 	}
 	return 0;
 }
-device_initcall(db1235_dev_init);
+device_initcall(db1xxx_dev_init);
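Note: after this rename, all of the Alchemy DB/PB development boards share a single image. board_setup() dispatches on the Au1x00 CPU type, the arch and device initcalls dispatch on the BCSR "whoami" register, and each per-board setup routine returns -ENODEV for hardware it does not recognise. Illustrative only (not from this patch), a new per-board hook would follow the same convention:

    /* Hypothetical per-board hook in the db1xxx dispatch scheme. */
    #include <linux/errno.h>
    #include <linux/init.h>
    #include <asm/mach-db1x00/bcsr.h>

    int __init example_board_setup(void)
    {
    	switch (BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI))) {
    	case BCSR_WHOAMI_DB1200:	/* only claim boards this code can drive */
    		return 0;
    	default:
    		return -ENODEV;		/* not a board this code recognises */
    	}
    }
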
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
index 1dc6c3b37f91..22c93213b233 100644
--- a/arch/mips/ar7/time.c
+++ b/arch/mips/ar7/time.c
@@ -18,6 +18,7 @@
  * Setting up the clock on the MIPS boards.
  */
 
+#include <linux/init.h>
 #include <linux/time.h>
 #include <linux/err.h>
 #include <linux/clk.h>
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index 3995e31a73e2..dfc60209dc63 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -74,34 +74,26 @@ config ATH79_MACH_UBNT_XM
 endmenu
 
 config SOC_AR71XX
-	select USB_ARCH_HAS_EHCI
-	select USB_ARCH_HAS_OHCI
 	select HW_HAS_PCI
 	def_bool n
 
 config SOC_AR724X
-	select USB_ARCH_HAS_EHCI
-	select USB_ARCH_HAS_OHCI
 	select HW_HAS_PCI
 	select PCI_AR724X if PCI
 	def_bool n
 
 config SOC_AR913X
-	select USB_ARCH_HAS_EHCI
 	def_bool n
 
 config SOC_AR933X
-	select USB_ARCH_HAS_EHCI
 	def_bool n
 
 config SOC_AR934X
-	select USB_ARCH_HAS_EHCI
 	select HW_HAS_PCI
 	select PCI_AR724X if PCI
 	def_bool n
 
 config SOC_QCA955X
-	select USB_ARCH_HAS_EHCI
 	select HW_HAS_PCI
 	select PCI_AR724X if PCI
 	def_bool n
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 4688b6a6211b..d58c51b5e501 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -4,4 +4,4 @@
 #
 
 obj-y				+= irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
-obj-y				+= board.o buttons.o leds.o
+obj-y				+= board.o buttons.o leds.o workarounds.o
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index 5c94acebf76a..0194c3b9a729 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -9,4 +9,7 @@ int __init bcm47xx_buttons_register(void);
 /* leds.c */
 void __init bcm47xx_leds_register(void);
 
+/* workarounds.c */
+void __init bcm47xx_workarounds(void);
+
 #endif
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index cdd8246f92b3..44ab1be68c3c 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -72,7 +72,11 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_hardware_version[] __initcons
 	{{BCM47XX_BOARD_ASUS_WL500W, "Asus WL500W"}, "WL500gW-"},
 	{{BCM47XX_BOARD_ASUS_WL520GC, "Asus WL520GC"}, "WL520GC-"},
 	{{BCM47XX_BOARD_ASUS_WL520GU, "Asus WL520GU"}, "WL520GU-"},
+	{{BCM47XX_BOARD_BELKIN_F7D3301, "Belkin F7D3301"}, "F7D3301"},
+	{{BCM47XX_BOARD_BELKIN_F7D3302, "Belkin F7D3302"}, "F7D3302"},
 	{{BCM47XX_BOARD_BELKIN_F7D4301, "Belkin F7D4301"}, "F7D4301"},
+	{{BCM47XX_BOARD_BELKIN_F7D4302, "Belkin F7D4302"}, "F7D4302"},
+	{{BCM47XX_BOARD_BELKIN_F7D4401, "Belkin F7D4401"}, "F7D4401"},
 	{ {0}, NULL},
 };
 
@@ -176,7 +180,16 @@ struct bcm47xx_board_type_list3 bcm47xx_board_list_board[] __initconst = {
 	{{BCM47XX_BOARD_PHICOMM_M1, "Phicomm M1"}, "0x0590", "80", "0x1104"},
 	{{BCM47XX_BOARD_ZTE_H218N, "ZTE H218N"}, "0x053d", "1234", "0x1305"},
 	{{BCM47XX_BOARD_NETGEAR_WNR3500L, "Netgear WNR3500L"}, "0x04CF", "3500", "02"},
-	{{BCM47XX_BOARD_LINKSYS_WRT54GSV1, "Linksys WRT54GS V1"}, "0x0101", "42", "0x10"},
+	{{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0101", "42", "0x10"},
+	{{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0467", "42", "0x10"},
+	{{BCM47XX_BOARD_LINKSYS_WRT54G, "Linksys WRT54G/GS/GL"}, "0x0708", "42", "0x10"},
+	{ {0}, NULL},
+};
+
+/* boardtype, boardrev */
+static const
+struct bcm47xx_board_type_list2 bcm47xx_board_list_board_type_rev[] __initconst = {
+	{{BCM47XX_BOARD_SIEMENS_SE505V2, "Siemens SE505 V2"}, "0x0101", "0x10"},
 	{ {0}, NULL},
 };
 
@@ -273,6 +286,16 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
 			return &e3->board;
 		}
 	}
+
+	if (bcm47xx_nvram_getenv("boardtype", buf1, sizeof(buf1)) >= 0 &&
+	    bcm47xx_nvram_getenv("boardrev", buf2, sizeof(buf2)) >= 0 &&
+	    bcm47xx_nvram_getenv("boardnum", buf3, sizeof(buf3)) == -ENOENT) {
+		for (e2 = bcm47xx_board_list_board_type_rev; e2->value1; e2++) {
+			if (!strcmp(buf1, e2->value1) &&
+			    !strcmp(buf2, e2->value2))
+				return &e2->board;
+		}
+	}
 	return bcm47xx_board_unknown;
 }
 
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 872c62e93e0e..49a1ce06844b 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -259,6 +259,18 @@ bcm47xx_buttons_linksys_wrt310nv1[] __initconst = {
259}; 259};
260 260
261static const struct gpio_keys_button 261static const struct gpio_keys_button
262bcm47xx_buttons_linksys_wrt54g3gv2[] __initconst = {
263 BCM47XX_GPIO_KEY(5, KEY_WIMAX),
264 BCM47XX_GPIO_KEY(6, KEY_RESTART),
265};
266
267static const struct gpio_keys_button
268bcm47xx_buttons_linksys_wrt54gsv1[] __initconst = {
269 BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
270 BCM47XX_GPIO_KEY(6, KEY_RESTART),
271};
272
273static const struct gpio_keys_button
262bcm47xx_buttons_linksys_wrt610nv1[] __initconst = { 274bcm47xx_buttons_linksys_wrt610nv1[] __initconst = {
263 BCM47XX_GPIO_KEY(6, KEY_RESTART), 275 BCM47XX_GPIO_KEY(6, KEY_RESTART),
264 BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON), 276 BCM47XX_GPIO_KEY(8, KEY_WPS_BUTTON),
@@ -270,6 +282,12 @@ bcm47xx_buttons_linksys_wrt610nv2[] __initconst = {
270 BCM47XX_GPIO_KEY(6, KEY_RESTART), 282 BCM47XX_GPIO_KEY(6, KEY_RESTART),
271}; 283};
272 284
285static const struct gpio_keys_button
286bcm47xx_buttons_linksys_wrtsl54gs[] __initconst = {
287 BCM47XX_GPIO_KEY(4, KEY_WPS_BUTTON),
288 BCM47XX_GPIO_KEY(6, KEY_RESTART),
289};
290
273/* Motorola */ 291/* Motorola */
274 292
275static const struct gpio_keys_button 293static const struct gpio_keys_button
@@ -402,7 +420,11 @@ int __init bcm47xx_buttons_register(void)
402 err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wlhdd); 420 err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_wlhdd);
403 break; 421 break;
404 422
423 case BCM47XX_BOARD_BELKIN_F7D3301:
424 case BCM47XX_BOARD_BELKIN_F7D3302:
405 case BCM47XX_BOARD_BELKIN_F7D4301: 425 case BCM47XX_BOARD_BELKIN_F7D4301:
426 case BCM47XX_BOARD_BELKIN_F7D4302:
427 case BCM47XX_BOARD_BELKIN_F7D4401:
406 err = bcm47xx_copy_bdata(bcm47xx_buttons_belkin_f7d4301); 428 err = bcm47xx_copy_bdata(bcm47xx_buttons_belkin_f7d4301);
407 break; 429 break;
408 430
@@ -479,12 +501,21 @@ int __init bcm47xx_buttons_register(void)
479 case BCM47XX_BOARD_LINKSYS_WRT310NV1: 501 case BCM47XX_BOARD_LINKSYS_WRT310NV1:
480 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1); 502 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1);
481 break; 503 break;
504 case BCM47XX_BOARD_LINKSYS_WRT54G:
505 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54gsv1);
506 break;
507 case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
508 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g3gv2);
509 break;
482 case BCM47XX_BOARD_LINKSYS_WRT610NV1: 510 case BCM47XX_BOARD_LINKSYS_WRT610NV1:
483 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1); 511 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv1);
484 break; 512 break;
485 case BCM47XX_BOARD_LINKSYS_WRT610NV2: 513 case BCM47XX_BOARD_LINKSYS_WRT610NV2:
486 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv2); 514 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt610nv2);
487 break; 515 break;
516 case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
517 err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrtsl54gs);
518 break;
488 519
489 case BCM47XX_BOARD_MOTOROLA_WE800G: 520 case BCM47XX_BOARD_MOTOROLA_WE800G:
490 err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g); 521 err = bcm47xx_copy_bdata(bcm47xx_buttons_motorola_we800g);
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index 647d15527066..adcb547a91c3 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -292,6 +292,21 @@ bcm47xx_leds_linksys_wrt310nv1[] __initconst = {
292}; 292};
293 293
294static const struct gpio_led 294static const struct gpio_led
295bcm47xx_leds_linksys_wrt54gsv1[] __initconst = {
296 BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
297 BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
298 BCM47XX_GPIO_LED(5, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
299 BCM47XX_GPIO_LED(7, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
300};
301
302static const struct gpio_led
303bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
304 BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
305 BCM47XX_GPIO_LED(2, "green", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
306 BCM47XX_GPIO_LED(3, "blue", "3g", 0, LEDS_GPIO_DEFSTATE_OFF),
307};
308
309static const struct gpio_led
295bcm47xx_leds_linksys_wrt610nv1[] __initconst = { 310bcm47xx_leds_linksys_wrt610nv1[] __initconst = {
296 BCM47XX_GPIO_LED(0, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF), 311 BCM47XX_GPIO_LED(0, "unk", "usb", 1, LEDS_GPIO_DEFSTATE_OFF),
297 BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_OFF), 312 BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_OFF),
@@ -308,6 +323,15 @@ bcm47xx_leds_linksys_wrt610nv2[] __initconst = {
308 BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF), 323 BCM47XX_GPIO_LED(7, "unk", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
309}; 324};
310 325
326static const struct gpio_led
327bcm47xx_leds_linksys_wrtsl54gs[] __initconst = {
328 BCM47XX_GPIO_LED(0, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
329 BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
330 BCM47XX_GPIO_LED(2, "white", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
331 BCM47XX_GPIO_LED(3, "orange", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
332 BCM47XX_GPIO_LED(7, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
333};
334
311/* Motorola */ 335/* Motorola */
312 336
313static const struct gpio_led 337static const struct gpio_led
@@ -359,6 +383,14 @@ bcm47xx_leds_netgear_wnr834bv2[] __initconst = {
359 BCM47XX_GPIO_LED(7, "unk", "connected", 0, LEDS_GPIO_DEFSTATE_OFF), 383 BCM47XX_GPIO_LED(7, "unk", "connected", 0, LEDS_GPIO_DEFSTATE_OFF),
360}; 384};
361 385
386/* Siemens */
387static const struct gpio_led
388bcm47xx_leds_siemens_se505v2[] __initconst = {
389 BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
390 BCM47XX_GPIO_LED(3, "unk", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
391 BCM47XX_GPIO_LED(5, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
392};
393
362/* SimpleTech */ 394/* SimpleTech */
363 395
364static const struct gpio_led 396static const struct gpio_led
@@ -425,7 +457,11 @@ void __init bcm47xx_leds_register(void)
425 bcm47xx_set_pdata(bcm47xx_leds_asus_wlhdd); 457 bcm47xx_set_pdata(bcm47xx_leds_asus_wlhdd);
426 break; 458 break;
427 459
460 case BCM47XX_BOARD_BELKIN_F7D3301:
461 case BCM47XX_BOARD_BELKIN_F7D3302:
428 case BCM47XX_BOARD_BELKIN_F7D4301: 462 case BCM47XX_BOARD_BELKIN_F7D4301:
463 case BCM47XX_BOARD_BELKIN_F7D4302:
464 case BCM47XX_BOARD_BELKIN_F7D4401:
429 bcm47xx_set_pdata(bcm47xx_leds_belkin_f7d4301); 465 bcm47xx_set_pdata(bcm47xx_leds_belkin_f7d4301);
430 break; 466 break;
431 467
@@ -502,12 +538,21 @@ void __init bcm47xx_leds_register(void)
502 case BCM47XX_BOARD_LINKSYS_WRT310NV1: 538 case BCM47XX_BOARD_LINKSYS_WRT310NV1:
503 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1); 539 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1);
504 break; 540 break;
541 case BCM47XX_BOARD_LINKSYS_WRT54G:
542 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54gsv1);
543 break;
544 case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
545 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g3gv2);
546 break;
505 case BCM47XX_BOARD_LINKSYS_WRT610NV1: 547 case BCM47XX_BOARD_LINKSYS_WRT610NV1:
506 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1); 548 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv1);
507 break; 549 break;
508 case BCM47XX_BOARD_LINKSYS_WRT610NV2: 550 case BCM47XX_BOARD_LINKSYS_WRT610NV2:
509 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv2); 551 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt610nv2);
510 break; 552 break;
553 case BCM47XX_BOARD_LINKSYS_WRTSL54GS:
554 bcm47xx_set_pdata(bcm47xx_leds_linksys_wrtsl54gs);
555 break;
511 556
512 case BCM47XX_BOARD_MOTOROLA_WE800G: 557 case BCM47XX_BOARD_MOTOROLA_WE800G:
513 bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g); 558 bcm47xx_set_pdata(bcm47xx_leds_motorola_we800g);
@@ -529,6 +574,10 @@ void __init bcm47xx_leds_register(void)
529 bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2); 574 bcm47xx_set_pdata(bcm47xx_leds_netgear_wnr834bv2);
530 break; 575 break;
531 576
577 case BCM47XX_BOARD_SIEMENS_SE505V2:
578 bcm47xx_set_pdata(bcm47xx_leds_siemens_se505v2);
579 break;
580
532 case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE: 581 case BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE:
533 bcm47xx_set_pdata(bcm47xx_leds_simpletech_simpleshare); 582 bcm47xx_set_pdata(bcm47xx_leds_simpletech_simpleshare);
534 break; 583 break;
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 025be218ea15..63a4b0e915dc 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,7 +212,7 @@ void __init plat_mem_setup(void)
212{ 212{
213 struct cpuinfo_mips *c = &current_cpu_data; 213 struct cpuinfo_mips *c = &current_cpu_data;
214 214
215 if (c->cputype == CPU_74K) { 215 if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) {
216 printk(KERN_INFO "bcm47xx: using bcma bus\n"); 216 printk(KERN_INFO "bcm47xx: using bcma bus\n");
217#ifdef CONFIG_BCM47XX_BCMA 217#ifdef CONFIG_BCM47XX_BCMA
218 bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA; 218 bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
@@ -282,6 +282,7 @@ static int __init bcm47xx_register_bus_complete(void)
282 } 282 }
283 bcm47xx_buttons_register(); 283 bcm47xx_buttons_register();
284 bcm47xx_leds_register(); 284 bcm47xx_leds_register();
285 bcm47xx_workarounds();
285 286
286 fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status); 287 fixed_phy_add(PHY_POLL, 0, &bcm47xx_fixed_phy_status);
287 return 0; 288 return 0;
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
new file mode 100644
index 000000000000..e81ce4623070
--- /dev/null
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -0,0 +1,31 @@
1#include "bcm47xx_private.h"
2
3#include <linux/gpio.h>
4#include <bcm47xx_board.h>
5#include <bcm47xx.h>
6
7static void __init bcm47xx_workarounds_netgear_wnr3500l(void)
8{
9 const int usb_power = 12;
10 int err;
11
12 err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
13 if (err)
14 pr_err("Failed to request USB power gpio: %d\n", err);
15 else
16 gpio_free(usb_power);
17}
18
19void __init bcm47xx_workarounds(void)
20{
21 enum bcm47xx_board board = bcm47xx_board_get();
22
23 switch (board) {
24 case BCM47XX_BOARD_NETGEAR_WNR3500L:
25 bcm47xx_workarounds_netgear_wnr3500l();
26 break;
27 default:
28 /* No workaround(s) needed */
29 break;
30 }
31}
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 1b1b8a89959b..fd4e76c00a42 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -299,14 +299,13 @@ static unsigned int detect_memory_size(void)
299void __init bcm63xx_cpu_init(void) 299void __init bcm63xx_cpu_init(void)
300{ 300{
301 unsigned int tmp; 301 unsigned int tmp;
302 struct cpuinfo_mips *c = &current_cpu_data;
303 unsigned int cpu = smp_processor_id(); 302 unsigned int cpu = smp_processor_id();
304 u32 chipid_reg; 303 u32 chipid_reg;
305 304
306 /* soc registers location depends on cpu type */ 305 /* soc registers location depends on cpu type */
307 chipid_reg = 0; 306 chipid_reg = 0;
308 307
309 switch (c->cputype) { 308 switch (current_cpu_type()) {
310 case CPU_BMIPS3300: 309 case CPU_BMIPS3300:
311 if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT) 310 if ((read_c0_prid() & PRID_IMP_MASK) != PRID_IMP_BMIPS3300_ALT)
312 __cpu_name[cpu] = "Broadcom BCM6338"; 311 __cpu_name[cpu] = "Broadcom BCM6338";
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
deleted file mode 100644
index bac26b971c5e..000000000000
--- a/arch/mips/configs/db1000_defconfig
+++ /dev/null
@@ -1,359 +0,0 @@
1CONFIG_MIPS=y
2CONFIG_MIPS_ALCHEMY=y
3CONFIG_MIPS_DB1000=y
4CONFIG_SCHED_OMIT_FRAME_POINTER=y
5CONFIG_TICK_ONESHOT=y
6CONFIG_NO_HZ=y
7CONFIG_HIGH_RES_TIMERS=y
8CONFIG_HZ_100=y
9CONFIG_HZ=100
10CONFIG_PREEMPT_NONE=y
11CONFIG_EXPERIMENTAL=y
12CONFIG_BROKEN_ON_SMP=y
13CONFIG_INIT_ENV_ARG_LIMIT=32
14CONFIG_CROSS_COMPILE=""
15CONFIG_LOCALVERSION="-db1x00"
16CONFIG_LOCALVERSION_AUTO=y
17CONFIG_KERNEL_LZMA=y
18CONFIG_DEFAULT_HOSTNAME="db1x00"
19CONFIG_SWAP=y
20CONFIG_SYSVIPC=y
21CONFIG_SYSVIPC_SYSCTL=y
22CONFIG_FHANDLE=y
23CONFIG_AUDIT=y
24CONFIG_TINY_RCU=y
25CONFIG_LOG_BUF_SHIFT=18
26CONFIG_NAMESPACES=y
27CONFIG_UTS_NS=y
28CONFIG_IPC_NS=y
29CONFIG_USER_NS=y
30CONFIG_PID_NS=y
31CONFIG_NET_NS=y
32CONFIG_SYSCTL=y
33CONFIG_EXPERT=y
34CONFIG_KALLSYMS=y
35CONFIG_KALLSYMS_ALL=y
36CONFIG_HOTPLUG=y
37CONFIG_PRINTK=y
38CONFIG_BUG=y
39CONFIG_ELF_CORE=y
40CONFIG_BASE_FULL=y
41CONFIG_FUTEX=y
42CONFIG_EPOLL=y
43CONFIG_SIGNALFD=y
44CONFIG_TIMERFD=y
45CONFIG_EVENTFD=y
46CONFIG_SHMEM=y
47CONFIG_AIO=y
48CONFIG_EMBEDDED=y
49CONFIG_HAVE_PERF_EVENTS=y
50CONFIG_PERF_USE_VMALLOC=y
51CONFIG_PCI_QUIRKS=y
52CONFIG_SLAB=y
53CONFIG_SLABINFO=y
54CONFIG_BLOCK=y
55CONFIG_LBDAF=y
56CONFIG_BLK_DEV_BSG=y
57CONFIG_BLK_DEV_BSGLIB=y
58CONFIG_IOSCHED_NOOP=y
59CONFIG_DEFAULT_NOOP=y
60CONFIG_DEFAULT_IOSCHED="noop"
61CONFIG_FREEZER=y
62CONFIG_PCI=y
63CONFIG_PCI_DOMAINS=y
64CONFIG_PCCARD=y
65CONFIG_PCMCIA=y
66CONFIG_PCMCIA_LOAD_CIS=y
67CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
68CONFIG_BINFMT_ELF=y
69CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
70CONFIG_SUSPEND=y
71CONFIG_SUSPEND_FREEZER=y
72CONFIG_PM_SLEEP=y
73CONFIG_PM_RUNTIME=y
74CONFIG_PM=y
75CONFIG_NET=y
76CONFIG_PACKET=y
77CONFIG_UNIX=y
78CONFIG_XFRM=y
79CONFIG_INET=y
80CONFIG_IP_MULTICAST=y
81CONFIG_IP_PNP=y
82CONFIG_IP_PNP_DHCP=y
83CONFIG_IP_PNP_BOOTP=y
84CONFIG_IP_PNP_RARP=y
85CONFIG_NET_IPIP=y
86CONFIG_INET_TUNNEL=y
87CONFIG_INET_LRO=y
88CONFIG_TCP_CONG_CUBIC=y
89CONFIG_DEFAULT_TCP_CONG="cubic"
90CONFIG_IPV6=y
91CONFIG_INET6_XFRM_MODE_TRANSPORT=y
92CONFIG_INET6_XFRM_MODE_TUNNEL=y
93CONFIG_INET6_XFRM_MODE_BEET=y
94CONFIG_IPV6_SIT=y
95CONFIG_IPV6_NDISC_NODETYPE=y
96CONFIG_STP=y
97CONFIG_GARP=y
98CONFIG_BRIDGE=y
99CONFIG_BRIDGE_IGMP_SNOOPING=y
100CONFIG_VLAN_8021Q=y
101CONFIG_VLAN_8021Q_GVRP=y
102CONFIG_LLC=y
103CONFIG_LLC2=y
104CONFIG_DNS_RESOLVER=y
105CONFIG_BT=y
106CONFIG_BT_L2CAP=y
107CONFIG_BT_SCO=y
108CONFIG_BT_RFCOMM=y
109CONFIG_BT_RFCOMM_TTY=y
110CONFIG_BT_BNEP=y
111CONFIG_BT_BNEP_MC_FILTER=y
112CONFIG_BT_BNEP_PROTO_FILTER=y
113CONFIG_BT_HIDP=y
114CONFIG_BT_HCIBTUSB=y
115CONFIG_UEVENT_HELPER_PATH=""
116CONFIG_STANDALONE=y
117CONFIG_PREVENT_FIRMWARE_BUILD=y
118CONFIG_FW_LOADER=y
119CONFIG_MTD=y
120CONFIG_MTD_CMDLINE_PARTS=y
121CONFIG_MTD_CHAR=y
122CONFIG_MTD_BLKDEVS=y
123CONFIG_MTD_BLOCK=y
124CONFIG_MTD_CFI=y
125CONFIG_MTD_GEN_PROBE=y
126CONFIG_MTD_CFI_ADV_OPTIONS=y
127CONFIG_MTD_CFI_NOSWAP=y
128CONFIG_MTD_CFI_GEOMETRY=y
129CONFIG_MTD_MAP_BANK_WIDTH_1=y
130CONFIG_MTD_MAP_BANK_WIDTH_2=y
131CONFIG_MTD_MAP_BANK_WIDTH_4=y
132CONFIG_MTD_CFI_I1=y
133CONFIG_MTD_CFI_I2=y
134CONFIG_MTD_CFI_I4=y
135CONFIG_MTD_CFI_I8=y
136CONFIG_MTD_CFI_INTELEXT=y
137CONFIG_MTD_CFI_AMDSTD=y
138CONFIG_MTD_CFI_UTIL=y
139CONFIG_MTD_PHYSMAP=y
140CONFIG_SCSI_MOD=y
141CONFIG_SCSI=y
142CONFIG_SCSI_DMA=y
143CONFIG_SCSI_PROC_FS=y
144CONFIG_BLK_DEV_SD=y
145CONFIG_CHR_DEV_SG=y
146CONFIG_SCSI_MULTI_LUN=y
147CONFIG_SCSI_CONSTANTS=y
148CONFIG_ATA=y
149CONFIG_ATA_VERBOSE_ERROR=y
150CONFIG_ATA_SFF=y
151CONFIG_ATA_BMDMA=y
152CONFIG_PATA_HPT37X=y
153CONFIG_PATA_PCMCIA=y
154CONFIG_MD=y
155CONFIG_BLK_DEV_DM=y
156CONFIG_FIREWIRE=y
157CONFIG_FIREWIRE_OHCI=y
158CONFIG_FIREWIRE_OHCI_DEBUG=y
159CONFIG_FIREWIRE_NET=y
160CONFIG_NETDEVICES=y
161CONFIG_MII=y
162CONFIG_PHYLIB=y
163CONFIG_NET_ETHERNET=y
164CONFIG_MIPS_AU1X00_ENET=y
165CONFIG_NET_PCMCIA=y
166CONFIG_PCMCIA_3C589=y
167CONFIG_PCMCIA_PCNET=y
168CONFIG_PPP=y
169CONFIG_PPP_MULTILINK=y
170CONFIG_PPP_FILTER=y
171CONFIG_PPP_ASYNC=y
172CONFIG_PPP_SYNC_TTY=y
173CONFIG_PPP_DEFLATE=y
174CONFIG_PPP_BSDCOMP=y
175CONFIG_PPP_MPPE=y
176CONFIG_PPPOE=y
177CONFIG_INPUT=y
178CONFIG_INPUT_EVDEV=y
179CONFIG_INPUT_MISC=y
180CONFIG_INPUT_UINPUT=y
181CONFIG_VT=y
182CONFIG_CONSOLE_TRANSLATIONS=y
183CONFIG_VT_CONSOLE=y
184CONFIG_HW_CONSOLE=y
185CONFIG_UNIX98_PTYS=y
186CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
187CONFIG_DEVKMEM=y
188CONFIG_SERIAL_8250=y
189CONFIG_SERIAL_8250_CONSOLE=y
190CONFIG_SERIAL_8250_NR_UARTS=4
191CONFIG_SERIAL_8250_RUNTIME_UARTS=4
192CONFIG_SERIAL_CORE=y
193CONFIG_SERIAL_CORE_CONSOLE=y
194CONFIG_TTY_PRINTK=y
195CONFIG_DEVPORT=y
196CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
197CONFIG_FB=y
198CONFIG_FB_CFB_FILLRECT=y
199CONFIG_FB_CFB_COPYAREA=y
200CONFIG_FB_CFB_IMAGEBLIT=y
201CONFIG_FB_AU1100=y
202CONFIG_DUMMY_CONSOLE=y
203CONFIG_FRAMEBUFFER_CONSOLE=y
204CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
205CONFIG_FONTS=y
206CONFIG_FONT_8x16=y
207CONFIG_SOUND=y
208CONFIG_SND=y
209CONFIG_SND_TIMER=y
210CONFIG_SND_PCM=y
211CONFIG_SND_JACK=y
212CONFIG_SND_SEQUENCER=y
213CONFIG_SND_HRTIMER=y
214CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
215CONFIG_SND_DYNAMIC_MINORS=y
216CONFIG_SND_VMASTER=y
217CONFIG_SND_AC97_CODEC=y
218CONFIG_SND_SOC=y
219CONFIG_SND_SOC_AC97_BUS=y
220CONFIG_SND_SOC_AU1XAUDIO=y
221CONFIG_SND_SOC_AU1XAC97C=y
222CONFIG_SND_SOC_DB1000=y
223CONFIG_SND_SOC_AC97_CODEC=y
224CONFIG_AC97_BUS=y
225CONFIG_HID_SUPPORT=y
226CONFIG_HID=y
227CONFIG_HIDRAW=y
228CONFIG_USB_HID=y
229CONFIG_USB_SUPPORT=y
230CONFIG_USB=y
231CONFIG_USB_EHCI_HCD=y
232CONFIG_USB_EHCI_ROOT_HUB_TT=y
233CONFIG_USB_EHCI_TT_NEWSCHED=y
234CONFIG_USB_OHCI_HCD=y
235CONFIG_USB_OHCI_HCD_PLATFORM=y
236CONFIG_USB_UHCI_HCD=y
237CONFIG_USB_STORAGE=y
238CONFIG_NEW_LEDS=y
239CONFIG_LEDS_CLASS=y
240CONFIG_LEDS_TRIGGERS=y
241CONFIG_RTC_LIB=y
242CONFIG_RTC_CLASS=y
243CONFIG_RTC_HCTOSYS=y
244CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
245CONFIG_RTC_INTF_SYSFS=y
246CONFIG_RTC_INTF_PROC=y
247CONFIG_RTC_INTF_DEV=y
248CONFIG_RTC_DRV_AU1XXX=y
249CONFIG_EXT4_FS=y
250CONFIG_EXT4_USE_FOR_EXT23=y
251CONFIG_EXT4_FS_XATTR=y
252CONFIG_EXT4_FS_POSIX_ACL=y
253CONFIG_EXT4_FS_SECURITY=y
254CONFIG_JBD2=y
255CONFIG_FS_MBCACHE=y
256CONFIG_FS_POSIX_ACL=y
257CONFIG_EXPORTFS=y
258CONFIG_FILE_LOCKING=y
259CONFIG_FSNOTIFY=y
260CONFIG_DNOTIFY=y
261CONFIG_INOTIFY_USER=y
262CONFIG_GENERIC_ACL=y
263CONFIG_PROC_FS=y
264CONFIG_PROC_KCORE=y
265CONFIG_PROC_SYSCTL=y
266CONFIG_SYSFS=y
267CONFIG_TMPFS=y
268CONFIG_TMPFS_POSIX_ACL=y
269CONFIG_TMPFS_XATTR=y
270CONFIG_MISC_FILESYSTEMS=y
271CONFIG_JFFS2_FS=y
272CONFIG_JFFS2_FS_DEBUG=0
273CONFIG_JFFS2_FS_WRITEBUFFER=y
274CONFIG_JFFS2_SUMMARY=y
275CONFIG_JFFS2_FS_XATTR=y
276CONFIG_JFFS2_FS_POSIX_ACL=y
277CONFIG_JFFS2_FS_SECURITY=y
278CONFIG_JFFS2_COMPRESSION_OPTIONS=y
279CONFIG_JFFS2_ZLIB=y
280CONFIG_JFFS2_LZO=y
281CONFIG_JFFS2_RTIME=y
282CONFIG_JFFS2_RUBIN=y
283CONFIG_JFFS2_CMODE_PRIORITY=y
284CONFIG_SQUASHFS=y
285CONFIG_SQUASHFS_ZLIB=y
286CONFIG_SQUASHFS_LZO=y
287CONFIG_SQUASHFS_XZ=y
288CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
289CONFIG_NETWORK_FILESYSTEMS=y
290CONFIG_NFS_FS=y
291CONFIG_NFS_V3=y
292CONFIG_NFS_V4=y
293CONFIG_NFS_V4_1=y
294CONFIG_PNFS_FILE_LAYOUT=y
295CONFIG_PNFS_BLOCK=y
296CONFIG_ROOT_NFS=y
297CONFIG_NFS_USE_KERNEL_DNS=y
298CONFIG_NFS_USE_NEW_IDMAPPER=y
299CONFIG_NFSD=y
300CONFIG_NFSD_V2_ACL=y
301CONFIG_NFSD_V3=y
302CONFIG_NFSD_V3_ACL=y
303CONFIG_NFSD_V4=y
304CONFIG_LOCKD=y
305CONFIG_LOCKD_V4=y
306CONFIG_NFS_ACL_SUPPORT=y
307CONFIG_NFS_COMMON=y
308CONFIG_SUNRPC=y
309CONFIG_SUNRPC_GSS=y
310CONFIG_SUNRPC_BACKCHANNEL=y
311CONFIG_MSDOS_PARTITION=y
312CONFIG_NLS=y
313CONFIG_NLS_DEFAULT="iso8859-1"
314CONFIG_NLS_CODEPAGE_437=y
315CONFIG_NLS_CODEPAGE_850=y
316CONFIG_NLS_CODEPAGE_1250=y
317CONFIG_NLS_ASCII=y
318CONFIG_NLS_ISO8859_1=y
319CONFIG_NLS_ISO8859_15=y
320CONFIG_NLS_UTF8=y
321CONFIG_HAVE_ARCH_KGDB=y
322CONFIG_EARLY_PRINTK=y
323CONFIG_CMDLINE_BOOL=y
324CONFIG_CMDLINE="noirqdebug rootwait root=/dev/sda1 rootfstype=ext4 console=ttyS0,115200 video=au1100fb:panel:CRT_800x600_16"
325CONFIG_DEBUG_ZBOOT=y
326CONFIG_KEYS=y
327CONFIG_KEYS_DEBUG_PROC_KEYS=y
328CONFIG_SECURITYFS=y
329CONFIG_DEFAULT_SECURITY_DAC=y
330CONFIG_DEFAULT_SECURITY=""
331CONFIG_CRYPTO=y
332CONFIG_CRYPTO_ALGAPI=y
333CONFIG_CRYPTO_ALGAPI2=y
334CONFIG_CRYPTO_AEAD2=y
335CONFIG_CRYPTO_BLKCIPHER=y
336CONFIG_CRYPTO_BLKCIPHER2=y
337CONFIG_CRYPTO_HASH=y
338CONFIG_CRYPTO_HASH2=y
339CONFIG_CRYPTO_RNG=y
340CONFIG_CRYPTO_RNG2=y
341CONFIG_CRYPTO_PCOMP2=y
342CONFIG_CRYPTO_MANAGER=y
343CONFIG_CRYPTO_MANAGER2=y
344CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
345CONFIG_CRYPTO_WORKQUEUE=y
346CONFIG_CRYPTO_ECB=y
347CONFIG_CRYPTO_SHA1=y
348CONFIG_CRYPTO_AES=y
349CONFIG_CRYPTO_ANSI_CPRNG=y
350CONFIG_BITREVERSE=y
351CONFIG_CRC_CCITT=y
352CONFIG_CRC16=y
353CONFIG_CRC_ITU_T=y
354CONFIG_CRC32=y
355CONFIG_ZLIB_INFLATE=y
356CONFIG_ZLIB_DEFLATE=y
357CONFIG_LZO_COMPRESS=y
358CONFIG_LZO_DECOMPRESS=y
359CONFIG_XZ_DEC=y
diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig
deleted file mode 100644
index 28e49f226dc0..000000000000
--- a/arch/mips/configs/db1235_defconfig
+++ /dev/null
@@ -1,434 +0,0 @@
1CONFIG_MIPS_ALCHEMY=y
2CONFIG_MIPS_DB1235=y
3CONFIG_COMPACTION=y
4CONFIG_KSM=y
5CONFIG_HZ_100=y
6CONFIG_EXPERIMENTAL=y
7CONFIG_LOCALVERSION="-db1235"
8CONFIG_KERNEL_LZMA=y
9CONFIG_DEFAULT_HOSTNAME="db1235"
10CONFIG_SYSVIPC=y
11CONFIG_POSIX_MQUEUE=y
12CONFIG_BSD_PROCESS_ACCT=y
13CONFIG_BSD_PROCESS_ACCT_V3=y
14CONFIG_FHANDLE=y
15CONFIG_TASKSTATS=y
16CONFIG_TASK_DELAY_ACCT=y
17CONFIG_AUDIT=y
18CONFIG_AUDIT_LOGINUID_IMMUTABLE=y
19CONFIG_NO_HZ=y
20CONFIG_HIGH_RES_TIMERS=y
21CONFIG_LOG_BUF_SHIFT=16
22CONFIG_NAMESPACES=y
23CONFIG_EMBEDDED=y
24CONFIG_SLAB=y
25CONFIG_JUMP_LABEL=y
26CONFIG_PARTITION_ADVANCED=y
27CONFIG_LDM_PARTITION=y
28CONFIG_EFI_PARTITION=y
29CONFIG_PCI=y
30CONFIG_PCCARD=y
31CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
32CONFIG_PM_RUNTIME=y
33CONFIG_NET=y
34CONFIG_PACKET=y
35CONFIG_UNIX=y
36CONFIG_UNIX_DIAG=y
37CONFIG_XFRM_USER=y
38CONFIG_INET=y
39CONFIG_IP_MULTICAST=y
40CONFIG_IP_ADVANCED_ROUTER=y
41CONFIG_IP_MULTIPLE_TABLES=y
42CONFIG_IP_ROUTE_MULTIPATH=y
43CONFIG_IP_ROUTE_VERBOSE=y
44CONFIG_IP_PNP=y
45CONFIG_IP_PNP_DHCP=y
46CONFIG_IP_PNP_BOOTP=y
47CONFIG_IP_PNP_RARP=y
48CONFIG_NET_IPIP=y
49CONFIG_NET_IPGRE_DEMUX=y
50CONFIG_NET_IPGRE=y
51CONFIG_NET_IPGRE_BROADCAST=y
52CONFIG_IP_MROUTE=y
53CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
54CONFIG_IP_PIMSM_V1=y
55CONFIG_IP_PIMSM_V2=y
56CONFIG_ARPD=y
57CONFIG_SYN_COOKIES=y
58CONFIG_NET_IPVTI=y
59CONFIG_INET_AH=y
60CONFIG_INET_ESP=y
61CONFIG_INET_IPCOMP=y
62CONFIG_INET_UDP_DIAG=y
63CONFIG_TCP_CONG_ADVANCED=y
64CONFIG_TCP_CONG_HSTCP=y
65CONFIG_TCP_CONG_HYBLA=y
66CONFIG_TCP_CONG_SCALABLE=y
67CONFIG_TCP_CONG_LP=y
68CONFIG_TCP_CONG_VENO=y
69CONFIG_TCP_CONG_YEAH=y
70CONFIG_TCP_CONG_ILLINOIS=y
71CONFIG_DEFAULT_HYBLA=y
72CONFIG_TCP_MD5SIG=y
73CONFIG_IPV6_PRIVACY=y
74CONFIG_IPV6_ROUTER_PREF=y
75CONFIG_IPV6_ROUTE_INFO=y
76CONFIG_IPV6_OPTIMISTIC_DAD=y
77CONFIG_INET6_AH=y
78CONFIG_INET6_ESP=y
79CONFIG_INET6_IPCOMP=y
80CONFIG_IPV6_MIP6=y
81CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
82CONFIG_IPV6_SIT_6RD=y
83CONFIG_IPV6_TUNNEL=y
84CONFIG_IPV6_MULTIPLE_TABLES=y
85CONFIG_IPV6_SUBTREES=y
86CONFIG_IPV6_MROUTE=y
87CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
88CONFIG_IPV6_PIMSM_V2=y
89CONFIG_NETFILTER=y
90CONFIG_NF_CONNTRACK=y
91CONFIG_NF_CONNTRACK_EVENTS=y
92CONFIG_NF_CONNTRACK_TIMEOUT=y
93CONFIG_NF_CONNTRACK_TIMESTAMP=y
94CONFIG_NF_CT_PROTO_DCCP=y
95CONFIG_NF_CT_PROTO_SCTP=y
96CONFIG_NF_CT_PROTO_UDPLITE=y
97CONFIG_NF_CONNTRACK_AMANDA=y
98CONFIG_NF_CONNTRACK_FTP=y
99CONFIG_NF_CONNTRACK_H323=y
100CONFIG_NF_CONNTRACK_IRC=y
101CONFIG_NF_CONNTRACK_NETBIOS_NS=y
102CONFIG_NF_CONNTRACK_SNMP=y
103CONFIG_NF_CONNTRACK_PPTP=y
104CONFIG_NF_CONNTRACK_SANE=y
105CONFIG_NF_CONNTRACK_SIP=y
106CONFIG_NF_CONNTRACK_TFTP=y
107CONFIG_NF_CT_NETLINK=y
108CONFIG_NF_CT_NETLINK_TIMEOUT=y
109CONFIG_NF_CT_NETLINK_HELPER=y
110CONFIG_NETFILTER_NETLINK_QUEUE_CT=y
111CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
112CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
113CONFIG_NETFILTER_XT_TARGET_HMARK=y
114CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
115CONFIG_NETFILTER_XT_TARGET_LED=y
116CONFIG_NETFILTER_XT_TARGET_LOG=y
117CONFIG_NETFILTER_XT_TARGET_MARK=y
118CONFIG_NETFILTER_XT_TARGET_NFLOG=y
119CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
120CONFIG_NETFILTER_XT_TARGET_TEE=y
121CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
122CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y
123CONFIG_NETFILTER_XT_MATCH_CLUSTER=y
124CONFIG_NETFILTER_XT_MATCH_COMMENT=y
125CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
126CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
127CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
128CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
129CONFIG_NETFILTER_XT_MATCH_CPU=y
130CONFIG_NETFILTER_XT_MATCH_DCCP=y
131CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y
132CONFIG_NETFILTER_XT_MATCH_DSCP=y
133CONFIG_NETFILTER_XT_MATCH_ESP=y
134CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
135CONFIG_NETFILTER_XT_MATCH_HELPER=y
136CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
137CONFIG_NETFILTER_XT_MATCH_LENGTH=y
138CONFIG_NETFILTER_XT_MATCH_LIMIT=y
139CONFIG_NETFILTER_XT_MATCH_MAC=y
140CONFIG_NETFILTER_XT_MATCH_MARK=y
141CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
142CONFIG_NETFILTER_XT_MATCH_NFACCT=y
143CONFIG_NETFILTER_XT_MATCH_OSF=y
144CONFIG_NETFILTER_XT_MATCH_OWNER=y
145CONFIG_NETFILTER_XT_MATCH_POLICY=y
146CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y
147CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
148CONFIG_NETFILTER_XT_MATCH_QUOTA=y
149CONFIG_NETFILTER_XT_MATCH_RATEEST=y
150CONFIG_NETFILTER_XT_MATCH_REALM=y
151CONFIG_NETFILTER_XT_MATCH_RECENT=y
152CONFIG_NETFILTER_XT_MATCH_SCTP=y
153CONFIG_NETFILTER_XT_MATCH_STATE=y
154CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
155CONFIG_NETFILTER_XT_MATCH_STRING=y
156CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
157CONFIG_NETFILTER_XT_MATCH_TIME=y
158CONFIG_NETFILTER_XT_MATCH_U32=y
159CONFIG_NF_CONNTRACK_IPV4=y
160CONFIG_IP_NF_IPTABLES=y
161CONFIG_IP_NF_MATCH_AH=y
162CONFIG_IP_NF_MATCH_ECN=y
163CONFIG_IP_NF_MATCH_RPFILTER=y
164CONFIG_IP_NF_MATCH_TTL=y
165CONFIG_IP_NF_FILTER=y
166CONFIG_IP_NF_TARGET_REJECT=y
167CONFIG_IP_NF_TARGET_ULOG=y
168CONFIG_NF_NAT=y
169CONFIG_IP_NF_TARGET_MASQUERADE=y
170CONFIG_IP_NF_TARGET_NETMAP=y
171CONFIG_IP_NF_TARGET_REDIRECT=y
172CONFIG_IP_NF_MANGLE=y
173CONFIG_IP_NF_TARGET_CLUSTERIP=y
174CONFIG_IP_NF_TARGET_ECN=y
175CONFIG_IP_NF_TARGET_TTL=y
176CONFIG_IP_NF_RAW=y
177CONFIG_IP_NF_ARPTABLES=y
178CONFIG_IP_NF_ARPFILTER=y
179CONFIG_IP_NF_ARP_MANGLE=y
180CONFIG_NF_CONNTRACK_IPV6=y
181CONFIG_IP6_NF_IPTABLES=y
182CONFIG_IP6_NF_MATCH_AH=y
183CONFIG_IP6_NF_MATCH_EUI64=y
184CONFIG_IP6_NF_MATCH_FRAG=y
185CONFIG_IP6_NF_MATCH_OPTS=y
186CONFIG_IP6_NF_MATCH_HL=y
187CONFIG_IP6_NF_MATCH_IPV6HEADER=y
188CONFIG_IP6_NF_MATCH_MH=y
189CONFIG_IP6_NF_MATCH_RPFILTER=y
190CONFIG_IP6_NF_MATCH_RT=y
191CONFIG_IP6_NF_TARGET_HL=y
192CONFIG_IP6_NF_FILTER=y
193CONFIG_IP6_NF_TARGET_REJECT=y
194CONFIG_IP6_NF_MANGLE=y
195CONFIG_IP6_NF_RAW=y
196CONFIG_BRIDGE_NF_EBTABLES=y
197CONFIG_BRIDGE_EBT_BROUTE=y
198CONFIG_BRIDGE_EBT_T_FILTER=y
199CONFIG_BRIDGE_EBT_T_NAT=y
200CONFIG_BRIDGE_EBT_802_3=y
201CONFIG_BRIDGE_EBT_AMONG=y
202CONFIG_BRIDGE_EBT_ARP=y
203CONFIG_BRIDGE_EBT_IP=y
204CONFIG_BRIDGE_EBT_IP6=y
205CONFIG_BRIDGE_EBT_LIMIT=y
206CONFIG_BRIDGE_EBT_MARK=y
207CONFIG_BRIDGE_EBT_PKTTYPE=y
208CONFIG_BRIDGE_EBT_STP=y
209CONFIG_BRIDGE_EBT_VLAN=y
210CONFIG_BRIDGE_EBT_ARPREPLY=y
211CONFIG_BRIDGE_EBT_DNAT=y
212CONFIG_BRIDGE_EBT_MARK_T=y
213CONFIG_BRIDGE_EBT_REDIRECT=y
214CONFIG_BRIDGE_EBT_SNAT=y
215CONFIG_BRIDGE_EBT_LOG=y
216CONFIG_BRIDGE_EBT_NFLOG=y
217CONFIG_L2TP=y
218CONFIG_L2TP_V3=y
219CONFIG_L2TP_IP=y
220CONFIG_L2TP_ETH=y
221CONFIG_BRIDGE=y
222CONFIG_VLAN_8021Q=y
223CONFIG_VLAN_8021Q_GVRP=y
224CONFIG_LLC2=y
225CONFIG_NET_SCHED=y
226CONFIG_NET_SCH_CBQ=y
227CONFIG_NET_SCH_HTB=y
228CONFIG_NET_SCH_HFSC=y
229CONFIG_NET_SCH_PRIO=y
230CONFIG_NET_SCH_MULTIQ=y
231CONFIG_NET_SCH_RED=y
232CONFIG_NET_SCH_SFB=y
233CONFIG_NET_SCH_SFQ=y
234CONFIG_NET_SCH_TEQL=y
235CONFIG_NET_SCH_TBF=y
236CONFIG_NET_SCH_GRED=y
237CONFIG_NET_SCH_DSMARK=y
238CONFIG_NET_SCH_NETEM=y
239CONFIG_NET_SCH_DRR=y
240CONFIG_NET_SCH_MQPRIO=y
241CONFIG_NET_SCH_CHOKE=y
242CONFIG_NET_SCH_QFQ=y
243CONFIG_NET_SCH_CODEL=y
244CONFIG_NET_SCH_FQ_CODEL=y
245CONFIG_NET_SCH_INGRESS=y
246CONFIG_NET_SCH_PLUG=y
247CONFIG_NET_CLS_BASIC=y
248CONFIG_NET_CLS_TCINDEX=y
249CONFIG_NET_CLS_ROUTE4=y
250CONFIG_NET_CLS_FW=y
251CONFIG_NET_CLS_U32=y
252CONFIG_CLS_U32_PERF=y
253CONFIG_CLS_U32_MARK=y
254CONFIG_NET_CLS_RSVP=y
255CONFIG_NET_CLS_RSVP6=y
256CONFIG_NET_CLS_FLOW=y
257CONFIG_NET_EMATCH=y
258CONFIG_NET_EMATCH_CMP=y
259CONFIG_NET_EMATCH_NBYTE=y
260CONFIG_NET_EMATCH_U32=y
261CONFIG_NET_EMATCH_META=y
262CONFIG_NET_EMATCH_TEXT=y
263CONFIG_NET_CLS_ACT=y
264CONFIG_NET_ACT_POLICE=y
265CONFIG_NET_ACT_GACT=y
266CONFIG_GACT_PROB=y
267CONFIG_NET_ACT_MIRRED=y
268CONFIG_NET_ACT_NAT=y
269CONFIG_NET_ACT_PEDIT=y
270CONFIG_NET_ACT_SIMP=y
271CONFIG_NET_ACT_SKBEDIT=y
272CONFIG_NET_ACT_CSUM=y
273CONFIG_NET_CLS_IND=y
274CONFIG_BT=y
275CONFIG_BT_RFCOMM=y
276CONFIG_BT_RFCOMM_TTY=y
277CONFIG_BT_BNEP=y
278CONFIG_BT_BNEP_MC_FILTER=y
279CONFIG_BT_BNEP_PROTO_FILTER=y
280CONFIG_BT_HIDP=y
281CONFIG_BT_HCIBTUSB=y
282CONFIG_CFG80211=y
283CONFIG_CFG80211_CERTIFICATION_ONUS=y
284CONFIG_CFG80211_WEXT=y
285CONFIG_MAC80211=y
286CONFIG_MAC80211_LEDS=y
287CONFIG_RFKILL=y
288CONFIG_RFKILL_INPUT=y
289CONFIG_DEVTMPFS=y
290CONFIG_DEVTMPFS_MOUNT=y
291CONFIG_MTD=y
292CONFIG_MTD_CHAR=y
293CONFIG_MTD_BLOCK=y
294CONFIG_MTD_CFI=y
295CONFIG_MTD_CFI_AMDSTD=y
296CONFIG_MTD_PHYSMAP=y
297CONFIG_MTD_M25P80=y
298CONFIG_MTD_NAND=y
299CONFIG_MTD_NAND_PLATFORM=y
300CONFIG_EEPROM_AT24=y
301CONFIG_EEPROM_AT25=y
302CONFIG_IDE=y
303CONFIG_BLK_DEV_IDE_AU1XXX=y
304CONFIG_BLK_DEV_SD=y
305CONFIG_CHR_DEV_SG=y
306CONFIG_SCSI_MULTI_LUN=y
307CONFIG_ATA=y
308CONFIG_PATA_HPT37X=y
309CONFIG_PATA_PCMCIA=y
310CONFIG_PATA_PLATFORM=y
311CONFIG_NETDEVICES=y
312CONFIG_MIPS_AU1X00_ENET=y
313CONFIG_SMC91X=y
314CONFIG_SMSC911X=y
315CONFIG_AMD_PHY=y
316CONFIG_SMSC_PHY=y
317CONFIG_RT2X00=y
318CONFIG_RT73USB=y
319CONFIG_INPUT_EVDEV=y
320CONFIG_INPUT_TOUCHSCREEN=y
321CONFIG_TOUCHSCREEN_WM97XX=y
322CONFIG_INPUT_MISC=y
323CONFIG_INPUT_UINPUT=y
324CONFIG_SERIAL_8250=y
325CONFIG_SERIAL_8250_CONSOLE=y
326CONFIG_TTY_PRINTK=y
327CONFIG_I2C=y
328CONFIG_I2C_CHARDEV=y
329CONFIG_I2C_AU1550=y
330CONFIG_SPI=y
331CONFIG_SPI_AU1550=y
332CONFIG_GPIO_SYSFS=y
333CONFIG_SENSORS_ADM1025=y
334CONFIG_SENSORS_LM70=y
335CONFIG_SOUND=y
336CONFIG_SND=y
337CONFIG_SND_HRTIMER=y
338CONFIG_SND_DYNAMIC_MINORS=y
339CONFIG_SND_SOC=y
340CONFIG_SND_SOC_AU1XPSC=y
341CONFIG_SND_SOC_DB1200=y
342CONFIG_HIDRAW=y
343CONFIG_UHID=y
344CONFIG_USB_HIDDEV=y
345CONFIG_USB=y
346CONFIG_USB_DYNAMIC_MINORS=y
347CONFIG_USB_EHCI_HCD=y
348CONFIG_USB_EHCI_HCD_PLATFORM=y
349CONFIG_USB_EHCI_ROOT_HUB_TT=y
350CONFIG_USB_OHCI_HCD=y
351CONFIG_USB_OHCI_HCD_PLATFORM=y
352CONFIG_USB_STORAGE=y
353CONFIG_MMC=y
354CONFIG_MMC_AU1X=y
355CONFIG_NEW_LEDS=y
356CONFIG_LEDS_CLASS=y
357CONFIG_RTC_CLASS=y
358CONFIG_RTC_DRV_AU1XXX=y
359CONFIG_EXT4_FS=y
360CONFIG_EXT4_FS_POSIX_ACL=y
361CONFIG_EXT4_FS_SECURITY=y
362CONFIG_XFS_FS=y
363CONFIG_XFS_POSIX_ACL=y
364CONFIG_VFAT_FS=y
365CONFIG_TMPFS=y
366CONFIG_TMPFS_POSIX_ACL=y
367CONFIG_CONFIGFS_FS=y
368CONFIG_JFFS2_FS=y
369CONFIG_JFFS2_SUMMARY=y
370CONFIG_JFFS2_FS_XATTR=y
371CONFIG_JFFS2_COMPRESSION_OPTIONS=y
372CONFIG_JFFS2_LZO=y
373CONFIG_JFFS2_CMODE_FAVOURLZO=y
374CONFIG_SQUASHFS=y
375CONFIG_SQUASHFS_LZO=y
376CONFIG_SQUASHFS_XZ=y
377CONFIG_NFS_FS=y
378CONFIG_NFS_V3_ACL=y
379CONFIG_NFS_V4=y
380CONFIG_NFS_V4_1=y
381CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
382CONFIG_ROOT_NFS=y
383CONFIG_NFSD=y
384CONFIG_NFSD_V3_ACL=y
385CONFIG_NFSD_V4=y
386CONFIG_NLS_CODEPAGE_437=y
387CONFIG_NLS_CODEPAGE_850=y
388CONFIG_NLS_CODEPAGE_852=y
389CONFIG_NLS_CODEPAGE_1250=y
390CONFIG_NLS_ASCII=y
391CONFIG_NLS_ISO8859_1=y
392CONFIG_NLS_ISO8859_2=y
393CONFIG_NLS_ISO8859_15=y
394CONFIG_NLS_UTF8=y
395CONFIG_MAGIC_SYSRQ=y
396CONFIG_STRIP_ASM_SYMS=y
397CONFIG_SECURITYFS=y
398CONFIG_CRYPTO_USER=y
399CONFIG_CRYPTO_NULL=y
400CONFIG_CRYPTO_CRYPTD=y
401CONFIG_CRYPTO_CCM=y
402CONFIG_CRYPTO_GCM=y
403CONFIG_CRYPTO_CTS=y
404CONFIG_CRYPTO_LRW=y
405CONFIG_CRYPTO_PCBC=y
406CONFIG_CRYPTO_XTS=y
407CONFIG_CRYPTO_XCBC=y
408CONFIG_CRYPTO_VMAC=y
409CONFIG_CRYPTO_MD4=y
410CONFIG_CRYPTO_MICHAEL_MIC=y
411CONFIG_CRYPTO_RMD128=y
412CONFIG_CRYPTO_RMD160=y
413CONFIG_CRYPTO_RMD256=y
414CONFIG_CRYPTO_RMD320=y
415CONFIG_CRYPTO_SHA256=y
416CONFIG_CRYPTO_SHA512=y
417CONFIG_CRYPTO_TGR192=y
418CONFIG_CRYPTO_WP512=y
419CONFIG_CRYPTO_ANUBIS=y
420CONFIG_CRYPTO_BLOWFISH=y
421CONFIG_CRYPTO_CAMELLIA=y
422CONFIG_CRYPTO_CAST5=y
423CONFIG_CRYPTO_CAST6=y
424CONFIG_CRYPTO_FCRYPT=y
425CONFIG_CRYPTO_KHAZAD=y
426CONFIG_CRYPTO_SALSA20=y
427CONFIG_CRYPTO_SEED=y
428CONFIG_CRYPTO_SERPENT=y
429CONFIG_CRYPTO_TEA=y
430CONFIG_CRYPTO_TWOFISH=y
431CONFIG_CRYPTO_ZLIB=y
432CONFIG_CRYPTO_LZO=y
433CONFIG_CRYPTO_USER_API_HASH=y
434CONFIG_CRYPTO_USER_API_SKCIPHER=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
new file mode 100644
index 000000000000..c99b6eeda90b
--- /dev/null
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -0,0 +1,245 @@
1CONFIG_MIPS_ALCHEMY=y
2CONFIG_MIPS_DB1XXX=y
3CONFIG_CMA=y
4CONFIG_CMA_DEBUG=y
5CONFIG_HZ_100=y
6CONFIG_LOCALVERSION="-db1xxx"
7CONFIG_KERNEL_XZ=y
8CONFIG_DEFAULT_HOSTNAME="db1xxx"
9CONFIG_SYSVIPC=y
10CONFIG_POSIX_MQUEUE=y
11CONFIG_FHANDLE=y
12CONFIG_AUDIT=y
13CONFIG_NO_HZ=y
14CONFIG_HIGH_RES_TIMERS=y
15CONFIG_LOG_BUF_SHIFT=16
16CONFIG_CGROUPS=y
17CONFIG_CGROUP_FREEZER=y
18CONFIG_CGROUP_DEVICE=y
19CONFIG_CPUSETS=y
20CONFIG_CGROUP_CPUACCT=y
21CONFIG_RESOURCE_COUNTERS=y
22CONFIG_MEMCG=y
23CONFIG_MEMCG_SWAP=y
24CONFIG_MEMCG_KMEM=y
25CONFIG_CGROUP_SCHED=y
26CONFIG_CFS_BANDWIDTH=y
27CONFIG_RT_GROUP_SCHED=y
28CONFIG_BLK_CGROUP=y
29CONFIG_KALLSYMS_ALL=y
30CONFIG_EMBEDDED=y
31CONFIG_SLAB=y
32CONFIG_BLK_DEV_BSGLIB=y
33CONFIG_PARTITION_ADVANCED=y
34CONFIG_DEFAULT_NOOP=y
35CONFIG_PCI=y
36CONFIG_PCI_REALLOC_ENABLE_AUTO=y
37CONFIG_PCCARD=y
38CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
39CONFIG_PM_RUNTIME=y
40CONFIG_NET=y
41CONFIG_PACKET=y
42CONFIG_PACKET_DIAG=y
43CONFIG_UNIX=y
44CONFIG_UNIX_DIAG=y
45CONFIG_XFRM_USER=y
46CONFIG_XFRM_SUB_POLICY=y
47CONFIG_XFRM_MIGRATE=y
48CONFIG_INET=y
49CONFIG_IP_MULTICAST=y
50CONFIG_IP_ADVANCED_ROUTER=y
51CONFIG_IP_FIB_TRIE_STATS=y
52CONFIG_NET_IPIP=y
53CONFIG_NET_IPGRE_DEMUX=y
54CONFIG_NET_IPGRE=y
55CONFIG_NET_IPGRE_BROADCAST=y
56CONFIG_SYN_COOKIES=y
57CONFIG_INET_AH=y
58CONFIG_INET_ESP=y
59CONFIG_INET_IPCOMP=y
60CONFIG_INET_UDP_DIAG=y
61CONFIG_TCP_CONG_ADVANCED=y
62CONFIG_TCP_CONG_VENO=y
63CONFIG_DEFAULT_VENO=y
64CONFIG_IPV6_ROUTER_PREF=y
65CONFIG_IPV6_ROUTE_INFO=y
66CONFIG_IPV6_OPTIMISTIC_DAD=y
67CONFIG_INET6_AH=y
68CONFIG_INET6_ESP=y
69CONFIG_INET6_IPCOMP=y
70CONFIG_IPV6_MIP6=y
71CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
72CONFIG_IPV6_VTI=y
73CONFIG_IPV6_SIT_6RD=y
74CONFIG_IPV6_GRE=y
75CONFIG_IPV6_MULTIPLE_TABLES=y
76CONFIG_IPV6_SUBTREES=y
77CONFIG_IPV6_MROUTE=y
78CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
79CONFIG_IPV6_PIMSM_V2=y
80CONFIG_BRIDGE=y
81CONFIG_NETLINK_MMAP=y
82CONFIG_NETLINK_DIAG=y
83CONFIG_IRDA=y
84CONFIG_IRLAN=y
85CONFIG_IRCOMM=y
86CONFIG_IRDA_ULTRA=y
87CONFIG_IRDA_CACHE_LAST_LSAP=y
88CONFIG_IRDA_FAST_RR=y
89CONFIG_AU1000_FIR=y
90CONFIG_BT=y
91CONFIG_BT_RFCOMM=y
92CONFIG_BT_RFCOMM_TTY=y
93CONFIG_BT_BNEP=y
94CONFIG_BT_BNEP_MC_FILTER=y
95CONFIG_BT_BNEP_PROTO_FILTER=y
96CONFIG_BT_HIDP=y
97CONFIG_BT_HCIBTUSB=y
98CONFIG_CFG80211=y
99CONFIG_CFG80211_WEXT=y
100CONFIG_MAC80211=y
101CONFIG_DEVTMPFS=y
102CONFIG_DEVTMPFS_MOUNT=y
103CONFIG_MTD=y
104CONFIG_MTD_CMDLINE_PARTS=y
105CONFIG_MTD_BLOCK=y
106CONFIG_MTD_CFI=y
107CONFIG_MTD_CFI_ADV_OPTIONS=y
108CONFIG_MTD_CFI_AMDSTD=y
109CONFIG_MTD_PHYSMAP=y
110CONFIG_MTD_M25P80=y
111CONFIG_MTD_SST25L=y
112CONFIG_MTD_NAND=y
113CONFIG_MTD_NAND_ECC_BCH=y
114CONFIG_MTD_NAND_AU1550=y
115CONFIG_MTD_NAND_PLATFORM=y
116CONFIG_EEPROM_AT24=y
117CONFIG_EEPROM_AT25=y
118CONFIG_SCSI_TGT=y
119CONFIG_BLK_DEV_SD=y
120CONFIG_CHR_DEV_SG=y
121CONFIG_SCSI_MULTI_LUN=y
122CONFIG_ATA=y
123CONFIG_PATA_HPT37X=y
124CONFIG_PATA_HPT3X2N=y
125CONFIG_PATA_PCMCIA=y
126CONFIG_PATA_PLATFORM=y
127CONFIG_NETDEVICES=y
128CONFIG_NLMON=y
129CONFIG_PCMCIA_3C589=y
130CONFIG_MIPS_AU1X00_ENET=y
131CONFIG_SMC91X=y
132CONFIG_SMSC911X=y
133CONFIG_AMD_PHY=y
134CONFIG_SMSC_PHY=y
135CONFIG_INPUT_EVDEV=y
136CONFIG_KEYBOARD_GPIO=y
137CONFIG_INPUT_TOUCHSCREEN=y
138CONFIG_TOUCHSCREEN_ADS7846=y
139CONFIG_TOUCHSCREEN_WM97XX=y
140CONFIG_INPUT_MISC=y
141CONFIG_INPUT_UINPUT=y
142CONFIG_SERIAL_8250=y
143CONFIG_SERIAL_8250_CONSOLE=y
144CONFIG_TTY_PRINTK=y
145CONFIG_I2C=y
146CONFIG_I2C_CHARDEV=y
147CONFIG_I2C_AU1550=y
148CONFIG_SPI=y
149CONFIG_SPI_AU1550=y
150CONFIG_SPI_GPIO=y
151CONFIG_SENSORS_ADM1025=y
152CONFIG_SENSORS_LM70=y
153CONFIG_FB=y
154CONFIG_FB_AU1100=y
155CONFIG_FB_AU1200=y
156CONFIG_FRAMEBUFFER_CONSOLE=y
157CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
158CONFIG_SOUND=y
159CONFIG_SND=y
160CONFIG_SND_SEQUENCER=y
161CONFIG_SND_HRTIMER=y
162CONFIG_SND_DYNAMIC_MINORS=y
163CONFIG_SND_AC97_POWER_SAVE=y
164CONFIG_SND_AC97_POWER_SAVE_DEFAULT=1
165CONFIG_SND_SOC=y
166CONFIG_SND_SOC_AU1XPSC=y
167CONFIG_SND_SOC_AU1XAUDIO=y
168CONFIG_SND_SOC_DB1000=y
169CONFIG_SND_SOC_DB1200=y
170CONFIG_HIDRAW=y
171CONFIG_UHID=y
172CONFIG_HID_LOGITECH=y
173CONFIG_HID_LOGITECH_DJ=y
174CONFIG_USB_HIDDEV=y
175CONFIG_USB=y
176CONFIG_USB_DYNAMIC_MINORS=y
177CONFIG_USB_OTG=y
178CONFIG_USB_EHCI_HCD=y
179CONFIG_USB_EHCI_ROOT_HUB_TT=y
180CONFIG_USB_EHCI_HCD_PLATFORM=y
181CONFIG_USB_OHCI_HCD=y
182CONFIG_USB_OHCI_HCD_PLATFORM=y
183CONFIG_USB_STORAGE=y
184CONFIG_MMC=y
185CONFIG_MMC_CLKGATE=y
186CONFIG_SDIO_UART=y
187CONFIG_MMC_AU1X=y
188CONFIG_NEW_LEDS=y
189CONFIG_LEDS_CLASS=y
190CONFIG_LEDS_TRIGGERS=y
191CONFIG_RTC_CLASS=y
192CONFIG_RTC_DRV_AU1XXX=y
193CONFIG_FIRMWARE_MEMMAP=y
194CONFIG_EXT4_FS=y
195CONFIG_EXT4_FS_POSIX_ACL=y
196CONFIG_EXT4_FS_SECURITY=y
197CONFIG_XFS_FS=y
198CONFIG_XFS_POSIX_ACL=y
199CONFIG_FANOTIFY=y
200CONFIG_FUSE_FS=y
201CONFIG_CUSE=y
202CONFIG_VFAT_FS=y
203CONFIG_TMPFS=y
204CONFIG_TMPFS_POSIX_ACL=y
205CONFIG_CONFIGFS_FS=y
206CONFIG_JFFS2_FS=y
207CONFIG_JFFS2_SUMMARY=y
208CONFIG_JFFS2_COMPRESSION_OPTIONS=y
209CONFIG_JFFS2_LZO=y
210CONFIG_JFFS2_RUBIN=y
211CONFIG_SQUASHFS=y
212CONFIG_SQUASHFS_FILE_DIRECT=y
213CONFIG_SQUASHFS_XATTR=y
214CONFIG_SQUASHFS_LZO=y
215CONFIG_SQUASHFS_XZ=y
216CONFIG_F2FS_FS=y
217CONFIG_F2FS_FS_SECURITY=y
218CONFIG_NFS_FS=y
219CONFIG_NFS_V3_ACL=y
220CONFIG_NFS_V4=y
221CONFIG_NFS_V4_1=y
222CONFIG_NFS_V4_2=y
223CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="local"
224CONFIG_NFS_V4_1_MIGRATION=y
225CONFIG_NFSD=y
226CONFIG_NFSD_V3_ACL=y
227CONFIG_NFSD_V4=y
228CONFIG_NLS_CODEPAGE_437=y
229CONFIG_NLS_CODEPAGE_850=y
230CONFIG_NLS_CODEPAGE_852=y
231CONFIG_NLS_CODEPAGE_1250=y
232CONFIG_NLS_ASCII=y
233CONFIG_NLS_ISO8859_1=y
234CONFIG_NLS_ISO8859_2=y
235CONFIG_NLS_ISO8859_15=y
236CONFIG_NLS_UTF8=y
237CONFIG_MAGIC_SYSRQ=y
238CONFIG_SECURITYFS=y
239CONFIG_CRYPTO_USER=y
240CONFIG_CRYPTO_CRYPTD=y
241CONFIG_CRYPTO_USER_API_HASH=y
242CONFIG_CRYPTO_USER_API_SKCIPHER=y
243CONFIG_CRC32_SLICEBY4=y
244CONFIG_FONTS=y
245CONFIG_FONT_8x8=y
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
new file mode 100644
index 000000000000..ea1761f0f917
--- /dev/null
+++ b/arch/mips/configs/loongson3_defconfig
@@ -0,0 +1,362 @@
1CONFIG_MACH_LOONGSON=y
2CONFIG_SWIOTLB=y
3CONFIG_LEMOTE_MACH3A=y
4CONFIG_CPU_LOONGSON3=y
5CONFIG_64BIT=y
6CONFIG_PAGE_SIZE_16KB=y
7CONFIG_KSM=y
8CONFIG_SMP=y
9CONFIG_NR_CPUS=4
10CONFIG_HZ_256=y
11CONFIG_PREEMPT=y
12CONFIG_KEXEC=y
13# CONFIG_LOCALVERSION_AUTO is not set
14CONFIG_KERNEL_LZMA=y
15CONFIG_SYSVIPC=y
16CONFIG_POSIX_MQUEUE=y
17CONFIG_AUDIT=y
18CONFIG_NO_HZ=y
19CONFIG_HIGH_RES_TIMERS=y
20CONFIG_BSD_PROCESS_ACCT=y
21CONFIG_BSD_PROCESS_ACCT_V3=y
22CONFIG_TASKSTATS=y
23CONFIG_TASK_DELAY_ACCT=y
24CONFIG_TASK_XACCT=y
25CONFIG_TASK_IO_ACCOUNTING=y
26CONFIG_LOG_BUF_SHIFT=14
27CONFIG_CPUSETS=y
28CONFIG_RESOURCE_COUNTERS=y
29CONFIG_MEMCG=y
30CONFIG_MEMCG_SWAP=y
31CONFIG_BLK_CGROUP=y
32CONFIG_SCHED_AUTOGROUP=y
33CONFIG_SYSFS_DEPRECATED=y
34CONFIG_RELAY=y
35CONFIG_BLK_DEV_INITRD=y
36CONFIG_RD_BZIP2=y
37CONFIG_RD_LZMA=y
38CONFIG_SYSCTL_SYSCALL=y
39CONFIG_EMBEDDED=y
40CONFIG_MODULES=y
41CONFIG_MODULE_FORCE_LOAD=y
42CONFIG_MODULE_UNLOAD=y
43CONFIG_MODULE_FORCE_UNLOAD=y
44CONFIG_MODVERSIONS=y
45CONFIG_BLK_DEV_INTEGRITY=y
46CONFIG_PARTITION_ADVANCED=y
47CONFIG_IOSCHED_DEADLINE=m
48CONFIG_CFQ_GROUP_IOSCHED=y
49CONFIG_PCI=y
50CONFIG_HT_PCI=y
51CONFIG_PCIEPORTBUS=y
52CONFIG_HOTPLUG_PCI_PCIE=y
53# CONFIG_PCIEAER is not set
54CONFIG_PCIEASPM_PERFORMANCE=y
55CONFIG_HOTPLUG_PCI=y
56CONFIG_HOTPLUG_PCI_SHPC=m
57CONFIG_BINFMT_MISC=m
58CONFIG_MIPS32_COMPAT=y
59CONFIG_MIPS32_O32=y
60CONFIG_MIPS32_N32=y
61CONFIG_PM_RUNTIME=y
62CONFIG_PACKET=y
63CONFIG_UNIX=y
64CONFIG_XFRM_USER=y
65CONFIG_NET_KEY=y
66CONFIG_INET=y
67CONFIG_IP_MULTICAST=y
68CONFIG_IP_ADVANCED_ROUTER=y
69CONFIG_IP_MULTIPLE_TABLES=y
70CONFIG_IP_ROUTE_MULTIPATH=y
71CONFIG_IP_ROUTE_VERBOSE=y
72CONFIG_NETFILTER=y
73CONFIG_NETFILTER_NETLINK_LOG=m
74CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
75CONFIG_NETFILTER_XT_TARGET_MARK=m
76CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
77CONFIG_NETFILTER_XT_MATCH_COMMENT=m
78CONFIG_NETFILTER_XT_MATCH_DCCP=m
79CONFIG_NETFILTER_XT_MATCH_ESP=m
80CONFIG_NETFILTER_XT_MATCH_LENGTH=m
81CONFIG_NETFILTER_XT_MATCH_LIMIT=m
82CONFIG_NETFILTER_XT_MATCH_MAC=m
83CONFIG_NETFILTER_XT_MATCH_MARK=m
84CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
85CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
86CONFIG_NETFILTER_XT_MATCH_QUOTA=m
87CONFIG_NETFILTER_XT_MATCH_REALM=m
88CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
89CONFIG_NETFILTER_XT_MATCH_STRING=m
90CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
91CONFIG_IP_VS=m
92CONFIG_IP_NF_IPTABLES=m
93CONFIG_IP_NF_MATCH_AH=m
94CONFIG_IP_NF_MATCH_ECN=m
95CONFIG_IP_NF_MATCH_TTL=m
96CONFIG_IP_NF_FILTER=m
97CONFIG_IP_NF_TARGET_REJECT=m
98CONFIG_IP_NF_TARGET_ULOG=m
99CONFIG_IP_NF_MANGLE=m
100CONFIG_IP_NF_TARGET_ECN=m
101CONFIG_IP_NF_TARGET_TTL=m
102CONFIG_IP_NF_RAW=m
103CONFIG_IP_NF_ARPTABLES=m
104CONFIG_IP_NF_ARPFILTER=m
105CONFIG_IP_NF_ARP_MANGLE=m
106CONFIG_IP_SCTP=m
107CONFIG_L2TP=m
108CONFIG_BRIDGE=m
109CONFIG_CFG80211=m
110CONFIG_CFG80211_WEXT=y
111CONFIG_MAC80211=m
112CONFIG_RFKILL=m
113CONFIG_RFKILL_INPUT=y
114CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
115CONFIG_DEVTMPFS=y
116CONFIG_DEVTMPFS_MOUNT=y
117CONFIG_MTD=m
118CONFIG_BLK_DEV_LOOP=y
119CONFIG_BLK_DEV_CRYPTOLOOP=y
120CONFIG_BLK_DEV_RAM=y
121CONFIG_BLK_DEV_RAM_SIZE=8192
122CONFIG_RAID_ATTRS=m
123CONFIG_SCSI_TGT=y
124CONFIG_BLK_DEV_SD=y
125CONFIG_BLK_DEV_SR=y
126CONFIG_CHR_DEV_SG=y
127CONFIG_CHR_DEV_SCH=m
128CONFIG_SCSI_MULTI_LUN=y
129CONFIG_SCSI_CONSTANTS=y
130CONFIG_SCSI_LOGGING=y
131CONFIG_SCSI_SPI_ATTRS=m
132CONFIG_SCSI_FC_ATTRS=m
133CONFIG_ISCSI_TCP=m
134CONFIG_MEGARAID_NEWGEN=y
135CONFIG_MEGARAID_MM=y
136CONFIG_MEGARAID_MAILBOX=y
137CONFIG_MEGARAID_LEGACY=y
138CONFIG_MEGARAID_SAS=y
139CONFIG_ATA=y
140CONFIG_SATA_AHCI=y
141CONFIG_PATA_ATIIXP=y
142CONFIG_MD=y
143CONFIG_BLK_DEV_MD=m
144CONFIG_MD_LINEAR=m
145CONFIG_MD_RAID0=m
146CONFIG_MD_RAID1=m
147CONFIG_MD_RAID10=m
148CONFIG_MD_RAID456=m
149CONFIG_MD_MULTIPATH=m
150CONFIG_BLK_DEV_DM=m
151CONFIG_DM_CRYPT=m
152CONFIG_DM_SNAPSHOT=m
153CONFIG_DM_MIRROR=m
154CONFIG_DM_ZERO=m
155CONFIG_TARGET_CORE=m
156CONFIG_TCM_IBLOCK=m
157CONFIG_TCM_FILEIO=m
158CONFIG_TCM_PSCSI=m
159CONFIG_LOOPBACK_TARGET=m
160CONFIG_ISCSI_TARGET=m
161CONFIG_NETDEVICES=y
162CONFIG_TUN=m
163# CONFIG_NET_VENDOR_3COM is not set
164# CONFIG_NET_VENDOR_ADAPTEC is not set
165# CONFIG_NET_VENDOR_ALTEON is not set
166# CONFIG_NET_VENDOR_AMD is not set
167# CONFIG_NET_VENDOR_ARC is not set
168# CONFIG_NET_VENDOR_ATHEROS is not set
169# CONFIG_NET_CADENCE is not set
170# CONFIG_NET_VENDOR_BROADCOM is not set
171# CONFIG_NET_VENDOR_BROCADE is not set
172# CONFIG_NET_VENDOR_CHELSIO is not set
173# CONFIG_NET_VENDOR_CIRRUS is not set
174# CONFIG_NET_VENDOR_CISCO is not set
175# CONFIG_NET_VENDOR_DEC is not set
176# CONFIG_NET_VENDOR_DLINK is not set
177# CONFIG_NET_VENDOR_EMULEX is not set
178# CONFIG_NET_VENDOR_EXAR is not set
179# CONFIG_NET_VENDOR_HP is not set
180CONFIG_E1000=y
181CONFIG_E1000E=y
182CONFIG_IGB=y
183CONFIG_IXGB=y
184CONFIG_IXGBE=y
185# CONFIG_NET_VENDOR_I825XX is not set
186# CONFIG_NET_VENDOR_MARVELL is not set
187# CONFIG_NET_VENDOR_MELLANOX is not set
188# CONFIG_NET_VENDOR_MICREL is not set
189# CONFIG_NET_VENDOR_MYRI is not set
190# CONFIG_NET_VENDOR_NATSEMI is not set
191# CONFIG_NET_VENDOR_NVIDIA is not set
192# CONFIG_NET_VENDOR_OKI is not set
193# CONFIG_NET_PACKET_ENGINE is not set
194# CONFIG_NET_VENDOR_QLOGIC is not set
195CONFIG_8139CP=m
196CONFIG_8139TOO=m
197CONFIG_R8169=y
198# CONFIG_NET_VENDOR_RDC is not set
199# CONFIG_NET_VENDOR_SEEQ is not set
200# CONFIG_NET_VENDOR_SILAN is not set
201# CONFIG_NET_VENDOR_SIS is not set
202# CONFIG_NET_VENDOR_SMSC is not set
203# CONFIG_NET_VENDOR_STMICRO is not set
204# CONFIG_NET_VENDOR_SUN is not set
205# CONFIG_NET_VENDOR_TEHUTI is not set
206# CONFIG_NET_VENDOR_TI is not set
207# CONFIG_NET_VENDOR_TOSHIBA is not set
208# CONFIG_NET_VENDOR_VIA is not set
209# CONFIG_NET_VENDOR_WIZNET is not set
210CONFIG_PPP=m
211CONFIG_PPP_BSDCOMP=m
212CONFIG_PPP_DEFLATE=m
213CONFIG_PPP_FILTER=y
214CONFIG_PPP_MPPE=m
215CONFIG_PPP_MULTILINK=y
216CONFIG_PPPOE=m
217CONFIG_PPPOL2TP=m
218CONFIG_PPP_ASYNC=m
219CONFIG_PPP_SYNC_TTY=m
220CONFIG_ATH_CARDS=m
221CONFIG_ATH9K=m
222CONFIG_HOSTAP=m
223CONFIG_INPUT_POLLDEV=m
224CONFIG_INPUT_SPARSEKMAP=y
225CONFIG_INPUT_EVDEV=y
226CONFIG_KEYBOARD_XTKBD=m
227CONFIG_MOUSE_PS2_SENTELIC=y
228CONFIG_MOUSE_SERIAL=m
229CONFIG_INPUT_MISC=y
230CONFIG_INPUT_UINPUT=m
231CONFIG_SERIO_SERPORT=m
232CONFIG_SERIO_RAW=m
233CONFIG_LEGACY_PTY_COUNT=16
234CONFIG_SERIAL_NONSTANDARD=y
235CONFIG_SERIAL_8250=y
236CONFIG_SERIAL_8250_CONSOLE=y
237CONFIG_SERIAL_8250_NR_UARTS=16
238CONFIG_SERIAL_8250_EXTENDED=y
239CONFIG_SERIAL_8250_MANY_PORTS=y
240CONFIG_SERIAL_8250_SHARE_IRQ=y
241CONFIG_SERIAL_8250_RSA=y
242CONFIG_HW_RANDOM=y
243CONFIG_RAW_DRIVER=m
244CONFIG_I2C_CHARDEV=y
245CONFIG_I2C_PIIX4=y
246CONFIG_SENSORS_LM75=m
247CONFIG_SENSORS_LM93=m
248CONFIG_SENSORS_W83627HF=m
249CONFIG_MEDIA_SUPPORT=m
250CONFIG_MEDIA_CAMERA_SUPPORT=y
251CONFIG_MEDIA_USB_SUPPORT=y
252CONFIG_USB_VIDEO_CLASS=m
253CONFIG_DRM=y
254CONFIG_DRM_RADEON=y
255CONFIG_VIDEO_OUTPUT_CONTROL=y
256CONFIG_FB_RADEON=y
257CONFIG_LCD_CLASS_DEVICE=y
258CONFIG_LCD_PLATFORM=m
259CONFIG_BACKLIGHT_GENERIC=m
260# CONFIG_VGA_CONSOLE is not set
261CONFIG_FRAMEBUFFER_CONSOLE=y
262CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
263CONFIG_LOGO=y
264CONFIG_SOUND=y
265CONFIG_SND=m
266CONFIG_SND_SEQUENCER=m
267CONFIG_SND_SEQ_DUMMY=m
268# CONFIG_SND_ISA is not set
269CONFIG_SND_HDA_INTEL=m
270CONFIG_SND_HDA_PATCH_LOADER=y
271CONFIG_SND_HDA_CODEC_REALTEK=m
272CONFIG_SND_HDA_CODEC_CONEXANT=m
273# CONFIG_SND_USB is not set
274CONFIG_HID_A4TECH=m
275CONFIG_HID_SUNPLUS=m
276CONFIG_USB=y
277CONFIG_USB_MON=y
278CONFIG_USB_XHCI_HCD=m
279CONFIG_USB_EHCI_HCD=y
280CONFIG_USB_EHCI_ROOT_HUB_TT=y
281CONFIG_USB_OHCI_HCD=y
282CONFIG_USB_UHCI_HCD=m
283CONFIG_USB_STORAGE=m
284CONFIG_USB_SERIAL=m
285CONFIG_USB_SERIAL_OPTION=m
286CONFIG_RTC_CLASS=y
287CONFIG_RTC_DRV_CMOS=y
288CONFIG_DMADEVICES=y
289CONFIG_PM_DEVFREQ=y
290CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
291CONFIG_DEVFREQ_GOV_PERFORMANCE=y
292CONFIG_DEVFREQ_GOV_POWERSAVE=y
293CONFIG_DEVFREQ_GOV_USERSPACE=y
294CONFIG_EXT2_FS=y
295CONFIG_EXT2_FS_XATTR=y
296CONFIG_EXT2_FS_POSIX_ACL=y
297CONFIG_EXT2_FS_SECURITY=y
298CONFIG_EXT3_FS=y
299CONFIG_EXT3_FS_POSIX_ACL=y
300CONFIG_EXT3_FS_SECURITY=y
301CONFIG_EXT4_FS=y
302CONFIG_EXT4_FS_POSIX_ACL=y
303CONFIG_EXT4_FS_SECURITY=y
304CONFIG_QUOTA=y
305# CONFIG_PRINT_QUOTA_WARNING is not set
306CONFIG_AUTOFS4_FS=y
307CONFIG_FUSE_FS=m
308CONFIG_ISO9660_FS=m
309CONFIG_JOLIET=y
310CONFIG_MSDOS_FS=m
311CONFIG_VFAT_FS=m
312CONFIG_FAT_DEFAULT_CODEPAGE=936
313CONFIG_FAT_DEFAULT_IOCHARSET="gb2312"
314CONFIG_PROC_KCORE=y
315CONFIG_TMPFS=y
316CONFIG_TMPFS_POSIX_ACL=y
317CONFIG_CONFIGFS_FS=y
318CONFIG_CRAMFS=m
319CONFIG_SQUASHFS=y
320CONFIG_SQUASHFS_XATTR=y
321CONFIG_NFS_FS=m
322CONFIG_NFS_V3_ACL=y
323CONFIG_NFS_V4=m
324CONFIG_NFSD=m
325CONFIG_NFSD_V3_ACL=y
326CONFIG_NFSD_V4=y
327CONFIG_CIFS=m
328CONFIG_NLS_CODEPAGE_437=y
329CONFIG_NLS_CODEPAGE_936=y
330CONFIG_NLS_ASCII=y
331CONFIG_NLS_UTF8=y
332CONFIG_PRINTK_TIME=y
333CONFIG_FRAME_WARN=1024
334CONFIG_STRIP_ASM_SYMS=y
335CONFIG_MAGIC_SYSRQ=y
336# CONFIG_SCHED_DEBUG is not set
337# CONFIG_DEBUG_PREEMPT is not set
338# CONFIG_RCU_CPU_STALL_VERBOSE is not set
339# CONFIG_FTRACE is not set
340CONFIG_SECURITY=y
341CONFIG_SECURITYFS=y
342CONFIG_SECURITY_NETWORK=y
343CONFIG_SECURITY_PATH=y
344CONFIG_SECURITY_SELINUX=y
345CONFIG_SECURITY_SELINUX_BOOTPARAM=y
346CONFIG_SECURITY_SELINUX_DISABLE=y
347CONFIG_DEFAULT_SECURITY_DAC=y
348CONFIG_CRYPTO_AUTHENC=m
349CONFIG_CRYPTO_HMAC=y
350CONFIG_CRYPTO_MD5=y
351CONFIG_CRYPTO_SHA512=m
352CONFIG_CRYPTO_TGR192=m
353CONFIG_CRYPTO_WP512=m
354CONFIG_CRYPTO_ANUBIS=m
355CONFIG_CRYPTO_BLOWFISH=m
356CONFIG_CRYPTO_CAST5=m
357CONFIG_CRYPTO_CAST6=m
358CONFIG_CRYPTO_KHAZAD=m
359CONFIG_CRYPTO_SERPENT=m
360CONFIG_CRYPTO_TEA=m
361CONFIG_CRYPTO_TWOFISH=m
362CONFIG_CRYPTO_DEFLATE=m
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index ce1d3eeeb737..b745b6a9f322 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -1,7 +1,9 @@
1CONFIG_MIPS_MALTA=y 1CONFIG_MIPS_MALTA=y
2CONFIG_CPU_LITTLE_ENDIAN=y 2CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y 3CONFIG_CPU_MIPS32_R2=y
4CONFIG_PAGE_SIZE_16KB=y
4CONFIG_MIPS_MT_SMP=y 5CONFIG_MIPS_MT_SMP=y
6CONFIG_NR_CPUS=8
5CONFIG_HZ_100=y 7CONFIG_HZ_100=y
6CONFIG_SYSVIPC=y 8CONFIG_SYSVIPC=y
7CONFIG_NO_HZ=y 9CONFIG_NO_HZ=y
@@ -42,7 +44,6 @@ CONFIG_INET_IPCOMP=m
42CONFIG_INET_XFRM_MODE_TRANSPORT=m 44CONFIG_INET_XFRM_MODE_TRANSPORT=m
43CONFIG_INET_XFRM_MODE_TUNNEL=m 45CONFIG_INET_XFRM_MODE_TUNNEL=m
44CONFIG_TCP_MD5SIG=y 46CONFIG_TCP_MD5SIG=y
45CONFIG_IPV6_PRIVACY=y
46CONFIG_IPV6_ROUTER_PREF=y 47CONFIG_IPV6_ROUTER_PREF=y
47CONFIG_IPV6_ROUTE_INFO=y 48CONFIG_IPV6_ROUTE_INFO=y
48CONFIG_IPV6_OPTIMISTIC_DAD=y 49CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -68,7 +69,6 @@ CONFIG_NF_CONNTRACK_SANE=m
68CONFIG_NF_CONNTRACK_SIP=m 69CONFIG_NF_CONNTRACK_SIP=m
69CONFIG_NF_CONNTRACK_TFTP=m 70CONFIG_NF_CONNTRACK_TFTP=m
70CONFIG_NF_CT_NETLINK=m 71CONFIG_NF_CT_NETLINK=m
71CONFIG_NETFILTER_TPROXY=m
72CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 72CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
73CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 73CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
74CONFIG_NETFILTER_XT_TARGET_MARK=m 74CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -125,7 +125,6 @@ CONFIG_IP_VS_SH=m
125CONFIG_IP_VS_SED=m 125CONFIG_IP_VS_SED=m
126CONFIG_IP_VS_NQ=m 126CONFIG_IP_VS_NQ=m
127CONFIG_NF_CONNTRACK_IPV4=m 127CONFIG_NF_CONNTRACK_IPV4=m
128CONFIG_IP_NF_QUEUE=m
129CONFIG_IP_NF_IPTABLES=m 128CONFIG_IP_NF_IPTABLES=m
130CONFIG_IP_NF_MATCH_AH=m 129CONFIG_IP_NF_MATCH_AH=m
131CONFIG_IP_NF_MATCH_ECN=m 130CONFIG_IP_NF_MATCH_ECN=m
@@ -185,7 +184,6 @@ CONFIG_ATALK=m
185CONFIG_DEV_APPLETALK=m 184CONFIG_DEV_APPLETALK=m
186CONFIG_IPDDP=m 185CONFIG_IPDDP=m
187CONFIG_IPDDP_ENCAP=y 186CONFIG_IPDDP_ENCAP=y
188CONFIG_IPDDP_DECAP=y
189CONFIG_PHONET=m 187CONFIG_PHONET=m
190CONFIG_NET_SCHED=y 188CONFIG_NET_SCHED=y
191CONFIG_NET_SCH_CBQ=m 189CONFIG_NET_SCH_CBQ=m
@@ -226,9 +224,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
226CONFIG_MAC80211_MESH=y 224CONFIG_MAC80211_MESH=y
227CONFIG_RFKILL=m 225CONFIG_RFKILL=m
228CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 226CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
227CONFIG_DEVTMPFS=y
229CONFIG_CONNECTOR=m 228CONFIG_CONNECTOR=m
230CONFIG_MTD=y 229CONFIG_MTD=y
231CONFIG_MTD_CHAR=y
232CONFIG_MTD_BLOCK=y 230CONFIG_MTD_BLOCK=y
233CONFIG_MTD_OOPS=m 231CONFIG_MTD_OOPS=m
234CONFIG_MTD_CFI=y 232CONFIG_MTD_CFI=y
@@ -328,7 +326,6 @@ CONFIG_LIBERTAS=m
328# CONFIG_INPUT_KEYBOARD is not set 326# CONFIG_INPUT_KEYBOARD is not set
329# CONFIG_INPUT_MOUSE is not set 327# CONFIG_INPUT_MOUSE is not set
330# CONFIG_SERIO_I8042 is not set 328# CONFIG_SERIO_I8042 is not set
331CONFIG_VT_HW_CONSOLE_BINDING=y
332CONFIG_SERIAL_8250=y 329CONFIG_SERIAL_8250=y
333CONFIG_SERIAL_8250_CONSOLE=y 330CONFIG_SERIAL_8250_CONSOLE=y
334# CONFIG_HWMON is not set 331# CONFIG_HWMON is not set
diff --git a/arch/mips/configs/malta_kvm_defconfig b/arch/mips/configs/malta_kvm_defconfig
index 341bb47204d6..4f7d952d8517 100644
--- a/arch/mips/configs/malta_kvm_defconfig
+++ b/arch/mips/configs/malta_kvm_defconfig
@@ -3,6 +3,7 @@ CONFIG_CPU_LITTLE_ENDIAN=y
3CONFIG_CPU_MIPS32_R2=y 3CONFIG_CPU_MIPS32_R2=y
4CONFIG_PAGE_SIZE_16KB=y 4CONFIG_PAGE_SIZE_16KB=y
5CONFIG_MIPS_MT_SMP=y 5CONFIG_MIPS_MT_SMP=y
6CONFIG_NR_CPUS=8
6CONFIG_HZ_100=y 7CONFIG_HZ_100=y
7CONFIG_SYSVIPC=y 8CONFIG_SYSVIPC=y
8CONFIG_NO_HZ=y 9CONFIG_NO_HZ=y
@@ -44,7 +45,6 @@ CONFIG_INET_IPCOMP=m
44CONFIG_INET_XFRM_MODE_TRANSPORT=m 45CONFIG_INET_XFRM_MODE_TRANSPORT=m
45CONFIG_INET_XFRM_MODE_TUNNEL=m 46CONFIG_INET_XFRM_MODE_TUNNEL=m
46CONFIG_TCP_MD5SIG=y 47CONFIG_TCP_MD5SIG=y
47CONFIG_IPV6_PRIVACY=y
48CONFIG_IPV6_ROUTER_PREF=y 48CONFIG_IPV6_ROUTER_PREF=y
49CONFIG_IPV6_ROUTE_INFO=y 49CONFIG_IPV6_ROUTE_INFO=y
50CONFIG_IPV6_OPTIMISTIC_DAD=y 50CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -70,7 +70,6 @@ CONFIG_NF_CONNTRACK_SANE=m
70CONFIG_NF_CONNTRACK_SIP=m 70CONFIG_NF_CONNTRACK_SIP=m
71CONFIG_NF_CONNTRACK_TFTP=m 71CONFIG_NF_CONNTRACK_TFTP=m
72CONFIG_NF_CT_NETLINK=m 72CONFIG_NF_CT_NETLINK=m
73CONFIG_NETFILTER_TPROXY=m
74CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 73CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
75CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 74CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
76CONFIG_NETFILTER_XT_TARGET_MARK=m 75CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -127,7 +126,6 @@ CONFIG_IP_VS_SH=m
127CONFIG_IP_VS_SED=m 126CONFIG_IP_VS_SED=m
128CONFIG_IP_VS_NQ=m 127CONFIG_IP_VS_NQ=m
129CONFIG_NF_CONNTRACK_IPV4=m 128CONFIG_NF_CONNTRACK_IPV4=m
130CONFIG_IP_NF_QUEUE=m
131CONFIG_IP_NF_IPTABLES=m 129CONFIG_IP_NF_IPTABLES=m
132CONFIG_IP_NF_MATCH_AH=m 130CONFIG_IP_NF_MATCH_AH=m
133CONFIG_IP_NF_MATCH_ECN=m 131CONFIG_IP_NF_MATCH_ECN=m
@@ -187,7 +185,6 @@ CONFIG_ATALK=m
187CONFIG_DEV_APPLETALK=m 185CONFIG_DEV_APPLETALK=m
188CONFIG_IPDDP=m 186CONFIG_IPDDP=m
189CONFIG_IPDDP_ENCAP=y 187CONFIG_IPDDP_ENCAP=y
190CONFIG_IPDDP_DECAP=y
191CONFIG_PHONET=m 188CONFIG_PHONET=m
192CONFIG_NET_SCHED=y 189CONFIG_NET_SCHED=y
193CONFIG_NET_SCH_CBQ=m 190CONFIG_NET_SCH_CBQ=m
@@ -228,9 +225,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
228CONFIG_MAC80211_MESH=y 225CONFIG_MAC80211_MESH=y
229CONFIG_RFKILL=m 226CONFIG_RFKILL=m
230CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 227CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
228CONFIG_DEVTMPFS=y
231CONFIG_CONNECTOR=m 229CONFIG_CONNECTOR=m
232CONFIG_MTD=y 230CONFIG_MTD=y
233CONFIG_MTD_CHAR=y
234CONFIG_MTD_BLOCK=y 231CONFIG_MTD_BLOCK=y
235CONFIG_MTD_OOPS=m 232CONFIG_MTD_OOPS=m
236CONFIG_MTD_CFI=y 233CONFIG_MTD_CFI=y
@@ -300,6 +297,7 @@ CONFIG_IFB=m
300CONFIG_MACVLAN=m 297CONFIG_MACVLAN=m
301CONFIG_TUN=m 298CONFIG_TUN=m
302CONFIG_VETH=m 299CONFIG_VETH=m
300CONFIG_VHOST_NET=m
303CONFIG_PCNET32=y 301CONFIG_PCNET32=y
304CONFIG_CHELSIO_T3=m 302CONFIG_CHELSIO_T3=m
305CONFIG_AX88796=m 303CONFIG_AX88796=m
@@ -329,7 +327,6 @@ CONFIG_LIBERTAS=m
329# CONFIG_INPUT_KEYBOARD is not set 327# CONFIG_INPUT_KEYBOARD is not set
330# CONFIG_INPUT_MOUSE is not set 328# CONFIG_INPUT_MOUSE is not set
331# CONFIG_SERIO_I8042 is not set 329# CONFIG_SERIO_I8042 is not set
332CONFIG_VT_HW_CONSOLE_BINDING=y
333CONFIG_SERIAL_8250=y 330CONFIG_SERIAL_8250=y
334CONFIG_SERIAL_8250_CONSOLE=y 331CONFIG_SERIAL_8250_CONSOLE=y
335# CONFIG_HWMON is not set 332# CONFIG_HWMON is not set
@@ -453,4 +450,3 @@ CONFIG_VIRTUALIZATION=y
453CONFIG_KVM=m 450CONFIG_KVM=m
454CONFIG_KVM_MIPS_DYN_TRANS=y 451CONFIG_KVM_MIPS_DYN_TRANS=y
455CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y 452CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y
456CONFIG_VHOST_NET=m
diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig
index 2b8558b71080..e36681c24ddc 100644
--- a/arch/mips/configs/malta_kvm_guest_defconfig
+++ b/arch/mips/configs/malta_kvm_guest_defconfig
@@ -44,7 +44,6 @@ CONFIG_INET_IPCOMP=m
 CONFIG_INET_XFRM_MODE_TRANSPORT=m
 CONFIG_INET_XFRM_MODE_TUNNEL=m
 CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -70,7 +69,6 @@ CONFIG_NF_CONNTRACK_SANE=m
 CONFIG_NF_CONNTRACK_SIP=m
 CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
-CONFIG_NETFILTER_TPROXY=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
 CONFIG_NETFILTER_XT_TARGET_MARK=m
@@ -127,7 +125,6 @@ CONFIG_IP_VS_SH=m
 CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_IP_NF_QUEUE=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -187,7 +184,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_PHONET=m
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
@@ -228,9 +224,9 @@ CONFIG_MAC80211_RC_DEFAULT_PID=y
 CONFIG_MAC80211_MESH=y
 CONFIG_RFKILL=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
 CONFIG_CONNECTOR=m
 CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
 CONFIG_MTD_OOPS=m
 CONFIG_MTD_CFI=y
@@ -331,7 +327,6 @@ CONFIG_LIBERTAS=m
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO_I8042 is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 # CONFIG_HWMON is not set
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 93057a760dfa..fb042ce86b4b 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -44,7 +44,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -55,7 +54,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -80,6 +78,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
index 4e54b75d89be..eb316447588c 100644
--- a/arch/mips/configs/maltasmtc_defconfig
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -1,6 +1,7 @@
 CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMTC=y
 # CONFIG_MIPS_MT_FPAFF is not set
 CONFIG_NR_CPUS=9
@@ -45,7 +46,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -56,7 +56,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -81,6 +80,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index d75931850392..10ef3bed5f43 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -1,10 +1,11 @@
 CONFIG_MIPS_MALTA=y
 CONFIG_CPU_LITTLE_ENDIAN=y
 CONFIG_CPU_MIPS32_R2=y
+CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
 CONFIG_MIPS_CMP=y
-CONFIG_NR_CPUS=2
+CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
 CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
@@ -47,7 +48,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -82,6 +82,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
new file mode 100644
index 000000000000..2d3002cba102
--- /dev/null
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -0,0 +1,200 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_3_5_FEATURES=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_SCHED_SMT=y
+CONFIG_MIPS_CMP=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_LOCALVERSION="cmp"
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
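Usage note (editorial, not part of the commit): once this defconfig is in the tree, a build for this Malta SMVP/EVA variant would normally be seeded with the standard kbuild target, for example "make ARCH=mips maltasmvp_eva_defconfig", followed by a regular cross-compiled kernel build; the file name above doubles as the make target name.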
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 9868fc9c1133..62344648eb7a 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -43,7 +43,6 @@ CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
@@ -54,7 +53,6 @@ CONFIG_ATALK=m
 CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_CBQ=m
 CONFIG_NET_SCH_HTB=m
@@ -79,6 +77,7 @@ CONFIG_NET_CLS_ACT=y
 CONFIG_NET_ACT_POLICE=y
 CONFIG_NET_CLS_IND=y
 # CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=m
 CONFIG_IDE=y
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
new file mode 100644
index 000000000000..e41c56e375b1
--- /dev/null
+++ b/arch/mips/include/asm/asm-eva.h
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ */
+
+#ifndef __ASM_ASM_EVA_H
+#define __ASM_ASM_EVA_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+	" .set push\n" \
+	" .set mips0\n" \
+	" .set eva\n" \
+	" "insn" "reg", "addr "\n" \
+	" .set pop\n"
+
+#define user_cache(op, base)		__BUILD_EVA_INSN("cachee", op, base)
+#define user_ll(reg, addr)		__BUILD_EVA_INSN("lle", reg, addr)
+#define user_sc(reg, addr)		__BUILD_EVA_INSN("sce", reg, addr)
+#define user_lw(reg, addr)		__BUILD_EVA_INSN("lwe", reg, addr)
+#define user_lwl(reg, addr)		__BUILD_EVA_INSN("lwle", reg, addr)
+#define user_lwr(reg, addr)		__BUILD_EVA_INSN("lwre", reg, addr)
+#define user_lh(reg, addr)		__BUILD_EVA_INSN("lhe", reg, addr)
+#define user_lb(reg, addr)		__BUILD_EVA_INSN("lbe", reg, addr)
+#define user_lbu(reg, addr)		__BUILD_EVA_INSN("lbue", reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)		user_lw(reg, addr)
+#define user_sw(reg, addr)		__BUILD_EVA_INSN("swe", reg, addr)
+#define user_swl(reg, addr)		__BUILD_EVA_INSN("swle", reg, addr)
+#define user_swr(reg, addr)		__BUILD_EVA_INSN("swre", reg, addr)
+#define user_sh(reg, addr)		__BUILD_EVA_INSN("she", reg, addr)
+#define user_sb(reg, addr)		__BUILD_EVA_INSN("sbe", reg, addr)
+/* No 64-bit EVA instruction for storing double words */
+#define user_sd(reg, addr)		user_sw(reg, addr)
+
+#else
+
+#define user_cache(op, base)		"cache " op ", " base "\n"
+#define user_ll(reg, addr)		"ll " reg ", " addr "\n"
+#define user_sc(reg, addr)		"sc " reg ", " addr "\n"
+#define user_lw(reg, addr)		"lw " reg ", " addr "\n"
+#define user_lwl(reg, addr)		"lwl " reg ", " addr "\n"
+#define user_lwr(reg, addr)		"lwr " reg ", " addr "\n"
+#define user_lh(reg, addr)		"lh " reg ", " addr "\n"
+#define user_lb(reg, addr)		"lb " reg ", " addr "\n"
+#define user_lbu(reg, addr)		"lbu " reg ", " addr "\n"
+#define user_sw(reg, addr)		"sw " reg ", " addr "\n"
+#define user_swl(reg, addr)		"swl " reg ", " addr "\n"
+#define user_swr(reg, addr)		"swr " reg ", " addr "\n"
+#define user_sh(reg, addr)		"sh " reg ", " addr "\n"
+#define user_sb(reg, addr)		"sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)		user_sw(reg, addr)
+#define user_ld(reg, addr)		user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)		"sd " reg", " addr "\n"
+#define user_ld(reg, addr)		"ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_EVA
+
+#define __BUILD_EVA_INSN(insn, reg, addr) \
+	.set push; \
+	.set mips0; \
+	.set eva; \
+	insn reg, addr; \
+	.set pop;
+
+#define user_cache(op, base)		__BUILD_EVA_INSN(cachee, op, base)
+#define user_ll(reg, addr)		__BUILD_EVA_INSN(lle, reg, addr)
+#define user_sc(reg, addr)		__BUILD_EVA_INSN(sce, reg, addr)
+#define user_lw(reg, addr)		__BUILD_EVA_INSN(lwe, reg, addr)
+#define user_lwl(reg, addr)		__BUILD_EVA_INSN(lwle, reg, addr)
+#define user_lwr(reg, addr)		__BUILD_EVA_INSN(lwre, reg, addr)
+#define user_lh(reg, addr)		__BUILD_EVA_INSN(lhe, reg, addr)
+#define user_lb(reg, addr)		__BUILD_EVA_INSN(lbe, reg, addr)
+#define user_lbu(reg, addr)		__BUILD_EVA_INSN(lbue, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_ld(reg, addr)		user_lw(reg, addr)
+#define user_sw(reg, addr)		__BUILD_EVA_INSN(swe, reg, addr)
+#define user_swl(reg, addr)		__BUILD_EVA_INSN(swle, reg, addr)
+#define user_swr(reg, addr)		__BUILD_EVA_INSN(swre, reg, addr)
+#define user_sh(reg, addr)		__BUILD_EVA_INSN(she, reg, addr)
+#define user_sb(reg, addr)		__BUILD_EVA_INSN(sbe, reg, addr)
+/* No 64-bit EVA instruction for loading double words */
+#define user_sd(reg, addr)		user_sw(reg, addr)
+#else
+
+#define user_cache(op, base)		cache op, base
+#define user_ll(reg, addr)		ll reg, addr
+#define user_sc(reg, addr)		sc reg, addr
+#define user_lw(reg, addr)		lw reg, addr
+#define user_lwl(reg, addr)		lwl reg, addr
+#define user_lwr(reg, addr)		lwr reg, addr
+#define user_lh(reg, addr)		lh reg, addr
+#define user_lb(reg, addr)		lb reg, addr
+#define user_lbu(reg, addr)		lbu reg, addr
+#define user_sw(reg, addr)		sw reg, addr
+#define user_swl(reg, addr)		swl reg, addr
+#define user_swr(reg, addr)		swr reg, addr
+#define user_sh(reg, addr)		sh reg, addr
+#define user_sb(reg, addr)		sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define user_sd(reg, addr)		user_sw(reg, addr)
+#define user_ld(reg, addr)		user_lw(reg, addr)
+#else
+#define user_sd(reg, addr)		sd reg, addr
+#define user_ld(reg, addr)		ld reg, addr
+#endif /* CONFIG_32BIT */
+
+#endif /* CONFIG_EVA */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EVA_H */
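Illustrative only, not taken from the patch: a minimal sketch of how the C-string variants above might be used from inline assembly, so that one source line emits lwe when CONFIG_EVA is set and a plain lw otherwise. The helper name is hypothetical, and real uaccess code would additionally register an exception-table entry for the load.

#include <asm/asm-eva.h>

/* Hypothetical helper: load one word through a user pointer.  With
 * CONFIG_EVA the macro expands to ".set eva; lwe %0, 0(%1)"; without it,
 * to an ordinary "lw".  Fault handling is omitted for brevity. */
static inline int demo_load_user_word(const int __user *ptr)
{
	int val;

	__asm__ __volatile__(
		user_lw("%0", "0(%1)")
		: "=r" (val)
		: "r" (ptr));

	return val;
}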
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 879691d194af..7c26b28bf252 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -18,6 +18,7 @@
 #define __ASM_ASM_H
 
 #include <asm/sgidefs.h>
+#include <asm/asm-eva.h>
 
 #ifndef CAT
 #ifdef __STDC__
@@ -145,19 +146,27 @@ symbol = value
 
 #define PREF(hint,addr) \
 	.set push; \
-	.set mips4; \
+	.set arch=r5000; \
 	pref hint, addr; \
 	.set pop
 
+#define PREFE(hint, addr) \
+	.set push; \
+	.set mips0; \
+	.set eva; \
+	prefe hint, addr; \
+	.set pop
+
 #define PREFX(hint,addr) \
 	.set push; \
-	.set mips4; \
+	.set arch=r5000; \
 	prefx hint, addr; \
 	.set pop
 
 #else /* !CONFIG_CPU_HAS_PREFETCH */
 
 #define PREF(hint, addr)
+#define PREFE(hint, addr)
 #define PREFX(hint, addr)
 
 #endif /* !CONFIG_CPU_HAS_PREFETCH */
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 70e1f176f123..e38c2811d4e2 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -14,75 +14,75 @@
 
 	.macro fpu_save_single thread tmp=t0
 	cfc1 \tmp, fcr31
-	swc1 $f0, THREAD_FPR0(\thread)
-	swc1 $f1, THREAD_FPR1(\thread)
-	swc1 $f2, THREAD_FPR2(\thread)
-	swc1 $f3, THREAD_FPR3(\thread)
-	swc1 $f4, THREAD_FPR4(\thread)
-	swc1 $f5, THREAD_FPR5(\thread)
-	swc1 $f6, THREAD_FPR6(\thread)
-	swc1 $f7, THREAD_FPR7(\thread)
-	swc1 $f8, THREAD_FPR8(\thread)
-	swc1 $f9, THREAD_FPR9(\thread)
-	swc1 $f10, THREAD_FPR10(\thread)
-	swc1 $f11, THREAD_FPR11(\thread)
-	swc1 $f12, THREAD_FPR12(\thread)
-	swc1 $f13, THREAD_FPR13(\thread)
-	swc1 $f14, THREAD_FPR14(\thread)
-	swc1 $f15, THREAD_FPR15(\thread)
-	swc1 $f16, THREAD_FPR16(\thread)
-	swc1 $f17, THREAD_FPR17(\thread)
-	swc1 $f18, THREAD_FPR18(\thread)
-	swc1 $f19, THREAD_FPR19(\thread)
-	swc1 $f20, THREAD_FPR20(\thread)
-	swc1 $f21, THREAD_FPR21(\thread)
-	swc1 $f22, THREAD_FPR22(\thread)
-	swc1 $f23, THREAD_FPR23(\thread)
-	swc1 $f24, THREAD_FPR24(\thread)
-	swc1 $f25, THREAD_FPR25(\thread)
-	swc1 $f26, THREAD_FPR26(\thread)
-	swc1 $f27, THREAD_FPR27(\thread)
-	swc1 $f28, THREAD_FPR28(\thread)
-	swc1 $f29, THREAD_FPR29(\thread)
-	swc1 $f30, THREAD_FPR30(\thread)
-	swc1 $f31, THREAD_FPR31(\thread)
+	swc1 $f0, THREAD_FPR0_LS64(\thread)
+	swc1 $f1, THREAD_FPR1_LS64(\thread)
+	swc1 $f2, THREAD_FPR2_LS64(\thread)
+	swc1 $f3, THREAD_FPR3_LS64(\thread)
+	swc1 $f4, THREAD_FPR4_LS64(\thread)
+	swc1 $f5, THREAD_FPR5_LS64(\thread)
+	swc1 $f6, THREAD_FPR6_LS64(\thread)
+	swc1 $f7, THREAD_FPR7_LS64(\thread)
+	swc1 $f8, THREAD_FPR8_LS64(\thread)
+	swc1 $f9, THREAD_FPR9_LS64(\thread)
+	swc1 $f10, THREAD_FPR10_LS64(\thread)
+	swc1 $f11, THREAD_FPR11_LS64(\thread)
+	swc1 $f12, THREAD_FPR12_LS64(\thread)
+	swc1 $f13, THREAD_FPR13_LS64(\thread)
+	swc1 $f14, THREAD_FPR14_LS64(\thread)
+	swc1 $f15, THREAD_FPR15_LS64(\thread)
+	swc1 $f16, THREAD_FPR16_LS64(\thread)
+	swc1 $f17, THREAD_FPR17_LS64(\thread)
+	swc1 $f18, THREAD_FPR18_LS64(\thread)
+	swc1 $f19, THREAD_FPR19_LS64(\thread)
+	swc1 $f20, THREAD_FPR20_LS64(\thread)
+	swc1 $f21, THREAD_FPR21_LS64(\thread)
+	swc1 $f22, THREAD_FPR22_LS64(\thread)
+	swc1 $f23, THREAD_FPR23_LS64(\thread)
+	swc1 $f24, THREAD_FPR24_LS64(\thread)
+	swc1 $f25, THREAD_FPR25_LS64(\thread)
+	swc1 $f26, THREAD_FPR26_LS64(\thread)
+	swc1 $f27, THREAD_FPR27_LS64(\thread)
+	swc1 $f28, THREAD_FPR28_LS64(\thread)
+	swc1 $f29, THREAD_FPR29_LS64(\thread)
+	swc1 $f30, THREAD_FPR30_LS64(\thread)
+	swc1 $f31, THREAD_FPR31_LS64(\thread)
 	sw \tmp, THREAD_FCR31(\thread)
 	.endm
 
 	.macro fpu_restore_single thread tmp=t0
 	lw \tmp, THREAD_FCR31(\thread)
-	lwc1 $f0, THREAD_FPR0(\thread)
-	lwc1 $f1, THREAD_FPR1(\thread)
-	lwc1 $f2, THREAD_FPR2(\thread)
-	lwc1 $f3, THREAD_FPR3(\thread)
-	lwc1 $f4, THREAD_FPR4(\thread)
-	lwc1 $f5, THREAD_FPR5(\thread)
-	lwc1 $f6, THREAD_FPR6(\thread)
-	lwc1 $f7, THREAD_FPR7(\thread)
-	lwc1 $f8, THREAD_FPR8(\thread)
-	lwc1 $f9, THREAD_FPR9(\thread)
-	lwc1 $f10, THREAD_FPR10(\thread)
-	lwc1 $f11, THREAD_FPR11(\thread)
-	lwc1 $f12, THREAD_FPR12(\thread)
-	lwc1 $f13, THREAD_FPR13(\thread)
-	lwc1 $f14, THREAD_FPR14(\thread)
-	lwc1 $f15, THREAD_FPR15(\thread)
-	lwc1 $f16, THREAD_FPR16(\thread)
-	lwc1 $f17, THREAD_FPR17(\thread)
-	lwc1 $f18, THREAD_FPR18(\thread)
-	lwc1 $f19, THREAD_FPR19(\thread)
-	lwc1 $f20, THREAD_FPR20(\thread)
-	lwc1 $f21, THREAD_FPR21(\thread)
-	lwc1 $f22, THREAD_FPR22(\thread)
-	lwc1 $f23, THREAD_FPR23(\thread)
-	lwc1 $f24, THREAD_FPR24(\thread)
-	lwc1 $f25, THREAD_FPR25(\thread)
-	lwc1 $f26, THREAD_FPR26(\thread)
-	lwc1 $f27, THREAD_FPR27(\thread)
-	lwc1 $f28, THREAD_FPR28(\thread)
-	lwc1 $f29, THREAD_FPR29(\thread)
-	lwc1 $f30, THREAD_FPR30(\thread)
-	lwc1 $f31, THREAD_FPR31(\thread)
+	lwc1 $f0, THREAD_FPR0_LS64(\thread)
+	lwc1 $f1, THREAD_FPR1_LS64(\thread)
+	lwc1 $f2, THREAD_FPR2_LS64(\thread)
+	lwc1 $f3, THREAD_FPR3_LS64(\thread)
+	lwc1 $f4, THREAD_FPR4_LS64(\thread)
+	lwc1 $f5, THREAD_FPR5_LS64(\thread)
+	lwc1 $f6, THREAD_FPR6_LS64(\thread)
+	lwc1 $f7, THREAD_FPR7_LS64(\thread)
+	lwc1 $f8, THREAD_FPR8_LS64(\thread)
+	lwc1 $f9, THREAD_FPR9_LS64(\thread)
+	lwc1 $f10, THREAD_FPR10_LS64(\thread)
+	lwc1 $f11, THREAD_FPR11_LS64(\thread)
+	lwc1 $f12, THREAD_FPR12_LS64(\thread)
+	lwc1 $f13, THREAD_FPR13_LS64(\thread)
+	lwc1 $f14, THREAD_FPR14_LS64(\thread)
+	lwc1 $f15, THREAD_FPR15_LS64(\thread)
+	lwc1 $f16, THREAD_FPR16_LS64(\thread)
+	lwc1 $f17, THREAD_FPR17_LS64(\thread)
+	lwc1 $f18, THREAD_FPR18_LS64(\thread)
+	lwc1 $f19, THREAD_FPR19_LS64(\thread)
+	lwc1 $f20, THREAD_FPR20_LS64(\thread)
+	lwc1 $f21, THREAD_FPR21_LS64(\thread)
+	lwc1 $f22, THREAD_FPR22_LS64(\thread)
+	lwc1 $f23, THREAD_FPR23_LS64(\thread)
+	lwc1 $f24, THREAD_FPR24_LS64(\thread)
+	lwc1 $f25, THREAD_FPR25_LS64(\thread)
+	lwc1 $f26, THREAD_FPR26_LS64(\thread)
+	lwc1 $f27, THREAD_FPR27_LS64(\thread)
+	lwc1 $f28, THREAD_FPR28_LS64(\thread)
+	lwc1 $f29, THREAD_FPR29_LS64(\thread)
+	lwc1 $f30, THREAD_FPR30_LS64(\thread)
+	lwc1 $f31, THREAD_FPR31_LS64(\thread)
 	ctc1 \tmp, fcr31
 	.endm
 
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 4225e99bd7bf..b464b8b1147a 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -75,44 +75,44 @@
 
 	.macro fpu_save_16even thread tmp=t0
 	cfc1 \tmp, fcr31
-	sdc1 $f0, THREAD_FPR0(\thread)
-	sdc1 $f2, THREAD_FPR2(\thread)
-	sdc1 $f4, THREAD_FPR4(\thread)
-	sdc1 $f6, THREAD_FPR6(\thread)
-	sdc1 $f8, THREAD_FPR8(\thread)
-	sdc1 $f10, THREAD_FPR10(\thread)
-	sdc1 $f12, THREAD_FPR12(\thread)
-	sdc1 $f14, THREAD_FPR14(\thread)
-	sdc1 $f16, THREAD_FPR16(\thread)
-	sdc1 $f18, THREAD_FPR18(\thread)
-	sdc1 $f20, THREAD_FPR20(\thread)
-	sdc1 $f22, THREAD_FPR22(\thread)
-	sdc1 $f24, THREAD_FPR24(\thread)
-	sdc1 $f26, THREAD_FPR26(\thread)
-	sdc1 $f28, THREAD_FPR28(\thread)
-	sdc1 $f30, THREAD_FPR30(\thread)
+	sdc1 $f0, THREAD_FPR0_LS64(\thread)
+	sdc1 $f2, THREAD_FPR2_LS64(\thread)
+	sdc1 $f4, THREAD_FPR4_LS64(\thread)
+	sdc1 $f6, THREAD_FPR6_LS64(\thread)
+	sdc1 $f8, THREAD_FPR8_LS64(\thread)
+	sdc1 $f10, THREAD_FPR10_LS64(\thread)
+	sdc1 $f12, THREAD_FPR12_LS64(\thread)
+	sdc1 $f14, THREAD_FPR14_LS64(\thread)
+	sdc1 $f16, THREAD_FPR16_LS64(\thread)
+	sdc1 $f18, THREAD_FPR18_LS64(\thread)
+	sdc1 $f20, THREAD_FPR20_LS64(\thread)
+	sdc1 $f22, THREAD_FPR22_LS64(\thread)
+	sdc1 $f24, THREAD_FPR24_LS64(\thread)
+	sdc1 $f26, THREAD_FPR26_LS64(\thread)
+	sdc1 $f28, THREAD_FPR28_LS64(\thread)
+	sdc1 $f30, THREAD_FPR30_LS64(\thread)
 	sw \tmp, THREAD_FCR31(\thread)
 	.endm
 
 	.macro fpu_save_16odd thread
 	.set push
 	.set mips64r2
-	sdc1 $f1, THREAD_FPR1(\thread)
-	sdc1 $f3, THREAD_FPR3(\thread)
-	sdc1 $f5, THREAD_FPR5(\thread)
-	sdc1 $f7, THREAD_FPR7(\thread)
-	sdc1 $f9, THREAD_FPR9(\thread)
-	sdc1 $f11, THREAD_FPR11(\thread)
-	sdc1 $f13, THREAD_FPR13(\thread)
-	sdc1 $f15, THREAD_FPR15(\thread)
-	sdc1 $f17, THREAD_FPR17(\thread)
-	sdc1 $f19, THREAD_FPR19(\thread)
-	sdc1 $f21, THREAD_FPR21(\thread)
-	sdc1 $f23, THREAD_FPR23(\thread)
-	sdc1 $f25, THREAD_FPR25(\thread)
-	sdc1 $f27, THREAD_FPR27(\thread)
-	sdc1 $f29, THREAD_FPR29(\thread)
-	sdc1 $f31, THREAD_FPR31(\thread)
+	sdc1 $f1, THREAD_FPR1_LS64(\thread)
+	sdc1 $f3, THREAD_FPR3_LS64(\thread)
+	sdc1 $f5, THREAD_FPR5_LS64(\thread)
+	sdc1 $f7, THREAD_FPR7_LS64(\thread)
+	sdc1 $f9, THREAD_FPR9_LS64(\thread)
+	sdc1 $f11, THREAD_FPR11_LS64(\thread)
+	sdc1 $f13, THREAD_FPR13_LS64(\thread)
+	sdc1 $f15, THREAD_FPR15_LS64(\thread)
+	sdc1 $f17, THREAD_FPR17_LS64(\thread)
+	sdc1 $f19, THREAD_FPR19_LS64(\thread)
+	sdc1 $f21, THREAD_FPR21_LS64(\thread)
+	sdc1 $f23, THREAD_FPR23_LS64(\thread)
+	sdc1 $f25, THREAD_FPR25_LS64(\thread)
+	sdc1 $f27, THREAD_FPR27_LS64(\thread)
+	sdc1 $f29, THREAD_FPR29_LS64(\thread)
+	sdc1 $f31, THREAD_FPR31_LS64(\thread)
 	.set pop
 	.endm
 
@@ -128,44 +128,44 @@
 
 	.macro fpu_restore_16even thread tmp=t0
 	lw \tmp, THREAD_FCR31(\thread)
-	ldc1 $f0, THREAD_FPR0(\thread)
-	ldc1 $f2, THREAD_FPR2(\thread)
-	ldc1 $f4, THREAD_FPR4(\thread)
-	ldc1 $f6, THREAD_FPR6(\thread)
-	ldc1 $f8, THREAD_FPR8(\thread)
-	ldc1 $f10, THREAD_FPR10(\thread)
-	ldc1 $f12, THREAD_FPR12(\thread)
-	ldc1 $f14, THREAD_FPR14(\thread)
-	ldc1 $f16, THREAD_FPR16(\thread)
-	ldc1 $f18, THREAD_FPR18(\thread)
-	ldc1 $f20, THREAD_FPR20(\thread)
-	ldc1 $f22, THREAD_FPR22(\thread)
-	ldc1 $f24, THREAD_FPR24(\thread)
-	ldc1 $f26, THREAD_FPR26(\thread)
-	ldc1 $f28, THREAD_FPR28(\thread)
-	ldc1 $f30, THREAD_FPR30(\thread)
+	ldc1 $f0, THREAD_FPR0_LS64(\thread)
+	ldc1 $f2, THREAD_FPR2_LS64(\thread)
+	ldc1 $f4, THREAD_FPR4_LS64(\thread)
+	ldc1 $f6, THREAD_FPR6_LS64(\thread)
+	ldc1 $f8, THREAD_FPR8_LS64(\thread)
+	ldc1 $f10, THREAD_FPR10_LS64(\thread)
+	ldc1 $f12, THREAD_FPR12_LS64(\thread)
+	ldc1 $f14, THREAD_FPR14_LS64(\thread)
+	ldc1 $f16, THREAD_FPR16_LS64(\thread)
+	ldc1 $f18, THREAD_FPR18_LS64(\thread)
+	ldc1 $f20, THREAD_FPR20_LS64(\thread)
+	ldc1 $f22, THREAD_FPR22_LS64(\thread)
+	ldc1 $f24, THREAD_FPR24_LS64(\thread)
+	ldc1 $f26, THREAD_FPR26_LS64(\thread)
+	ldc1 $f28, THREAD_FPR28_LS64(\thread)
+	ldc1 $f30, THREAD_FPR30_LS64(\thread)
 	ctc1 \tmp, fcr31
 	.endm
 
 	.macro fpu_restore_16odd thread
 	.set push
 	.set mips64r2
-	ldc1 $f1, THREAD_FPR1(\thread)
-	ldc1 $f3, THREAD_FPR3(\thread)
-	ldc1 $f5, THREAD_FPR5(\thread)
-	ldc1 $f7, THREAD_FPR7(\thread)
-	ldc1 $f9, THREAD_FPR9(\thread)
-	ldc1 $f11, THREAD_FPR11(\thread)
-	ldc1 $f13, THREAD_FPR13(\thread)
-	ldc1 $f15, THREAD_FPR15(\thread)
-	ldc1 $f17, THREAD_FPR17(\thread)
-	ldc1 $f19, THREAD_FPR19(\thread)
-	ldc1 $f21, THREAD_FPR21(\thread)
-	ldc1 $f23, THREAD_FPR23(\thread)
-	ldc1 $f25, THREAD_FPR25(\thread)
-	ldc1 $f27, THREAD_FPR27(\thread)
-	ldc1 $f29, THREAD_FPR29(\thread)
-	ldc1 $f31, THREAD_FPR31(\thread)
+	ldc1 $f1, THREAD_FPR1_LS64(\thread)
+	ldc1 $f3, THREAD_FPR3_LS64(\thread)
+	ldc1 $f5, THREAD_FPR5_LS64(\thread)
+	ldc1 $f7, THREAD_FPR7_LS64(\thread)
+	ldc1 $f9, THREAD_FPR9_LS64(\thread)
+	ldc1 $f11, THREAD_FPR11_LS64(\thread)
+	ldc1 $f13, THREAD_FPR13_LS64(\thread)
+	ldc1 $f15, THREAD_FPR15_LS64(\thread)
+	ldc1 $f17, THREAD_FPR17_LS64(\thread)
+	ldc1 $f19, THREAD_FPR19_LS64(\thread)
+	ldc1 $f21, THREAD_FPR21_LS64(\thread)
+	ldc1 $f23, THREAD_FPR23_LS64(\thread)
+	ldc1 $f25, THREAD_FPR25_LS64(\thread)
+	ldc1 $f27, THREAD_FPR27_LS64(\thread)
+	ldc1 $f29, THREAD_FPR29_LS64(\thread)
+	ldc1 $f31, THREAD_FPR31_LS64(\thread)
 	.set pop
 	.endm
 
@@ -180,6 +180,17 @@
 	fpu_restore_16even \thread \tmp
 	.endm
 
+#ifdef CONFIG_CPU_MIPSR2
+	.macro _EXT rd, rs, p, s
+	ext \rd, \rs, \p, \s
+	.endm
+#else /* !CONFIG_CPU_MIPSR2 */
+	.macro _EXT rd, rs, p, s
+	srl \rd, \rs, \p
+	andi \rd, \rd, (1 << \s) - 1
+	.endm
+#endif /* !CONFIG_CPU_MIPSR2 */
+
 /*
  * Temporary until all gas have MT ASE support
  */
@@ -207,4 +218,195 @@
 	.word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
 	.endm
 
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+	.macro ld_d wd, off, base
+	.set push
+	.set mips32r2
+	.set msa
+	ld.d $w\wd, \off(\base)
+	.set pop
+	.endm
+
+	.macro st_d wd, off, base
+	.set push
+	.set mips32r2
+	.set msa
+	st.d $w\wd, \off(\base)
+	.set pop
+	.endm
+
+	.macro copy_u_w rd, ws, n
+	.set push
+	.set mips32r2
+	.set msa
+	copy_u.w \rd, $w\ws[\n]
+	.set pop
+	.endm
+
+	.macro copy_u_d rd, ws, n
+	.set push
+	.set mips64r2
+	.set msa
+	copy_u.d \rd, $w\ws[\n]
+	.set pop
+	.endm
+
+	.macro insert_w wd, n, rs
+	.set push
+	.set mips32r2
+	.set msa
+	insert.w $w\wd[\n], \rs
+	.set pop
+	.endm
+
+	.macro insert_d wd, n, rs
+	.set push
+	.set mips64r2
+	.set msa
+	insert.d $w\wd[\n], \rs
+	.set pop
+	.endm
+#else
+	/*
+	 * Temporary until all toolchains in use include MSA support.
+	 */
+	.macro cfcmsa rd, cs
+	.set push
+	.set noat
+	.word 0x787e0059 | (\cs << 11)
+	move \rd, $1
+	.set pop
+	.endm
+
+	.macro ctcmsa cd, rs
+	.set push
+	.set noat
+	move $1, \rs
+	.word 0x783e0819 | (\cd << 6)
+	.set pop
+	.endm
+
+	.macro ld_d wd, off, base
+	.set push
+	.set noat
+	add $1, \base, \off
+	.word 0x78000823 | (\wd << 6)
+	.set pop
+	.endm
+
+	.macro st_d wd, off, base
+	.set push
+	.set noat
+	add $1, \base, \off
+	.word 0x78000827 | (\wd << 6)
+	.set pop
+	.endm
+
+	.macro copy_u_w rd, ws, n
+	.set push
+	.set noat
+	.word 0x78f00059 | (\n << 16) | (\ws << 11)
+	/* move triggers an assembler bug... */
+	or \rd, $1, zero
+	.set pop
+	.endm
+
+	.macro copy_u_d rd, ws, n
+	.set push
+	.set noat
+	.word 0x78f80059 | (\n << 16) | (\ws << 11)
+	/* move triggers an assembler bug... */
+	or \rd, $1, zero
+	.set pop
+	.endm
+
+	.macro insert_w wd, n, rs
+	.set push
+	.set noat
+	/* move triggers an assembler bug... */
+	or $1, \rs, zero
+	.word 0x79300819 | (\n << 16) | (\wd << 6)
+	.set pop
+	.endm
+
+	.macro insert_d wd, n, rs
+	.set push
+	.set noat
+	/* move triggers an assembler bug... */
+	or $1, \rs, zero
+	.word 0x79380819 | (\n << 16) | (\wd << 6)
+	.set pop
+	.endm
+#endif
+
+	.macro msa_save_all thread
+	st_d 0, THREAD_FPR0, \thread
+	st_d 1, THREAD_FPR1, \thread
+	st_d 2, THREAD_FPR2, \thread
+	st_d 3, THREAD_FPR3, \thread
+	st_d 4, THREAD_FPR4, \thread
+	st_d 5, THREAD_FPR5, \thread
+	st_d 6, THREAD_FPR6, \thread
+	st_d 7, THREAD_FPR7, \thread
+	st_d 8, THREAD_FPR8, \thread
+	st_d 9, THREAD_FPR9, \thread
+	st_d 10, THREAD_FPR10, \thread
+	st_d 11, THREAD_FPR11, \thread
+	st_d 12, THREAD_FPR12, \thread
+	st_d 13, THREAD_FPR13, \thread
+	st_d 14, THREAD_FPR14, \thread
+	st_d 15, THREAD_FPR15, \thread
+	st_d 16, THREAD_FPR16, \thread
+	st_d 17, THREAD_FPR17, \thread
+	st_d 18, THREAD_FPR18, \thread
+	st_d 19, THREAD_FPR19, \thread
+	st_d 20, THREAD_FPR20, \thread
+	st_d 21, THREAD_FPR21, \thread
+	st_d 22, THREAD_FPR22, \thread
+	st_d 23, THREAD_FPR23, \thread
+	st_d 24, THREAD_FPR24, \thread
+	st_d 25, THREAD_FPR25, \thread
+	st_d 26, THREAD_FPR26, \thread
+	st_d 27, THREAD_FPR27, \thread
+	st_d 28, THREAD_FPR28, \thread
+	st_d 29, THREAD_FPR29, \thread
+	st_d 30, THREAD_FPR30, \thread
+	st_d 31, THREAD_FPR31, \thread
+	.endm
+
+	.macro msa_restore_all thread
+	ld_d 0, THREAD_FPR0, \thread
+	ld_d 1, THREAD_FPR1, \thread
+	ld_d 2, THREAD_FPR2, \thread
+	ld_d 3, THREAD_FPR3, \thread
+	ld_d 4, THREAD_FPR4, \thread
+	ld_d 5, THREAD_FPR5, \thread
+	ld_d 6, THREAD_FPR6, \thread
+	ld_d 7, THREAD_FPR7, \thread
+	ld_d 8, THREAD_FPR8, \thread
+	ld_d 9, THREAD_FPR9, \thread
+	ld_d 10, THREAD_FPR10, \thread
+	ld_d 11, THREAD_FPR11, \thread
+	ld_d 12, THREAD_FPR12, \thread
+	ld_d 13, THREAD_FPR13, \thread
+	ld_d 14, THREAD_FPR14, \thread
+	ld_d 15, THREAD_FPR15, \thread
+	ld_d 16, THREAD_FPR16, \thread
+	ld_d 17, THREAD_FPR17, \thread
+	ld_d 18, THREAD_FPR18, \thread
+	ld_d 19, THREAD_FPR19, \thread
+	ld_d 20, THREAD_FPR20, \thread
+	ld_d 21, THREAD_FPR21, \thread
+	ld_d 22, THREAD_FPR22, \thread
+	ld_d 23, THREAD_FPR23, \thread
+	ld_d 24, THREAD_FPR24, \thread
+	ld_d 25, THREAD_FPR25, \thread
+	ld_d 26, THREAD_FPR26, \thread
+	ld_d 27, THREAD_FPR27, \thread
+	ld_d 28, THREAD_FPR28, \thread
+	ld_d 29, THREAD_FPR29, \thread
+	ld_d 30, THREAD_FPR30, \thread
+	ld_d 31, THREAD_FPR31, \thread
+	.endm
+
 #endif /* _ASM_ASMMACRO_H */
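For reference, a C rendering of what the new _EXT fallback computes on pre-MIPS32r2 cores, where the ext instruction is unavailable (editorial sketch, not from the patch): shift the source right by the start position, then mask off the requested field width.

/* Equivalent of the fallback "srl rd, rs, p; andi rd, rd, (1 << s) - 1". */
static inline unsigned int demo_ext(unsigned int rs, unsigned int p,
				    unsigned int s)
{
	return (rs >> p) & ((1u << s) - 1);
}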
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 7eed2f261710..e8eb3d53a241 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -53,7 +53,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 	int temp;
 
 	__asm__ __volatile__(
-	"	.set	mips3					\n"
+	"	.set	arch=r4000				\n"
 	"1:	ll	%0, %1		# atomic_add		\n"
 	"	addu	%0, %2					\n"
 	"	sc	%0, %1					\n"
@@ -66,7 +66,7 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_add	\n"
 			"	addu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -96,7 +96,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 	int temp;
 
 	__asm__ __volatile__(
-	"	.set	mips3					\n"
+	"	.set	arch=r4000				\n"
 	"1:	ll	%0, %1		# atomic_sub		\n"
 	"	subu	%0, %2					\n"
 	"	sc	%0, %1					\n"
@@ -109,7 +109,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%0, %1		# atomic_sub	\n"
 			"	subu	%0, %2				\n"
 			"	sc	%0, %1				\n"
@@ -139,7 +139,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_add_return	\n"
 		"	addu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -153,7 +153,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_add_return	\n"
 			"	addu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -188,7 +188,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_return	\n"
 		"	subu	%0, %1, %3				\n"
 		"	sc	%0, %2					\n"
@@ -205,7 +205,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	ll	%1, %2	# atomic_sub_return	\n"
 			"	subu	%0, %1, %3			\n"
 			"	sc	%0, %2				\n"
@@ -248,7 +248,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -266,7 +266,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 		int temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
 		"	subu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -420,7 +420,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_add		\n"
 		"	daddu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -433,7 +433,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1		# atomic64_add	\n"
 			"	daddu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -463,7 +463,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%0, %1		# atomic64_sub		\n"
 		"	dsubu	%0, %2					\n"
 		"	scd	%0, %1					\n"
@@ -476,7 +476,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%0, %1		# atomic64_sub	\n"
 			"	dsubu	%0, %2				\n"
 			"	scd	%0, %1				\n"
@@ -506,7 +506,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_add_return	\n"
 		"	daddu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -520,7 +520,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_add_return	\n"
 			"	daddu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -556,7 +556,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_return	\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	scd	%0, %2					\n"
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	lld	%1, %2	# atomic64_sub_return	\n"
 			"	dsubu	%0, %1, %3			\n"
 			"	scd	%0, %2				\n"
@@ -615,7 +615,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
@@ -633,7 +633,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 		long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
 		"	dsubu	%0, %1, %3				\n"
 		"	bltz	%0, 1f					\n"
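The .set changes above only alter which ISA level the assembler is told to accept for ll/sc and lld/scd; callers see no difference. As a reminder of what these primitives back, an illustrative (not from the patch) refcount-style usage:

static atomic_t demo_users = ATOMIC_INIT(1);

static void demo_get(void)
{
	atomic_add(1, &demo_users);
}

static int demo_put_and_test(void)
{
	/* atomic_sub_return() is the LL/SC loop shown in the hunks above. */
	return atomic_sub_return(1, &demo_users) == 0;
}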
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 71305a8b3d78..6a65d49e2c0d 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -79,7 +79,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL "%0, %1			# set_bit	\n"
 		"	or	%0, %2					\n"
 		"	" __SC	"%0, %1					\n"
@@ -101,7 +101,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL "%0, %1		# set_bit	\n"
 			"	or	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
@@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL "%0, %1			# clear_bit	\n"
 		"	and	%0, %2					\n"
 		"	" __SC "%0, %1					\n"
@@ -153,7 +153,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 	} else if (kernel_uses_llsc) {
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL "%0, %1		# clear_bit	\n"
 			"	and	%0, %2				\n"
 			"	" __SC "%0, %1				\n"
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3				\n"
+		"	.set	arch=r4000			\n"
 		"1:	" __LL "%0, %1		# change_bit	\n"
 		"	xor	%0, %2				\n"
 		"	" __SC	"%0, %1				\n"
@@ -211,7 +211,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL "%0, %1		# change_bit	\n"
 			"	xor	%0, %2				\n"
 			"	" __SC	"%0, %1				\n"
@@ -244,7 +244,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
 		"	or	%2, %0, %3				\n"
 		"	" __SC	"%2, %1					\n"
@@ -260,7 +260,7 @@ static inline int test_and_set_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
@@ -298,7 +298,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
 		"	or	%2, %0, %3				\n"
 		"	" __SC	"%2, %1					\n"
@@ -314,7 +314,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL "%0, %1	# test_and_set_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	" __SC	"%2, %1				\n"
@@ -353,7 +353,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL	"%0, %1		# test_and_clear_bit	\n"
 		"	or	%2, %0, %3				\n"
 		"	xor	%2, %3					\n"
@@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL	"%0, %1	# test_and_clear_bit	\n"
 			"	or	%2, %0, %3			\n"
 			"	xor	%2, %3				\n"
@@ -427,7 +427,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long temp;
 
 		__asm__ __volatile__(
-		"	.set	mips3					\n"
+		"	.set	arch=r4000				\n"
 		"1:	" __LL	"%0, %1		# test_and_change_bit	\n"
 		"	xor	%2, %0, %3				\n"
 		"	" __SC	"%2, %1					\n"
@@ -443,7 +443,7 @@ static inline int test_and_change_bit(unsigned long nr,
 
 		do {
 			__asm__ __volatile__(
-			"	.set	mips3				\n"
+			"	.set	arch=r4000			\n"
 			"	" __LL	"%0, %1	# test_and_change_bit	\n"
 			"	xor	%2, %0, %3			\n"
 			"	" __SC	"\t%2, %1			\n"
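Same story for the bitops: only the .set directive in each LL/SC body changes. For orientation, an illustrative caller of the sequences patched above (names are hypothetical, not from the patch):

static unsigned long demo_flags;

static int demo_try_claim(void)
{
	/* Backed by the test_and_set_bit() LL/SC loop shown above. */
	return !test_and_set_bit(0, &demo_flags);
}

static void demo_release(void)
{
	clear_bit(0, &demo_flags);
}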
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 4d2cdea5aa37..1f7ca8b00404 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -61,15 +61,21 @@
 /*
  * Valid machtype for Loongson family
  */
-#define MACH_LOONGSON_UNKNOWN	0
-#define MACH_LEMOTE_FL2E	1
-#define MACH_LEMOTE_FL2F	2
-#define MACH_LEMOTE_ML2F7	3
-#define MACH_LEMOTE_YL2F89	4
-#define MACH_DEXXON_GDIUM2F10	5
-#define MACH_LEMOTE_NAS		6
-#define MACH_LEMOTE_LL2F	7
-#define MACH_LOONGSON_END	8
+enum loongson_machine_type {
+	MACH_LOONGSON_UNKNOWN,
+	MACH_LEMOTE_FL2E,
+	MACH_LEMOTE_FL2F,
+	MACH_LEMOTE_ML2F7,
+	MACH_LEMOTE_YL2F89,
+	MACH_DEXXON_GDIUM2F10,
+	MACH_LEMOTE_NAS,
+	MACH_LEMOTE_LL2F,
+	MACH_LEMOTE_A1004,
+	MACH_LEMOTE_A1101,
+	MACH_LEMOTE_A1201,
+	MACH_LEMOTE_A1205,
+	MACH_LOONGSON_END
+};
 
 /*
  * Valid machtype for group INGENIC
@@ -112,6 +118,8 @@ extern void prom_free_prom_memory(void);
 extern void free_init_pages(const char *what,
 			    unsigned long begin, unsigned long end);
 
+extern void (*free_init_pages_eva)(void *begin, void *end);
+
 /*
  * Initial kernel command line, usually setup by prom_init()
  */
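A hedged sketch of how the new enum values might be consumed; it assumes the usual mips_machtype global from this header and is editorial, not taken from the patch:

static const char *demo_loongson_board_name(void)
{
	switch (mips_machtype) {
	case MACH_LEMOTE_A1004:
		return "Lemote A1004";
	case MACH_LEMOTE_A1201:
		return "Lemote A1201";
	default:
		return "unknown Loongson machine";
	}
}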
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8a20d4..3418c51e1151 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -7,6 +7,7 @@
  * Copyright (C) 1999 Silicon Graphics, Inc.
  * Copyright (C) 2001 Thiemo Seufer.
  * Copyright (C) 2002 Maciej W. Rozycki
+ * Copyright (C) 2014 Imagination Technologies Ltd.
  */
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
@@ -29,9 +30,13 @@
  */
 __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-__wsum __csum_partial_copy_user(const void *src, void *dst,
+__wsum __csum_partial_copy_kernel(const void *src, void *dst,
 				int len, __wsum sum, int *err_ptr);
 
+__wsum __csum_partial_copy_from_user(const void *src, void *dst,
+				int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_to_user(const void *src, void *dst,
+				int len, __wsum sum, int *err_ptr);
 /*
  * this is a new version of the above that records errors it finds in *errp,
  * but continues and zeros the rest of the buffer.
@@ -41,8 +46,26 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 				   __wsum sum, int *err_ptr)
 {
 	might_fault();
-	return __csum_partial_copy_user((__force void *)src, dst,
-					len, sum, err_ptr);
+	if (segment_eq(get_fs(), get_ds()))
+		return __csum_partial_copy_kernel((__force void *)src, dst,
+						  len, sum, err_ptr);
+	else
+		return __csum_partial_copy_from_user((__force void *)src, dst,
+						     len, sum, err_ptr);
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+			       int len, __wsum sum, int *err_ptr)
+{
+	if (access_ok(VERIFY_READ, src, len))
+		return csum_partial_copy_from_user(src, dst, len, sum,
+						   err_ptr);
+	if (len)
+		*err_ptr = -EFAULT;
+
+	return sum;
 }
 
 /*
@@ -54,9 +77,16 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
 			     __wsum sum, int *err_ptr)
 {
 	might_fault();
-	if (access_ok(VERIFY_WRITE, dst, len))
-		return __csum_partial_copy_user(src, (__force void *)dst,
-						len, sum, err_ptr);
+	if (access_ok(VERIFY_WRITE, dst, len)) {
+		if (segment_eq(get_fs(), get_ds()))
+			return __csum_partial_copy_kernel(src,
+							  (__force void *)dst,
+							  len, sum, err_ptr);
+		else
+			return __csum_partial_copy_to_user(src,
+							   (__force void *)dst,
+							   len, sum, err_ptr);
+	}
 	if (len)
 		*err_ptr = -EFAULT;
 
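Caller's view of the split introduced here (illustrative sketch, not from the patch): csum_and_copy_from_user() now transparently selects the kernel or the EVA-aware user variant, so a consumer only has to check the error slot.

static __wsum demo_copy_and_checksum(void *kbuf, const void __user *ubuf,
				     int len)
{
	int err = 0;
	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len,
					      (__force __wsum)0, &err);

	return err ? (__force __wsum)0 : csum;
}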
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069bd8465..eefcaa363a87 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -22,11 +22,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
22 unsigned long dummy; 22 unsigned long dummy;
23 23
24 __asm__ __volatile__( 24 __asm__ __volatile__(
25 " .set mips3 \n" 25 " .set arch=r4000 \n"
26 "1: ll %0, %3 # xchg_u32 \n" 26 "1: ll %0, %3 # xchg_u32 \n"
27 " .set mips0 \n" 27 " .set mips0 \n"
28 " move %2, %z4 \n" 28 " move %2, %z4 \n"
29 " .set mips3 \n" 29 " .set arch=r4000 \n"
30 " sc %2, %1 \n" 30 " sc %2, %1 \n"
31 " beqzl %2, 1b \n" 31 " beqzl %2, 1b \n"
32 " .set mips0 \n" 32 " .set mips0 \n"
@@ -38,11 +38,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
38 38
39 do { 39 do {
40 __asm__ __volatile__( 40 __asm__ __volatile__(
41 " .set mips3 \n" 41 " .set arch=r4000 \n"
42 " ll %0, %3 # xchg_u32 \n" 42 " ll %0, %3 # xchg_u32 \n"
43 " .set mips0 \n" 43 " .set mips0 \n"
44 " move %2, %z4 \n" 44 " move %2, %z4 \n"
45 " .set mips3 \n" 45 " .set arch=r4000 \n"
46 " sc %2, %1 \n" 46 " sc %2, %1 \n"
47 " .set mips0 \n" 47 " .set mips0 \n"
48 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 48 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
@@ -74,7 +74,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
74 unsigned long dummy; 74 unsigned long dummy;
75 75
76 __asm__ __volatile__( 76 __asm__ __volatile__(
77 " .set mips3 \n" 77 " .set arch=r4000 \n"
78 "1: lld %0, %3 # xchg_u64 \n" 78 "1: lld %0, %3 # xchg_u64 \n"
79 " move %2, %z4 \n" 79 " move %2, %z4 \n"
80 " scd %2, %1 \n" 80 " scd %2, %1 \n"
@@ -88,7 +88,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
88 88
89 do { 89 do {
90 __asm__ __volatile__( 90 __asm__ __volatile__(
91 " .set mips3 \n" 91 " .set arch=r4000 \n"
92 " lld %0, %3 # xchg_u64 \n" 92 " lld %0, %3 # xchg_u64 \n"
93 " move %2, %z4 \n" 93 " move %2, %z4 \n"
94 " scd %2, %1 \n" 94 " scd %2, %1 \n"
@@ -145,12 +145,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
145 __asm__ __volatile__( \ 145 __asm__ __volatile__( \
146 " .set push \n" \ 146 " .set push \n" \
147 " .set noat \n" \ 147 " .set noat \n" \
148 " .set mips3 \n" \ 148 " .set arch=r4000 \n" \
149 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 149 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
150 " bne %0, %z3, 2f \n" \ 150 " bne %0, %z3, 2f \n" \
151 " .set mips0 \n" \ 151 " .set mips0 \n" \
152 " move $1, %z4 \n" \ 152 " move $1, %z4 \n" \
153 " .set mips3 \n" \ 153 " .set arch=r4000 \n" \
154 " " st " $1, %1 \n" \ 154 " " st " $1, %1 \n" \
155 " beqzl $1, 1b \n" \ 155 " beqzl $1, 1b \n" \
156 "2: \n" \ 156 "2: \n" \
@@ -162,12 +162,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
162 __asm__ __volatile__( \ 162 __asm__ __volatile__( \
163 " .set push \n" \ 163 " .set push \n" \
164 " .set noat \n" \ 164 " .set noat \n" \
165 " .set mips3 \n" \ 165 " .set arch=r4000 \n" \
166 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 166 "1: " ld " %0, %2 # __cmpxchg_asm \n" \
167 " bne %0, %z3, 2f \n" \ 167 " bne %0, %z3, 2f \n" \
168 " .set mips0 \n" \ 168 " .set mips0 \n" \
169 " move $1, %z4 \n" \ 169 " move $1, %z4 \n" \
170 " .set mips3 \n" \ 170 " .set arch=r4000 \n" \
171 " " st " $1, %1 \n" \ 171 " " st " $1, %1 \n" \
172 " beqz $1, 1b \n" \ 172 " beqz $1, 1b \n" \
173 " .set pop \n" \ 173 " .set pop \n" \
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 6e70b03b6aab..f56cc975b92f 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -26,7 +26,9 @@
26#ifndef cpu_has_segments 26#ifndef cpu_has_segments
27#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS) 27#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
28#endif 28#endif
29 29#ifndef cpu_has_eva
30#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
31#endif
30 32
31/* 33/*
32 * For the moment we don't consider R6000 and R8000 so we can assume that 34 * For the moment we don't consider R6000 and R8000 so we can assume that
@@ -299,4 +301,10 @@
299#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ) 301#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
300#endif 302#endif
301 303
304#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
305# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
306#elif !defined(cpu_has_msa)
307# define cpu_has_msa 0
308#endif
309
302#endif /* __ASM_CPU_FEATURES_H */ 310#endif /* __ASM_CPU_FEATURES_H */
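
The new cpu_has_eva and cpu_has_msa flags follow the existing cpu_has_* pattern and are meant to be tested at run time. A hypothetical (not in-tree) consumer would look like this:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/cpu-features.h>

/* Hypothetical example: report the new feature bits once at boot. */
static int __init report_new_features(void)
{
	if (cpu_has_eva)
		pr_info("CPU implements Enhanced Virtual Addressing (EVA)\n");
	if (cpu_has_msa)
		pr_info("CPU implements the MIPS SIMD Architecture (MSA)\n");
	return 0;
}
arch_initcall(report_new_features);
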
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 8f7adf0ac1e3..dc2135be2a3a 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -49,6 +49,7 @@ struct cpuinfo_mips {
49 unsigned long ases; 49 unsigned long ases;
50 unsigned int processor_id; 50 unsigned int processor_id;
51 unsigned int fpu_id; 51 unsigned int fpu_id;
52 unsigned int msa_id;
52 unsigned int cputype; 53 unsigned int cputype;
53 int isa_level; 54 int isa_level;
54 int tlbsize; 55 int tlbsize;
@@ -95,4 +96,31 @@ extern void cpu_report(void);
95extern const char *__cpu_name[]; 96extern const char *__cpu_name[];
96#define cpu_name_string() __cpu_name[smp_processor_id()] 97#define cpu_name_string() __cpu_name[smp_processor_id()]
97 98
99struct seq_file;
100struct notifier_block;
101
102extern int register_proc_cpuinfo_notifier(struct notifier_block *nb);
103extern int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v);
104
105#define proc_cpuinfo_notifier(fn, pri) \
106({ \
107 static struct notifier_block fn##_nb = { \
108 .notifier_call = fn, \
109 .priority = pri \
110 }; \
111 \
112 register_proc_cpuinfo_notifier(&fn##_nb); \
113})
114
115struct proc_cpuinfo_notifier_args {
116 struct seq_file *m;
117 unsigned long n;
118};
119
120#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
121# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
122#else
123# define cpu_vpe_id(cpuinfo) 0
124#endif
125
98#endif /* __ASM_CPU_INFO_H */ 126#endif /* __ASM_CPU_INFO_H */
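
A hedged sketch of how the new proc_cpuinfo notifier hook might be used: the callback is assumed to receive a proc_cpuinfo_notifier_args pointer (the seq_file plus the CPU number) from the /proc/cpuinfo show path; the function and output string below are hypothetical.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <asm/cpu-info.h>

static int example_show_extra(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct proc_cpuinfo_notifier_args *args = data;

	/* args->m is the seq_file being filled, args->n the CPU index */
	seq_printf(args->m, "example extension\t: cpu%lu\n", args->n);
	return NOTIFY_OK;
}

static int __init example_cpuinfo_hook(void)
{
	/* builds a notifier_block for example_show_extra and registers it */
	return proc_cpuinfo_notifier(example_show_extra, 0);
}
arch_initcall(example_cpuinfo_hook);
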
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index 02f591bd95ca..721906130a57 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -20,6 +20,10 @@ static inline int __pure __get_cpu_type(const int cpu_type)
20 case CPU_LOONGSON2: 20 case CPU_LOONGSON2:
21#endif 21#endif
22 22
23#ifdef CONFIG_SYS_HAS_CPU_LOONGSON3
24 case CPU_LOONGSON3:
25#endif
26
23#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B 27#ifdef CONFIG_SYS_HAS_CPU_LOONGSON1B
24 case CPU_LOONGSON1: 28 case CPU_LOONGSON1:
25#endif 29#endif
@@ -46,6 +50,8 @@ static inline int __pure __get_cpu_type(const int cpu_type)
46 case CPU_M14KEC: 50 case CPU_M14KEC:
47 case CPU_INTERAPTIV: 51 case CPU_INTERAPTIV:
48 case CPU_PROAPTIV: 52 case CPU_PROAPTIV:
53 case CPU_P5600:
54 case CPU_M5150:
49#endif 55#endif
50 56
51#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 57#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 76411df3d971..530eb8b3a68e 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -82,10 +82,10 @@
82#define PRID_IMP_RM7000 0x2700 82#define PRID_IMP_RM7000 0x2700
83#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */ 83#define PRID_IMP_NEVADA 0x2800 /* RM5260 ??? */
84#define PRID_IMP_RM9000 0x3400 84#define PRID_IMP_RM9000 0x3400
85#define PRID_IMP_LOONGSON1 0x4200 85#define PRID_IMP_LOONGSON_32 0x4200 /* Loongson-1 */
86#define PRID_IMP_R5432 0x5400 86#define PRID_IMP_R5432 0x5400
87#define PRID_IMP_R5500 0x5500 87#define PRID_IMP_R5500 0x5500
88#define PRID_IMP_LOONGSON2 0x6300 88#define PRID_IMP_LOONGSON_64 0x6300 /* Loongson-2/3 */
89 89
90#define PRID_IMP_UNKNOWN 0xff00 90#define PRID_IMP_UNKNOWN 0xff00
91 91
@@ -115,6 +115,8 @@
115#define PRID_IMP_INTERAPTIV_MP 0xa100 115#define PRID_IMP_INTERAPTIV_MP 0xa100
116#define PRID_IMP_PROAPTIV_UP 0xa200 116#define PRID_IMP_PROAPTIV_UP 0xa200
117#define PRID_IMP_PROAPTIV_MP 0xa300 117#define PRID_IMP_PROAPTIV_MP 0xa300
118#define PRID_IMP_M5150 0xa700
119#define PRID_IMP_P5600 0xa800
118 120
119/* 121/*
120 * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE 122 * These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -229,6 +231,7 @@
229#define PRID_REV_LOONGSON1B 0x0020 231#define PRID_REV_LOONGSON1B 0x0020
230#define PRID_REV_LOONGSON2E 0x0002 232#define PRID_REV_LOONGSON2E 0x0002
231#define PRID_REV_LOONGSON2F 0x0003 233#define PRID_REV_LOONGSON2F 0x0003
234#define PRID_REV_LOONGSON3A 0x0005
232 235
233/* 236/*
234 * Older processors used to encode processor version and revision in two 237 * Older processors used to encode processor version and revision in two
@@ -296,14 +299,14 @@ enum cpu_type_enum {
296 CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K, 299 CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
297 CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350, 300 CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
298 CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC, 301 CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
299 CPU_M14KEC, CPU_INTERAPTIV, CPU_PROAPTIV, 302 CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
300 303
301 /* 304 /*
302 * MIPS64 class processors 305 * MIPS64 class processors
303 */ 306 */
304 CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2, 307 CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
305 CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, 308 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
306 CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, 309 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
307 310
308 CPU_LAST 311 CPU_LAST
309}; 312};
@@ -358,6 +361,7 @@ enum cpu_type_enum {
358#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */ 361#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
359#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */ 362#define MIPS_CPU_TLBINV 0x02000000 /* CPU supports TLBINV/F */
360#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */ 363#define MIPS_CPU_SEGMENTS 0x04000000 /* CPU supports Segmentation Control registers */
364#define MIPS_CPU_EVA 0x80000000 /* CPU supports Enhanced Virtual Addressing */
361 365
362/* 366/*
363 * CPU ASE encodings 367 * CPU ASE encodings
@@ -370,5 +374,6 @@ enum cpu_type_enum {
370#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */ 374#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
371#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */ 375#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
372#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */ 376#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
377#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
373 378
374#endif /* _ASM_CPU_H */ 379#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 84238c574d5e..06412aa9e3fb 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -49,9 +49,14 @@ static inline int dma_mapping_error(struct device *dev, u64 mask)
49static inline int 49static inline int
50dma_set_mask(struct device *dev, u64 mask) 50dma_set_mask(struct device *dev, u64 mask)
51{ 51{
52 struct dma_map_ops *ops = get_dma_ops(dev);
53
52 if(!dev->dma_mask || !dma_supported(dev, mask)) 54 if(!dev->dma_mask || !dma_supported(dev, mask))
53 return -EIO; 55 return -EIO;
54 56
57 if (ops->set_dma_mask)
58 return ops->set_dma_mask(dev, mask);
59
55 *dev->dma_mask = mask; 60 *dev->dma_mask = mask;
56 61
57 return 0; 62 return 0;
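
With the hunk above, a platform's dma_map_ops can intercept dma_set_mask() through its set_dma_mask callback. A sketch under the assumption that the platform caps DMA at 32 bits; all names here are hypothetical:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev, u64 mask)
{
	/* hypothetical platform restriction: no 64-bit DMA */
	if (mask > DMA_BIT_MASK(32))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}

static struct dma_map_ops example_dma_ops = {
	/* .alloc, .map_page, ... elided for brevity */
	.set_dma_mask	= example_set_dma_mask,
};
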
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 58e50cbdb1a6..4d86b72750c7 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -180,7 +180,7 @@ static inline void restore_fp(struct task_struct *tsk)
180 _restore_fp(tsk); 180 _restore_fp(tsk);
181} 181}
182 182
183static inline fpureg_t *get_fpu_regs(struct task_struct *tsk) 183static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
184{ 184{
185 if (tsk == current) { 185 if (tsk == current) {
186 preempt_disable(); 186 preempt_disable();
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ea15815d3ee..194cda0396a3 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -12,6 +12,7 @@
12 12
13#include <linux/futex.h> 13#include <linux/futex.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/asm-eva.h>
15#include <asm/barrier.h> 16#include <asm/barrier.h>
16#include <asm/errno.h> 17#include <asm/errno.h>
17#include <asm/war.h> 18#include <asm/war.h>
@@ -22,11 +23,11 @@
22 __asm__ __volatile__( \ 23 __asm__ __volatile__( \
23 " .set push \n" \ 24 " .set push \n" \
24 " .set noat \n" \ 25 " .set noat \n" \
25 " .set mips3 \n" \ 26 " .set arch=r4000 \n" \
26 "1: ll %1, %4 # __futex_atomic_op \n" \ 27 "1: ll %1, %4 # __futex_atomic_op \n" \
27 " .set mips0 \n" \ 28 " .set mips0 \n" \
28 " " insn " \n" \ 29 " " insn " \n" \
29 " .set mips3 \n" \ 30 " .set arch=r4000 \n" \
30 "2: sc $1, %2 \n" \ 31 "2: sc $1, %2 \n" \
31 " beqzl $1, 1b \n" \ 32 " beqzl $1, 1b \n" \
32 __WEAK_LLSC_MB \ 33 __WEAK_LLSC_MB \
@@ -48,12 +49,12 @@
48 __asm__ __volatile__( \ 49 __asm__ __volatile__( \
49 " .set push \n" \ 50 " .set push \n" \
50 " .set noat \n" \ 51 " .set noat \n" \
51 " .set mips3 \n" \ 52 " .set arch=r4000 \n" \
52 "1: ll %1, %4 # __futex_atomic_op \n" \ 53 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \
53 " .set mips0 \n" \ 54 " .set mips0 \n" \
54 " " insn " \n" \ 55 " " insn " \n" \
55 " .set mips3 \n" \ 56 " .set arch=r4000 \n" \
56 "2: sc $1, %2 \n" \ 57 "2: "user_sc("$1", "%2")" \n" \
57 " beqz $1, 1b \n" \ 58 " beqz $1, 1b \n" \
58 __WEAK_LLSC_MB \ 59 __WEAK_LLSC_MB \
59 "3: \n" \ 60 "3: \n" \
@@ -146,12 +147,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
146 "# futex_atomic_cmpxchg_inatomic \n" 147 "# futex_atomic_cmpxchg_inatomic \n"
147 " .set push \n" 148 " .set push \n"
148 " .set noat \n" 149 " .set noat \n"
149 " .set mips3 \n" 150 " .set arch=r4000 \n"
150 "1: ll %1, %3 \n" 151 "1: ll %1, %3 \n"
151 " bne %1, %z4, 3f \n" 152 " bne %1, %z4, 3f \n"
152 " .set mips0 \n" 153 " .set mips0 \n"
153 " move $1, %z5 \n" 154 " move $1, %z5 \n"
154 " .set mips3 \n" 155 " .set arch=r4000 \n"
155 "2: sc $1, %2 \n" 156 "2: sc $1, %2 \n"
156 " beqzl $1, 1b \n" 157 " beqzl $1, 1b \n"
157 __WEAK_LLSC_MB 158 __WEAK_LLSC_MB
@@ -173,13 +174,13 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
173 "# futex_atomic_cmpxchg_inatomic \n" 174 "# futex_atomic_cmpxchg_inatomic \n"
174 " .set push \n" 175 " .set push \n"
175 " .set noat \n" 176 " .set noat \n"
176 " .set mips3 \n" 177 " .set arch=r4000 \n"
177 "1: ll %1, %3 \n" 178 "1: "user_ll("%1", "%3")" \n"
178 " bne %1, %z4, 3f \n" 179 " bne %1, %z4, 3f \n"
179 " .set mips0 \n" 180 " .set mips0 \n"
180 " move $1, %z5 \n" 181 " move $1, %z5 \n"
181 " .set mips3 \n" 182 " .set arch=r4000 \n"
182 "2: sc $1, %2 \n" 183 "2: "user_sc("$1", "%2")" \n"
183 " beqz $1, 1b \n" 184 " beqz $1, 1b \n"
184 __WEAK_LLSC_MB 185 __WEAK_LLSC_MB
185 "3: \n" 186 "3: \n"
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
index d6c50a7e9ede..f3e6978aad70 100644
--- a/arch/mips/include/asm/fw/fw.h
+++ b/arch/mips/include/asm/fw/fw.h
@@ -38,7 +38,7 @@ extern int *_fw_envp;
38 38
39extern void fw_init_cmdline(void); 39extern void fw_init_cmdline(void);
40extern char *fw_getcmdline(void); 40extern char *fw_getcmdline(void);
41extern fw_memblock_t *fw_getmdesc(void); 41extern fw_memblock_t *fw_getmdesc(int);
42extern void fw_meminit(void); 42extern void fw_meminit(void);
43extern char *fw_getenv(char *name); 43extern char *fw_getenv(char *name);
44extern unsigned long fw_getenvl(char *name); 44extern unsigned long fw_getenvl(char *name);
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
deleted file mode 100644
index a7359f77a48e..000000000000
--- a/arch/mips/include/asm/gcmpregs.h
+++ /dev/null
@@ -1,125 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 07 MIPS Technologies, Inc.
7 *
8 * Multiprocessor Subsystem Register Definitions
9 *
10 */
11#ifndef _ASM_GCMPREGS_H
12#define _ASM_GCMPREGS_H
13
14
15/* Offsets to major blocks within GCMP from GCMP base */
16#define GCMP_GCB_OFS 0x0000 /* Global Control Block */
17#define GCMP_CLCB_OFS 0x2000 /* Core Local Control Block */
18#define GCMP_COCB_OFS 0x4000 /* Core Other Control Block */
19#define GCMP_GDB_OFS 0x8000 /* Global Debug Block */
20
21/* Offsets to individual GCMP registers from GCMP base */
22#define GCMPOFS(block, tag, reg) \
23 (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
24#define GCMPOFSn(block, tag, reg, n) \
25 (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS(n))
26
27#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg)
28#define GCMPGCBOFSn(reg, n) GCMPOFSn(GCB, GCB, reg, n)
29#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg)
30#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg)
31#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg)
32
33/* GCMP register access */
34#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
35#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
36#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
37#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
38#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
39
40/* Mask generation */
41#define GCMPMSK(block, reg, bits) (MSK(bits)<<GCMP_##block##_##reg##_SHF)
42#define GCMPGCBMSK(reg, bits) GCMPMSK(GCB, reg, bits)
43#define GCMPCCBMSK(reg, bits) GCMPMSK(CCB, reg, bits)
44#define GCMPGDBMSK(reg, bits) GCMPMSK(GDB, reg, bits)
45
46/* GCB registers */
47#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
48#define GCMP_GCB_GC_NUMIOCU_SHF 8
49#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
50#define GCMP_GCB_GC_NUMCORES_SHF 0
51#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
52#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
53#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
54#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
55#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
56#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
57#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
58#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
59#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
60#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
61#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
62#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
63#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
64#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
65#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
66#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
67#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
68#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
69#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
70#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
71#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
72#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
73#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
74#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
75#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
76#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
77#define GCMP_GCB_GICBA_BASE_SHF 17
78#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
79#define GCMP_GCB_GICBA_EN_SHF 0
80#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
81
82/* GCB Regions */
83#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
84#define GCMP_GCB_CMxBASE_BASE_SHF 16
85#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
86#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
87#define GCMP_GCB_CMxMASK_MASK_SHF 16
88#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
89#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
90#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
91#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
92#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
93#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
94#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
95
96
97/* Core local/Core other control block registers */
98#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
99#define GCMP_CCB_RESETR_INRESET_SHF 0
100#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
101#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
102#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
103#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
104#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
105#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
106#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
107#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
108#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
109#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
110#define GCMP_CCB_CFG_NUMVPE_SHF 0
111#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
112#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
113#define GCMP_CCB_OTHER_CORENUM_SHF 16
114#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
115#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
116#define GCMP_CCB_RESETBASE_BEV_SHF 12
117#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
118#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
119#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
120#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
121
122extern int __init gcmp_probe(unsigned long, unsigned long);
123extern int __init gcmp_niocu(void);
124extern void __init gcmp_setregion(int, unsigned long, unsigned long, int);
125#endif /* _ASM_GCMPREGS_H */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index b2e3e93dd7d8..082716690589 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -11,6 +11,9 @@
11#ifndef _ASM_GICREGS_H 11#ifndef _ASM_GICREGS_H
12#define _ASM_GICREGS_H 12#define _ASM_GICREGS_H
13 13
14#include <linux/bitmap.h>
15#include <linux/threads.h>
16
14#undef GICISBYTELITTLEENDIAN 17#undef GICISBYTELITTLEENDIAN
15 18
16/* Constants */ 19/* Constants */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 3321dd5a8872..933b50e125a0 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -331,7 +331,7 @@ static inline void pfx##write##bwlq(type val, \
331 if (irq) \ 331 if (irq) \
332 local_irq_save(__flags); \ 332 local_irq_save(__flags); \
333 __asm__ __volatile__( \ 333 __asm__ __volatile__( \
334 ".set mips3" "\t\t# __writeq""\n\t" \ 334 ".set arch=r4000" "\t\t# __writeq""\n\t" \
335 "dsll32 %L0, %L0, 0" "\n\t" \ 335 "dsll32 %L0, %L0, 0" "\n\t" \
336 "dsrl32 %L0, %L0, 0" "\n\t" \ 336 "dsrl32 %L0, %L0, 0" "\n\t" \
337 "dsll32 %M0, %M0, 0" "\n\t" \ 337 "dsll32 %M0, %M0, 0" "\n\t" \
@@ -361,7 +361,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
361 if (irq) \ 361 if (irq) \
362 local_irq_save(__flags); \ 362 local_irq_save(__flags); \
363 __asm__ __volatile__( \ 363 __asm__ __volatile__( \
364 ".set mips3" "\t\t# __readq" "\n\t" \ 364 ".set arch=r4000" "\t\t# __readq" "\n\t" \
365 "ld %L0, %1" "\n\t" \ 365 "ld %L0, %1" "\n\t" \
366 "dsra32 %M0, %L0, 0" "\n\t" \ 366 "dsra32 %M0, %L0, 0" "\n\t" \
367 "sll %L0, %L0, 0" "\n\t" \ 367 "sll %L0, %L0, 0" "\n\t" \
@@ -584,7 +584,7 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
584 * 584 *
585 * This API used to be exported; it now is for arch code internal use only. 585 * This API used to be exported; it now is for arch code internal use only.
586 */ 586 */
587#ifdef CONFIG_DMA_NONCOHERENT 587#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
588 588
589extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); 589extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
590extern void (*_dma_cache_wback)(unsigned long start, unsigned long size); 590extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
@@ -603,7 +603,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
603#define dma_cache_inv(start,size) \ 603#define dma_cache_inv(start,size) \
604 do { (void) (start); (void) (size); } while (0) 604 do { (void) (start); (void) (size); } while (0)
605 605
606#endif /* CONFIG_DMA_NONCOHERENT */ 606#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
607 607
608/* 608/*
609 * Read a 32-bit register that requires a 64-bit read cycle on the bus. 609 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a995fce87791..060aaa6348d7 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -30,16 +30,16 @@
30 30
31 31
32/* Special address that contains the comm page, used for reducing # of traps */ 32/* Special address that contains the comm page, used for reducing # of traps */
33#define KVM_GUEST_COMMPAGE_ADDR 0x0 33#define KVM_GUEST_COMMPAGE_ADDR 0x0
34 34
35#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ 35#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
36 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) 36 ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
37 37
38#define KVM_GUEST_KUSEG 0x00000000UL 38#define KVM_GUEST_KUSEG 0x00000000UL
39#define KVM_GUEST_KSEG0 0x40000000UL 39#define KVM_GUEST_KSEG0 0x40000000UL
40#define KVM_GUEST_KSEG23 0x60000000UL 40#define KVM_GUEST_KSEG23 0x60000000UL
41#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000) 41#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
42#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) 42#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
43 43
44#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) 44#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
45#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) 45#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
@@ -52,17 +52,17 @@
52#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) 52#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
53#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) 53#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
54 54
55#define KVM_INVALID_PAGE 0xdeadbeef 55#define KVM_INVALID_PAGE 0xdeadbeef
56#define KVM_INVALID_INST 0xdeadbeef 56#define KVM_INVALID_INST 0xdeadbeef
57#define KVM_INVALID_ADDR 0xdeadbeef 57#define KVM_INVALID_ADDR 0xdeadbeef
58 58
59#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL 59#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
60 60
61#define GUEST_TICKS_PER_JIFFY (40000000/HZ) 61#define GUEST_TICKS_PER_JIFFY (40000000/HZ)
62#define MS_TO_NS(x) (x * 1E6L) 62#define MS_TO_NS(x) (x * 1E6L)
63 63
64#define CAUSEB_DC 27 64#define CAUSEB_DC 27
65#define CAUSEF_DC (_ULCAST_(1) << 27) 65#define CAUSEF_DC (_ULCAST_(1) << 27)
66 66
67struct kvm; 67struct kvm;
68struct kvm_run; 68struct kvm_run;
@@ -126,8 +126,8 @@ struct kvm_arch {
126 int commpage_tlb; 126 int commpage_tlb;
127}; 127};
128 128
129#define N_MIPS_COPROC_REGS 32 129#define N_MIPS_COPROC_REGS 32
130#define N_MIPS_COPROC_SEL 8 130#define N_MIPS_COPROC_SEL 8
131 131
132struct mips_coproc { 132struct mips_coproc {
133 unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; 133 unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
@@ -139,124 +139,124 @@ struct mips_coproc {
139/* 139/*
140 * Coprocessor 0 register names 140 * Coprocessor 0 register names
141 */ 141 */
142#define MIPS_CP0_TLB_INDEX 0 142#define MIPS_CP0_TLB_INDEX 0
143#define MIPS_CP0_TLB_RANDOM 1 143#define MIPS_CP0_TLB_RANDOM 1
144#define MIPS_CP0_TLB_LOW 2 144#define MIPS_CP0_TLB_LOW 2
145#define MIPS_CP0_TLB_LO0 2 145#define MIPS_CP0_TLB_LO0 2
146#define MIPS_CP0_TLB_LO1 3 146#define MIPS_CP0_TLB_LO1 3
147#define MIPS_CP0_TLB_CONTEXT 4 147#define MIPS_CP0_TLB_CONTEXT 4
148#define MIPS_CP0_TLB_PG_MASK 5 148#define MIPS_CP0_TLB_PG_MASK 5
149#define MIPS_CP0_TLB_WIRED 6 149#define MIPS_CP0_TLB_WIRED 6
150#define MIPS_CP0_HWRENA 7 150#define MIPS_CP0_HWRENA 7
151#define MIPS_CP0_BAD_VADDR 8 151#define MIPS_CP0_BAD_VADDR 8
152#define MIPS_CP0_COUNT 9 152#define MIPS_CP0_COUNT 9
153#define MIPS_CP0_TLB_HI 10 153#define MIPS_CP0_TLB_HI 10
154#define MIPS_CP0_COMPARE 11 154#define MIPS_CP0_COMPARE 11
155#define MIPS_CP0_STATUS 12 155#define MIPS_CP0_STATUS 12
156#define MIPS_CP0_CAUSE 13 156#define MIPS_CP0_CAUSE 13
157#define MIPS_CP0_EXC_PC 14 157#define MIPS_CP0_EXC_PC 14
158#define MIPS_CP0_PRID 15 158#define MIPS_CP0_PRID 15
159#define MIPS_CP0_CONFIG 16 159#define MIPS_CP0_CONFIG 16
160#define MIPS_CP0_LLADDR 17 160#define MIPS_CP0_LLADDR 17
161#define MIPS_CP0_WATCH_LO 18 161#define MIPS_CP0_WATCH_LO 18
162#define MIPS_CP0_WATCH_HI 19 162#define MIPS_CP0_WATCH_HI 19
163#define MIPS_CP0_TLB_XCONTEXT 20 163#define MIPS_CP0_TLB_XCONTEXT 20
164#define MIPS_CP0_ECC 26 164#define MIPS_CP0_ECC 26
165#define MIPS_CP0_CACHE_ERR 27 165#define MIPS_CP0_CACHE_ERR 27
166#define MIPS_CP0_TAG_LO 28 166#define MIPS_CP0_TAG_LO 28
167#define MIPS_CP0_TAG_HI 29 167#define MIPS_CP0_TAG_HI 29
168#define MIPS_CP0_ERROR_PC 30 168#define MIPS_CP0_ERROR_PC 30
169#define MIPS_CP0_DEBUG 23 169#define MIPS_CP0_DEBUG 23
170#define MIPS_CP0_DEPC 24 170#define MIPS_CP0_DEPC 24
171#define MIPS_CP0_PERFCNT 25 171#define MIPS_CP0_PERFCNT 25
172#define MIPS_CP0_ERRCTL 26 172#define MIPS_CP0_ERRCTL 26
173#define MIPS_CP0_DATA_LO 28 173#define MIPS_CP0_DATA_LO 28
174#define MIPS_CP0_DATA_HI 29 174#define MIPS_CP0_DATA_HI 29
175#define MIPS_CP0_DESAVE 31 175#define MIPS_CP0_DESAVE 31
176 176
177#define MIPS_CP0_CONFIG_SEL 0 177#define MIPS_CP0_CONFIG_SEL 0
178#define MIPS_CP0_CONFIG1_SEL 1 178#define MIPS_CP0_CONFIG1_SEL 1
179#define MIPS_CP0_CONFIG2_SEL 2 179#define MIPS_CP0_CONFIG2_SEL 2
180#define MIPS_CP0_CONFIG3_SEL 3 180#define MIPS_CP0_CONFIG3_SEL 3
181 181
182/* Config0 register bits */ 182/* Config0 register bits */
183#define CP0C0_M 31 183#define CP0C0_M 31
184#define CP0C0_K23 28 184#define CP0C0_K23 28
185#define CP0C0_KU 25 185#define CP0C0_KU 25
186#define CP0C0_MDU 20 186#define CP0C0_MDU 20
187#define CP0C0_MM 17 187#define CP0C0_MM 17
188#define CP0C0_BM 16 188#define CP0C0_BM 16
189#define CP0C0_BE 15 189#define CP0C0_BE 15
190#define CP0C0_AT 13 190#define CP0C0_AT 13
191#define CP0C0_AR 10 191#define CP0C0_AR 10
192#define CP0C0_MT 7 192#define CP0C0_MT 7
193#define CP0C0_VI 3 193#define CP0C0_VI 3
194#define CP0C0_K0 0 194#define CP0C0_K0 0
195 195
196/* Config1 register bits */ 196/* Config1 register bits */
197#define CP0C1_M 31 197#define CP0C1_M 31
198#define CP0C1_MMU 25 198#define CP0C1_MMU 25
199#define CP0C1_IS 22 199#define CP0C1_IS 22
200#define CP0C1_IL 19 200#define CP0C1_IL 19
201#define CP0C1_IA 16 201#define CP0C1_IA 16
202#define CP0C1_DS 13 202#define CP0C1_DS 13
203#define CP0C1_DL 10 203#define CP0C1_DL 10
204#define CP0C1_DA 7 204#define CP0C1_DA 7
205#define CP0C1_C2 6 205#define CP0C1_C2 6
206#define CP0C1_MD 5 206#define CP0C1_MD 5
207#define CP0C1_PC 4 207#define CP0C1_PC 4
208#define CP0C1_WR 3 208#define CP0C1_WR 3
209#define CP0C1_CA 2 209#define CP0C1_CA 2
210#define CP0C1_EP 1 210#define CP0C1_EP 1
211#define CP0C1_FP 0 211#define CP0C1_FP 0
212 212
213/* Config2 Register bits */ 213/* Config2 Register bits */
214#define CP0C2_M 31 214#define CP0C2_M 31
215#define CP0C2_TU 28 215#define CP0C2_TU 28
216#define CP0C2_TS 24 216#define CP0C2_TS 24
217#define CP0C2_TL 20 217#define CP0C2_TL 20
218#define CP0C2_TA 16 218#define CP0C2_TA 16
219#define CP0C2_SU 12 219#define CP0C2_SU 12
220#define CP0C2_SS 8 220#define CP0C2_SS 8
221#define CP0C2_SL 4 221#define CP0C2_SL 4
222#define CP0C2_SA 0 222#define CP0C2_SA 0
223 223
224/* Config3 Register bits */ 224/* Config3 Register bits */
225#define CP0C3_M 31 225#define CP0C3_M 31
226#define CP0C3_ISA_ON_EXC 16 226#define CP0C3_ISA_ON_EXC 16
227#define CP0C3_ULRI 13 227#define CP0C3_ULRI 13
228#define CP0C3_DSPP 10 228#define CP0C3_DSPP 10
229#define CP0C3_LPA 7 229#define CP0C3_LPA 7
230#define CP0C3_VEIC 6 230#define CP0C3_VEIC 6
231#define CP0C3_VInt 5 231#define CP0C3_VInt 5
232#define CP0C3_SP 4 232#define CP0C3_SP 4
233#define CP0C3_MT 2 233#define CP0C3_MT 2
234#define CP0C3_SM 1 234#define CP0C3_SM 1
235#define CP0C3_TL 0 235#define CP0C3_TL 0
236 236
237/* Have config1, Cacheable, noncoherent, write-back, write allocate*/ 237/* Have config1, Cacheable, noncoherent, write-back, write allocate*/
238#define MIPS_CONFIG0 \ 238#define MIPS_CONFIG0 \
239 ((1 << CP0C0_M) | (0x3 << CP0C0_K0)) 239 ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
240 240
241/* Have config2, no coprocessor2 attached, no MDMX support attached, 241/* Have config2, no coprocessor2 attached, no MDMX support attached,
242 no performance counters, watch registers present, 242 no performance counters, watch registers present,
243 no code compression, EJTAG present, no FPU, no watch registers */ 243 no code compression, EJTAG present, no FPU, no watch registers */
244#define MIPS_CONFIG1 \ 244#define MIPS_CONFIG1 \
245((1 << CP0C1_M) | \ 245((1 << CP0C1_M) | \
246 (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ 246 (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
247 (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ 247 (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
248 (0 << CP0C1_FP)) 248 (0 << CP0C1_FP))
249 249
250/* Have config3, no tertiary/secondary caches implemented */ 250/* Have config3, no tertiary/secondary caches implemented */
251#define MIPS_CONFIG2 \ 251#define MIPS_CONFIG2 \
252((1 << CP0C2_M)) 252((1 << CP0C2_M))
253 253
254/* No config4, no DSP ASE, no large physaddr (PABITS), 254/* No config4, no DSP ASE, no large physaddr (PABITS),
255 no external interrupt controller, no vectored interrupts, 255 no external interrupt controller, no vectored interrupts,
256 no 1kb pages, no SmartMIPS ASE, no trace logic */ 256 no 1kb pages, no SmartMIPS ASE, no trace logic */
257#define MIPS_CONFIG3 \ 257#define MIPS_CONFIG3 \
258((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ 258((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
259 (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ 259 (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
260 (0 << CP0C3_SM) | (0 << CP0C3_TL)) 260 (0 << CP0C3_SM) | (0 << CP0C3_TL))
261 261
262/* MMU types, the first four entries have the same layout as the 262/* MMU types, the first four entries have the same layout as the
@@ -274,36 +274,36 @@ enum mips_mmu_types {
274/* 274/*
275 * Trap codes 275 * Trap codes
276 */ 276 */
277#define T_INT 0 /* Interrupt pending */ 277#define T_INT 0 /* Interrupt pending */
278#define T_TLB_MOD 1 /* TLB modified fault */ 278#define T_TLB_MOD 1 /* TLB modified fault */
279#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */ 279#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
280#define T_TLB_ST_MISS 3 /* TLB miss on a store */ 280#define T_TLB_ST_MISS 3 /* TLB miss on a store */
281#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */ 281#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
282#define T_ADDR_ERR_ST 5 /* Address error on a store */ 282#define T_ADDR_ERR_ST 5 /* Address error on a store */
283#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */ 283#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
284#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */ 284#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
285#define T_SYSCALL 8 /* System call */ 285#define T_SYSCALL 8 /* System call */
286#define T_BREAK 9 /* Breakpoint */ 286#define T_BREAK 9 /* Breakpoint */
287#define T_RES_INST 10 /* Reserved instruction exception */ 287#define T_RES_INST 10 /* Reserved instruction exception */
288#define T_COP_UNUSABLE 11 /* Coprocessor unusable */ 288#define T_COP_UNUSABLE 11 /* Coprocessor unusable */
289#define T_OVFLOW 12 /* Arithmetic overflow */ 289#define T_OVFLOW 12 /* Arithmetic overflow */
290 290
291/* 291/*
292 * Trap definitions added for r4000 port. 292 * Trap definitions added for r4000 port.
293 */ 293 */
294#define T_TRAP 13 /* Trap instruction */ 294#define T_TRAP 13 /* Trap instruction */
295#define T_VCEI 14 /* Virtual coherency exception */ 295#define T_VCEI 14 /* Virtual coherency exception */
296#define T_FPE 15 /* Floating point exception */ 296#define T_FPE 15 /* Floating point exception */
297#define T_WATCH 23 /* Watch address reference */ 297#define T_WATCH 23 /* Watch address reference */
298#define T_VCED 31 /* Virtual coherency data */ 298#define T_VCED 31 /* Virtual coherency data */
299 299
300/* Resume Flags */ 300/* Resume Flags */
301#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */ 301#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
302#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ 302#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
303 303
304#define RESUME_GUEST 0 304#define RESUME_GUEST 0
305#define RESUME_GUEST_DR RESUME_FLAG_DR 305#define RESUME_GUEST_DR RESUME_FLAG_DR
306#define RESUME_HOST RESUME_FLAG_HOST 306#define RESUME_HOST RESUME_FLAG_HOST
307 307
308enum emulation_result { 308enum emulation_result {
309 EMULATE_DONE, /* no further processing */ 309 EMULATE_DONE, /* no further processing */
@@ -313,24 +313,27 @@ enum emulation_result {
313 EMULATE_PRIV_FAIL, 313 EMULATE_PRIV_FAIL,
314}; 314};
315 315
316#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */ 316#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
317#define MIPS3_PG_V 0x00000002 /* Valid */ 317#define MIPS3_PG_V 0x00000002 /* Valid */
318#define MIPS3_PG_NV 0x00000000 318#define MIPS3_PG_NV 0x00000000
319#define MIPS3_PG_D 0x00000004 /* Dirty */ 319#define MIPS3_PG_D 0x00000004 /* Dirty */
320 320
321#define mips3_paddr_to_tlbpfn(x) \ 321#define mips3_paddr_to_tlbpfn(x) \
322 (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) 322 (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
323#define mips3_tlbpfn_to_paddr(x) \ 323#define mips3_tlbpfn_to_paddr(x) \
324 ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) 324 ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
325 325
326#define MIPS3_PG_SHIFT 6 326#define MIPS3_PG_SHIFT 6
327#define MIPS3_PG_FRAME 0x3fffffc0 327#define MIPS3_PG_FRAME 0x3fffffc0
328 328
329#define VPN2_MASK 0xffffe000 329#define VPN2_MASK 0xffffe000
330#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) 330#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
331#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) 331 ((x).tlb_lo1 & MIPS3_PG_G))
332#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) 332#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
333#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) 333#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
334#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
335 ? ((x).tlb_lo1 & MIPS3_PG_V) \
336 : ((x).tlb_lo0 & MIPS3_PG_V))
334 337
335struct kvm_mips_tlb { 338struct kvm_mips_tlb {
336 long tlb_mask; 339 long tlb_mask;
@@ -339,7 +342,7 @@ struct kvm_mips_tlb {
339 long tlb_lo1; 342 long tlb_lo1;
340}; 343};
341 344
342#define KVM_MIPS_GUEST_TLB_SIZE 64 345#define KVM_MIPS_GUEST_TLB_SIZE 64
343struct kvm_vcpu_arch { 346struct kvm_vcpu_arch {
344 void *host_ebase, *guest_ebase; 347 void *host_ebase, *guest_ebase;
345 unsigned long host_stack; 348 unsigned long host_stack;
@@ -400,65 +403,67 @@ struct kvm_vcpu_arch {
400}; 403};
401 404
402 405
403#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) 406#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
404#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) 407#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
405#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) 408#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
406#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) 409#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
407#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) 410#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
408#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) 411#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
409#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) 412#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
410#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) 413#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
411#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) 414#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
412#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0]) 415#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
413#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) 416#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
414#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0]) 417#define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
415#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) 418#define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
416#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0]) 419#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
417#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val)) 420#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
418#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0]) 421#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
419#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) 422#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
420#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0]) 423#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
421#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val)) 424#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
422#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0]) 425#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
423#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val)) 426#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
424#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1]) 427#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
425#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val)) 428#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
426#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0]) 429#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
427#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val)) 430#define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
428#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0]) 431#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
429#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) 432#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
430#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0]) 433#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
431#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val)) 434#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
432#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1]) 435#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
433#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val)) 436#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
434#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0]) 437#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
435#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) 438#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
436#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) 439#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
437#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) 440#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
438#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) 441#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
439#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) 442#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
440#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) 443#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
441#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) 444#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
442#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) 445#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
443#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) 446#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
444#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) 447#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
445#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) 448#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
446 449#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
447#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val)) 450#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
448#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val)) 451
449#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val)) 452#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
450#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val)) 453#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
451#define kvm_change_c0_guest_cause(cop0, change, val) \ 454#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
452{ \ 455#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
453 kvm_clear_c0_guest_cause(cop0, change); \ 456#define kvm_change_c0_guest_cause(cop0, change, val) \
454 kvm_set_c0_guest_cause(cop0, ((val) & (change))); \ 457{ \
458 kvm_clear_c0_guest_cause(cop0, change); \
459 kvm_set_c0_guest_cause(cop0, ((val) & (change))); \
455} 460}
456#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val)) 461#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
457#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val)) 462#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
458#define kvm_change_c0_guest_ebase(cop0, change, val) \ 463#define kvm_change_c0_guest_ebase(cop0, change, val) \
459{ \ 464{ \
460 kvm_clear_c0_guest_ebase(cop0, change); \ 465 kvm_clear_c0_guest_ebase(cop0, change); \
461 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ 466 kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
462} 467}
463 468
464 469
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index d44622cd74be..46dfc3c1fd49 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -33,7 +33,7 @@ static __inline__ long local_add_return(long i, local_t * l)
33 unsigned long temp; 33 unsigned long temp;
34 34
35 __asm__ __volatile__( 35 __asm__ __volatile__(
36 " .set mips3 \n" 36 " .set arch=r4000 \n"
37 "1:" __LL "%1, %2 # local_add_return \n" 37 "1:" __LL "%1, %2 # local_add_return \n"
38 " addu %0, %1, %3 \n" 38 " addu %0, %1, %3 \n"
39 __SC "%0, %2 \n" 39 __SC "%0, %2 \n"
@@ -47,7 +47,7 @@ static __inline__ long local_add_return(long i, local_t * l)
47 unsigned long temp; 47 unsigned long temp;
48 48
49 __asm__ __volatile__( 49 __asm__ __volatile__(
50 " .set mips3 \n" 50 " .set arch=r4000 \n"
51 "1:" __LL "%1, %2 # local_add_return \n" 51 "1:" __LL "%1, %2 # local_add_return \n"
52 " addu %0, %1, %3 \n" 52 " addu %0, %1, %3 \n"
53 __SC "%0, %2 \n" 53 __SC "%0, %2 \n"
@@ -78,7 +78,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
78 unsigned long temp; 78 unsigned long temp;
79 79
80 __asm__ __volatile__( 80 __asm__ __volatile__(
81 " .set mips3 \n" 81 " .set arch=r4000 \n"
82 "1:" __LL "%1, %2 # local_sub_return \n" 82 "1:" __LL "%1, %2 # local_sub_return \n"
83 " subu %0, %1, %3 \n" 83 " subu %0, %1, %3 \n"
84 __SC "%0, %2 \n" 84 __SC "%0, %2 \n"
@@ -92,7 +92,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
92 unsigned long temp; 92 unsigned long temp;
93 93
94 __asm__ __volatile__( 94 __asm__ __volatile__(
95 " .set mips3 \n" 95 " .set arch=r4000 \n"
96 "1:" __LL "%1, %2 # local_sub_return \n" 96 "1:" __LL "%1, %2 # local_sub_return \n"
97 " subu %0, %1, %3 \n" 97 " subu %0, %1, %3 \n"
98 __SC "%0, %2 \n" 98 __SC "%0, %2 \n"
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 54f9e84db8ac..b4c3ecb17d48 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -1161,18 +1161,6 @@ enum soc_au1200_ints {
1161#define MAC_RX_BUFF3_STATUS 0x30 1161#define MAC_RX_BUFF3_STATUS 0x30
1162#define MAC_RX_BUFF3_ADDR 0x34 1162#define MAC_RX_BUFF3_ADDR 0x34
1163 1163
1164#define UART_RX 0 /* Receive buffer */
1165#define UART_TX 4 /* Transmit buffer */
1166#define UART_IER 8 /* Interrupt Enable Register */
1167#define UART_IIR 0xC /* Interrupt ID Register */
1168#define UART_FCR 0x10 /* FIFO Control Register */
1169#define UART_LCR 0x14 /* Line Control Register */
1170#define UART_MCR 0x18 /* Modem Control Register */
1171#define UART_LSR 0x1C /* Line Status Register */
1172#define UART_MSR 0x20 /* Modem Status Register */
1173#define UART_CLK 0x28 /* Baud Rate Clock Divider */
1174#define UART_MOD_CNTRL 0x100 /* Module Control */
1175
1176/* SSIO */ 1164/* SSIO */
1177#define SSI0_STATUS 0xB1600000 1165#define SSI0_STATUS 0xB1600000
1178# define SSI_STATUS_BF (1 << 4) 1166# define SSI_STATUS_BF (1 << 4)
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index 40005fb39618..bba7399a49a3 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -27,7 +27,11 @@ enum bcm47xx_board {
27 BCM47XX_BOARD_ASUS_WL700GE, 27 BCM47XX_BOARD_ASUS_WL700GE,
28 BCM47XX_BOARD_ASUS_WLHDD, 28 BCM47XX_BOARD_ASUS_WLHDD,
29 29
30 BCM47XX_BOARD_BELKIN_F7D3301,
31 BCM47XX_BOARD_BELKIN_F7D3302,
30 BCM47XX_BOARD_BELKIN_F7D4301, 32 BCM47XX_BOARD_BELKIN_F7D4301,
33 BCM47XX_BOARD_BELKIN_F7D4302,
34 BCM47XX_BOARD_BELKIN_F7D4401,
31 35
32 BCM47XX_BOARD_BUFFALO_WBR2_G54, 36 BCM47XX_BOARD_BUFFALO_WBR2_G54,
33 BCM47XX_BOARD_BUFFALO_WHR2_A54G54, 37 BCM47XX_BOARD_BUFFALO_WHR2_A54G54,
@@ -66,7 +70,7 @@ enum bcm47xx_board {
66 BCM47XX_BOARD_LINKSYS_WRT310NV1, 70 BCM47XX_BOARD_LINKSYS_WRT310NV1,
67 BCM47XX_BOARD_LINKSYS_WRT310NV2, 71 BCM47XX_BOARD_LINKSYS_WRT310NV2,
68 BCM47XX_BOARD_LINKSYS_WRT54G3GV2, 72 BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
69 BCM47XX_BOARD_LINKSYS_WRT54GSV1, 73 BCM47XX_BOARD_LINKSYS_WRT54G,
70 BCM47XX_BOARD_LINKSYS_WRT610NV1, 74 BCM47XX_BOARD_LINKSYS_WRT610NV1,
71 BCM47XX_BOARD_LINKSYS_WRT610NV2, 75 BCM47XX_BOARD_LINKSYS_WRT610NV2,
72 BCM47XX_BOARD_LINKSYS_WRTSL54GS, 76 BCM47XX_BOARD_LINKSYS_WRTSL54GS,
@@ -94,6 +98,8 @@ enum bcm47xx_board {
94 98
95 BCM47XX_BOARD_PHICOMM_M1, 99 BCM47XX_BOARD_PHICOMM_M1,
96 100
101 BCM47XX_BOARD_SIEMENS_SE505V2,
102
97 BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE, 103 BCM47XX_BOARD_SIMPLETECH_SIMPLESHARE,
98 104
99 BCM47XX_BOARD_ZTE_H218N, 105 BCM47XX_BOARD_ZTE_H218N,
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
deleted file mode 100644
index d3cce7326dd4..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * AMD Alchemy DBAu1200 Reference Board
3 * Board register defines.
4 *
5 * ########################################################################
6 *
7 * This program is free software; you can distribute it and/or modify it
8 * under the terms of the GNU General Public License (Version 2) as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
19 *
20 * ########################################################################
21 *
22 *
23 */
24#ifndef __ASM_DB1200_H
25#define __ASM_DB1200_H
26
27#include <linux/types.h>
28#include <asm/mach-au1x00/au1000.h>
29#include <asm/mach-au1x00/au1xxx_psc.h>
30
31/* Bit positions for the different interrupt sources */
32#define BCSR_INT_IDE 0x0001
33#define BCSR_INT_ETH 0x0002
34#define BCSR_INT_PC0 0x0004
35#define BCSR_INT_PC0STSCHG 0x0008
36#define BCSR_INT_PC1 0x0010
37#define BCSR_INT_PC1STSCHG 0x0020
38#define BCSR_INT_DC 0x0040
39#define BCSR_INT_FLASHBUSY 0x0080
40#define BCSR_INT_PC0INSERT 0x0100
41#define BCSR_INT_PC0EJECT 0x0200
42#define BCSR_INT_PC1INSERT 0x0400
43#define BCSR_INT_PC1EJECT 0x0800
44#define BCSR_INT_SD0INSERT 0x1000
45#define BCSR_INT_SD0EJECT 0x2000
46#define BCSR_INT_SD1INSERT 0x4000
47#define BCSR_INT_SD1EJECT 0x8000
48
49#define IDE_REG_SHIFT 5
50
51#define DB1200_IDE_PHYS_ADDR 0x18800000
52#define DB1200_IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
53#define DB1200_ETH_PHYS_ADDR 0x19000300
54#define DB1200_NAND_PHYS_ADDR 0x20000000
55
56#define PB1200_IDE_PHYS_ADDR 0x0C800000
57#define PB1200_ETH_PHYS_ADDR 0x0D000300
58#define PB1200_NAND_PHYS_ADDR 0x1C000000
59
60/*
61 * External Interrupts for DBAu1200 as of 8/6/2004.
62 * Bit positions in the CPLD registers can be calculated by taking
63 * the interrupt define and subtracting the DB1200_INT_BEGIN value.
64 *
65 * Example: IDE bis pos is = 64 - 64
66 * ETH bit pos is = 65 - 64
67 */
68enum external_db1200_ints {
69 DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
70
71 DB1200_IDE_INT = DB1200_INT_BEGIN,
72 DB1200_ETH_INT,
73 DB1200_PC0_INT,
74 DB1200_PC0_STSCHG_INT,
75 DB1200_PC1_INT,
76 DB1200_PC1_STSCHG_INT,
77 DB1200_DC_INT,
78 DB1200_FLASHBUSY_INT,
79 DB1200_PC0_INSERT_INT,
80 DB1200_PC0_EJECT_INT,
81 DB1200_PC1_INSERT_INT,
82 DB1200_PC1_EJECT_INT,
83 DB1200_SD0_INSERT_INT,
84 DB1200_SD0_EJECT_INT,
85 PB1200_SD1_INSERT_INT,
86 PB1200_SD1_EJECT_INT,
87
88 DB1200_INT_END = DB1200_INT_BEGIN + 15,
89};
90
91#endif /* __ASM_DB1200_H */
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
deleted file mode 100644
index 3d1ede46f059..000000000000
--- a/arch/mips/include/asm/mach-db1x00/db1300.h
+++ /dev/null
@@ -1,40 +0,0 @@
1/*
2 * NetLogic DB1300 board constants
3 */
4
5#ifndef _DB1300_H_
6#define _DB1300_H_
7
8/* FPGA (external mux) interrupt sources */
9#define DB1300_FIRST_INT (ALCHEMY_GPIC_INT_LAST + 1)
10#define DB1300_IDE_INT (DB1300_FIRST_INT + 0)
11#define DB1300_ETH_INT (DB1300_FIRST_INT + 1)
12#define DB1300_CF_INT (DB1300_FIRST_INT + 2)
13#define DB1300_VIDEO_INT (DB1300_FIRST_INT + 4)
14#define DB1300_HDMI_INT (DB1300_FIRST_INT + 5)
15#define DB1300_DC_INT (DB1300_FIRST_INT + 6)
16#define DB1300_FLASH_INT (DB1300_FIRST_INT + 7)
17#define DB1300_CF_INSERT_INT (DB1300_FIRST_INT + 8)
18#define DB1300_CF_EJECT_INT (DB1300_FIRST_INT + 9)
19#define DB1300_AC97_INT (DB1300_FIRST_INT + 10)
20#define DB1300_AC97_PEN_INT (DB1300_FIRST_INT + 11)
21#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
22#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
23#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
24#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
25#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
26
27/* SMSC9210 CS */
28#define DB1300_ETH_PHYS_ADDR 0x19000000
29#define DB1300_ETH_PHYS_END 0x197fffff
30
31/* ATA CS */
32#define DB1300_IDE_PHYS_ADDR 0x18800000
33#define DB1300_IDE_REG_SHIFT 5
34#define DB1300_IDE_PHYS_LEN (16 << DB1300_IDE_REG_SHIFT)
35
36/* NAND CS */
37#define DB1300_NAND_PHYS_ADDR 0x20000000
38#define DB1300_NAND_PHYS_END 0x20000fff
39
40#endif /* _DB1300_H_ */
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
new file mode 100644
index 000000000000..829a7ec185fb
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/boot_param.h
@@ -0,0 +1,163 @@
1#ifndef __ASM_MACH_LOONGSON_BOOT_PARAM_H_
2#define __ASM_MACH_LOONGSON_BOOT_PARAM_H_
3
4#define SYSTEM_RAM_LOW 1
5#define SYSTEM_RAM_HIGH 2
6#define MEM_RESERVED 3
7#define PCI_IO 4
8#define PCI_MEM 5
9#define LOONGSON_CFG_REG 6
10#define VIDEO_ROM 7
11#define ADAPTER_ROM 8
12#define ACPI_TABLE 9
13#define MAX_MEMORY_TYPE 10
14
15#define LOONGSON3_BOOT_MEM_MAP_MAX 128
16struct efi_memory_map_loongson {
17 u16 vers; /* version of efi_memory_map */
18 u32 nr_map; /* number of memory_maps */
 19 u32 mem_freq; /* memory frequency */
20 struct mem_map {
 21 u32 node_id; /* node to which this memory is attached */
22 u32 mem_type; /* system memory, pci memory, pci io, etc. */
23 u64 mem_start; /* memory map start address */
24 u32 mem_size; /* each memory_map size, not the total size */
25 } map[LOONGSON3_BOOT_MEM_MAP_MAX];
26} __packed;
27
28enum loongson_cpu_type {
29 Loongson_2E = 0,
30 Loongson_2F = 1,
31 Loongson_3A = 2,
32 Loongson_3B = 3,
33 Loongson_1A = 4,
34 Loongson_1B = 5
35};
36
37/*
38 * Capability and feature descriptor structure for MIPS CPU
39 */
40struct efi_cpuinfo_loongson {
41 u16 vers; /* version of efi_cpuinfo_loongson */
42 u32 processor_id; /* PRID, e.g. 6305, 6306 */
43 u32 cputype; /* Loongson_3A/3B, etc. */
44 u32 total_node; /* num of total numa nodes */
45 u32 cpu_startup_core_id; /* Core id */
46 u32 cpu_clock_freq; /* cpu_clock */
47 u32 nr_cpus;
48} __packed;
49
50struct system_loongson {
51 u16 vers; /* version of system_loongson */
52 u32 ccnuma_smp; /* 0: no numa; 1: has numa */
53 u32 sing_double_channel; /* 1:single; 2:double */
54} __packed;
55
56struct irq_source_routing_table {
57 u16 vers;
58 u16 size;
59 u16 rtr_bus;
60 u16 rtr_devfn;
61 u32 vendor;
62 u32 device;
 63 u32 PIC_type; /* whether HT or PCI is used to route to the CPU-PIC */
64 u64 ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */
65 u64 ht_enable; /* irqs used in this PIC */
66 u32 node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */
67 u64 pci_mem_start_addr;
68 u64 pci_mem_end_addr;
69 u64 pci_io_start_addr;
70 u64 pci_io_end_addr;
71 u64 pci_config_addr;
72 u32 dma_mask_bits;
73} __packed;
74
75struct interface_info {
 76 u16 vers; /* version of the specification */
77 u16 size;
78 u8 flag;
79 char description[64];
80} __packed;
81
82#define MAX_RESOURCE_NUMBER 128
83struct resource_loongson {
84 u64 start; /* resource start address */
85 u64 end; /* resource end address */
86 char name[64];
87 u32 flags;
88};
89
90struct archdev_data {}; /* arch specific additions */
91
92struct board_devices {
93 char name[64]; /* hold the device name */
94 u32 num_resources; /* number of device_resource */
95 /* for each device's resource */
96 struct resource_loongson resource[MAX_RESOURCE_NUMBER];
97 /* arch specific additions */
98 struct archdev_data archdata;
99};
100
101struct loongson_special_attribute {
102 u16 vers; /* version of this special */
 103 char special_name[64]; /* special_attribute_name */
104 u32 loongson_special_type; /* type of special device */
105 /* for each device's resource */
106 struct resource_loongson resource[MAX_RESOURCE_NUMBER];
107};
108
109struct loongson_params {
110 u64 memory_offset; /* efi_memory_map_loongson struct offset */
111 u64 cpu_offset; /* efi_cpuinfo_loongson struct offset */
112 u64 system_offset; /* system_loongson struct offset */
113 u64 irq_offset; /* irq_source_routing_table struct offset */
114 u64 interface_offset; /* interface_info struct offset */
115 u64 special_offset; /* loongson_special_attribute struct offset */
116 u64 boarddev_table_offset; /* board_devices offset */
117};
118
119struct smbios_tables {
120 u16 vers; /* version of smbios */
121 u64 vga_bios; /* vga_bios address */
122 struct loongson_params lp;
123};
124
125struct efi_reset_system_t {
126 u64 ResetCold;
127 u64 ResetWarm;
128 u64 ResetType;
129 u64 Shutdown;
 130 u64 DoSuspend; /* NULL if not supported */
131};
132
133struct efi_loongson {
134 u64 mps; /* MPS table */
135 u64 acpi; /* ACPI table (IA64 ext 0.71) */
136 u64 acpi20; /* ACPI table (ACPI 2.0) */
137 struct smbios_tables smbios; /* SM BIOS table */
138 u64 sal_systab; /* SAL system table */
139 u64 boot_info; /* boot info table */
140};
141
142struct boot_params {
143 struct efi_loongson efi;
144 struct efi_reset_system_t reset_system;
145};
146
147struct loongson_system_configuration {
148 u32 nr_cpus;
149 enum loongson_cpu_type cputype;
150 u64 ht_control_base;
151 u64 pci_mem_start_addr;
152 u64 pci_mem_end_addr;
153 u64 pci_io_base;
154 u64 restart_addr;
155 u64 poweroff_addr;
156 u64 suspend_addr;
157 u64 vgabios_addr;
158 u32 dma_mask_bits;
159};
160
161extern struct efi_memory_map_loongson *loongson_memmap;
162extern struct loongson_system_configuration loongson_sysconf;
163#endif
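
The loongson_params structure above stores byte offsets to the firmware sub-tables rather than pointers. A hedged sketch of how such an offset might be resolved (the helper name is invented; taking loongson_params itself as the offset base is an assumption drawn from the field comments):

/* Sketch: turn the cpu_offset field into a pointer to the CPU descriptor.
 * Assumes the offsets are relative to the loongson_params structure. */
static struct efi_cpuinfo_loongson *
example_get_cpuinfo(struct loongson_params *lp)
{
	return (struct efi_cpuinfo_loongson *)((char *)lp + lp->cpu_offset);
}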
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index aeb2c05d6145..6a902751cc7f 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -11,24 +11,40 @@
11#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H 11#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
12#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H 12#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H
13 13
14#ifdef CONFIG_SWIOTLB
15#include <linux/swiotlb.h>
16#endif
17
14struct device; 18struct device;
15 19
20extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
21extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
16static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, 22static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
17 size_t size) 23 size_t size)
18{ 24{
25#ifdef CONFIG_CPU_LOONGSON3
26 return virt_to_phys(addr);
27#else
19 return virt_to_phys(addr) | 0x80000000; 28 return virt_to_phys(addr) | 0x80000000;
29#endif
20} 30}
21 31
22static inline dma_addr_t plat_map_dma_mem_page(struct device *dev, 32static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
23 struct page *page) 33 struct page *page)
24{ 34{
35#ifdef CONFIG_CPU_LOONGSON3
36 return page_to_phys(page);
37#else
25 return page_to_phys(page) | 0x80000000; 38 return page_to_phys(page) | 0x80000000;
39#endif
26} 40}
27 41
28static inline unsigned long plat_dma_addr_to_phys(struct device *dev, 42static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
29 dma_addr_t dma_addr) 43 dma_addr_t dma_addr)
30{ 44{
31#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) 45#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
46 return dma_addr;
47#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
32 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff); 48 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
33#else 49#else
34 return dma_addr & 0x7fffffff; 50 return dma_addr & 0x7fffffff;
@@ -55,7 +71,11 @@ static inline int plat_dma_supported(struct device *dev, u64 mask)
55 71
56static inline int plat_device_is_coherent(struct device *dev) 72static inline int plat_device_is_coherent(struct device *dev)
57{ 73{
74#ifdef CONFIG_DMA_NONCOHERENT
58 return 0; 75 return 0;
76#else
77 return 1;
78#endif /* CONFIG_DMA_NONCOHERENT */
59} 79}
60 80
61#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */ 81#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */
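
With CONFIG_CPU_LOONGSON3 the helpers above stop or-ing in 0x80000000 and treat DMA addresses as plain physical addresses. The phys_to_dma()/dma_to_phys() pair is only declared here; a hedged sketch of what a matching Loongson-3 style definition could look like (the real definitions live in the platform DMA code, not in this header):

/* Sketch, assuming an identity mapping between physical and DMA
 * addresses as implied by the CONFIG_CPU_LOONGSON3 branches above. */
dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}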
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
new file mode 100644
index 000000000000..34560bda6626
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/irq.h
@@ -0,0 +1,44 @@
1#ifndef __ASM_MACH_LOONGSON_IRQ_H_
2#define __ASM_MACH_LOONGSON_IRQ_H_
3
4#include <boot_param.h>
5
6#ifdef CONFIG_CPU_LOONGSON3
7
8/* cpu core interrupt numbers */
9#define MIPS_CPU_IRQ_BASE 56
10
11#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
12#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
13#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
14
15#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
16#define LOONGSON_HT1_INT_VECTOR_BASE (LOONGSON_HT1_CFG_BASE + 0x80)
17#define LOONGSON_HT1_INT_EN_BASE (LOONGSON_HT1_CFG_BASE + 0xa0)
18#define LOONGSON_HT1_INT_VECTOR(n) \
19 LOONGSON3_REG32(LOONGSON_HT1_INT_VECTOR_BASE, 4 * (n))
20#define LOONGSON_HT1_INTN_EN(n) \
21 LOONGSON3_REG32(LOONGSON_HT1_INT_EN_BASE, 4 * (n))
22
23#define LOONGSON_INT_ROUTER_OFFSET 0x1400
24#define LOONGSON_INT_ROUTER_INTEN \
25 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x24)
26#define LOONGSON_INT_ROUTER_INTENSET \
27 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x28)
28#define LOONGSON_INT_ROUTER_INTENCLR \
29 LOONGSON3_REG32(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + 0x2c)
30#define LOONGSON_INT_ROUTER_ENTRY(n) \
31 LOONGSON3_REG8(LOONGSON3_REG_BASE, LOONGSON_INT_ROUTER_OFFSET + n)
32#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a)
33#define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18)
34
35#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */
36#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */
37
38#endif
39
40extern void fixup_irqs(void);
41extern void loongson3_ipi_interrupt(struct pt_regs *regs);
42
43#include_next <irq.h>
44#endif /* __ASM_MACH_LOONGSON_IRQ_H_ */
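
The macros above expand to volatile accesses into the Loongson-3 interrupt router. A hedged usage sketch of routing one HT1 vector to core 0 (the enable-bit layout, with HT1 sources in the upper half of the register, is an assumption made for illustration):

/* Sketch: route HT1 interrupt vector n to INT0 of core 0, then unmask it. */
static void __init example_route_ht1_irq(unsigned int n)
{
	LOONGSON_INT_ROUTER_HT1(n) = LOONGSON_INT_CORE0_INT0;
	LOONGSON_INT_ROUTER_INTENSET = 1 << (16 + n);	/* assumed bit layout */
}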
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index b286534fef08..f3fd1eb8e3dd 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/kconfig.h> 17#include <linux/kconfig.h>
18#include <boot_param.h>
18 19
19/* loongson internal northbridge initialization */ 20/* loongson internal northbridge initialization */
20extern void bonito_irq_init(void); 21extern void bonito_irq_init(void);
@@ -24,8 +25,9 @@ extern void mach_prepare_reboot(void);
24extern void mach_prepare_shutdown(void); 25extern void mach_prepare_shutdown(void);
25 26
26/* environment arguments from bootloader */ 27/* environment arguments from bootloader */
27extern unsigned long cpu_clock_freq; 28extern u32 cpu_clock_freq;
28extern unsigned long memsize, highmemsize; 29extern u32 memsize, highmemsize;
30extern struct plat_smp_ops loongson3_smp_ops;
29 31
30/* loongson-specific command line, env and memory initialization */ 32/* loongson-specific command line, env and memory initialization */
31extern void __init prom_init_memory(void); 33extern void __init prom_init_memory(void);
@@ -61,6 +63,12 @@ extern int mach_i8259_irq(void);
61#define LOONGSON_REG(x) \ 63#define LOONGSON_REG(x) \
62 (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x))) 64 (*(volatile u32 *)((char *)CKSEG1ADDR(LOONGSON_REG_BASE) + (x)))
63 65
66#define LOONGSON3_REG8(base, x) \
67 (*(volatile u8 *)((char *)TO_UNCAC(base) + (x)))
68
69#define LOONGSON3_REG32(base, x) \
70 (*(volatile u32 *)((char *)TO_UNCAC(base) + (x)))
71
64#define LOONGSON_IRQ_BASE 32 72#define LOONGSON_IRQ_BASE 32
65#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */ 73#define LOONGSON2_PERFCNT_IRQ (MIPS_CPU_IRQ_BASE + 6) /* cpu perf counter */
66 74
@@ -86,6 +94,10 @@ static inline void do_perfcnt_IRQ(void)
86#define LOONGSON_REG_BASE 0x1fe00000 94#define LOONGSON_REG_BASE 0x1fe00000
87#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */ 95#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
88#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1) 96#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
97/* Loongson-3 specific registers */
98#define LOONGSON3_REG_BASE 0x3ff00000
99#define LOONGSON3_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
100#define LOONGSON3_REG_TOP (LOONGSON3_REG_BASE+LOONGSON3_REG_SIZE-1)
89 101
90#define LOONGSON_LIO1_BASE 0x1ff00000 102#define LOONGSON_LIO1_BASE 0x1ff00000
91#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */ 103#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
@@ -101,7 +113,13 @@ static inline void do_perfcnt_IRQ(void)
101#define LOONGSON_PCICFG_BASE 0x1fe80000 113#define LOONGSON_PCICFG_BASE 0x1fe80000
102#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */ 114#define LOONGSON_PCICFG_SIZE 0x00000800 /* 2K */
103#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1) 115#define LOONGSON_PCICFG_TOP (LOONGSON_PCICFG_BASE+LOONGSON_PCICFG_SIZE-1)
116
117#if defined(CONFIG_HT_PCI)
118#define LOONGSON_PCIIO_BASE loongson_sysconf.pci_io_base
119#else
104#define LOONGSON_PCIIO_BASE 0x1fd00000 120#define LOONGSON_PCIIO_BASE 0x1fd00000
121#endif
122
105#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */ 123#define LOONGSON_PCIIO_SIZE 0x00100000 /* 1M */
106#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1) 124#define LOONGSON_PCIIO_TOP (LOONGSON_PCIIO_BASE+LOONGSON_PCIIO_SIZE-1)
107 125
@@ -231,6 +249,9 @@ static inline void do_perfcnt_IRQ(void)
231#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68) 249#define LOONGSON_PXARB_CFG LOONGSON_REG(LOONGSON_REGBASE + 0x68)
232#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c) 250#define LOONGSON_PXARB_STATUS LOONGSON_REG(LOONGSON_REGBASE + 0x6c)
233 251
252/* Chip Config */
253#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
254
234/* pcimap */ 255/* pcimap */
235 256
236#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f 257#define LOONGSON_PCIMAP_PCIMAP_LO0 0x0000003f
@@ -246,9 +267,6 @@ static inline void do_perfcnt_IRQ(void)
246#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ 267#ifdef CONFIG_CPU_SUPPORTS_CPUFREQ
247#include <linux/cpufreq.h> 268#include <linux/cpufreq.h>
248extern struct cpufreq_frequency_table loongson2_clockmod_table[]; 269extern struct cpufreq_frequency_table loongson2_clockmod_table[];
249
250/* Chip Config */
251#define LOONGSON_CHIPCFG0 LOONGSON_REG(LOONGSON_REGBASE + 0x80)
252#endif 270#endif
253 271
254/* 272/*
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 3810d5ca84ac..1b1f592fa2be 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -24,4 +24,10 @@
24 24
25#endif 25#endif
26 26
27#ifdef CONFIG_LEMOTE_MACH3A
28
29#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101
30
31#endif /* CONFIG_LEMOTE_MACH3A */
32
27#endif /* __ASM_MACH_LOONGSON_MACHINE_H */ 33#endif /* __ASM_MACH_LOONGSON_MACHINE_H */
diff --git a/arch/mips/include/asm/mach-loongson/pci.h b/arch/mips/include/asm/mach-loongson/pci.h
index bc99dab4ef63..1212774f66ef 100644
--- a/arch/mips/include/asm/mach-loongson/pci.h
+++ b/arch/mips/include/asm/mach-loongson/pci.h
@@ -40,8 +40,13 @@ extern struct pci_ops loongson_pci_ops;
40#else /* loongson2f/32bit & loongson2e */ 40#else /* loongson2f/32bit & loongson2e */
41 41
42/* this pci memory space is mapped by pcimap in pci.c */ 42/* this pci memory space is mapped by pcimap in pci.c */
43#ifdef CONFIG_CPU_LOONGSON3
44#define LOONGSON_PCI_MEM_START 0x40000000UL
45#define LOONGSON_PCI_MEM_END 0x7effffffUL
46#else
43#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE 47#define LOONGSON_PCI_MEM_START LOONGSON_PCILO1_BASE
44#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2) 48#define LOONGSON_PCI_MEM_END (LOONGSON_PCILO1_BASE + 0x04000000 * 2)
49#endif
45/* this is an offset from mips_io_port_base */ 50/* this is an offset from mips_io_port_base */
46#define LOONGSON_PCI_IO_START 0x00004000UL 51#define LOONGSON_PCI_IO_START 0x00004000UL
47 52
diff --git a/arch/mips/include/asm/mach-loongson/spaces.h b/arch/mips/include/asm/mach-loongson/spaces.h
new file mode 100644
index 000000000000..e2506ee90044
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/spaces.h
@@ -0,0 +1,9 @@
1#ifndef __ASM_MACH_LOONGSON_SPACES_H_
2#define __ASM_MACH_LOONGSON_SPACES_H_
3
4#if defined(CONFIG_64BIT)
5#define CAC_BASE _AC(0x9800000000000000, UL)
6#endif /* CONFIG_64BIT */
7
8#include <asm/mach-generic/spaces.h>
9#endif
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0b793e7bf67e..7c5e17a17849 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -5,10 +5,80 @@
5 * 5 *
6 * Chris Dearman (chris@mips.com) 6 * Chris Dearman (chris@mips.com)
7 * Copyright (C) 2007 Mips Technologies, Inc. 7 * Copyright (C) 2007 Mips Technologies, Inc.
8 * Copyright (C) 2014 Imagination Technologies Ltd.
8 */ 9 */
9#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 10#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
10#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H 11#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
11 12
13 /*
14 * Prepare segments for EVA boot:
15 *
16 * This is in case the processor boots in legacy configuration
17 * (SI_EVAReset is de-asserted and CONFIG5.K == 0)
18 *
19 * On entry, t1 is loaded with CP0_CONFIG
20 *
21 * ========================= Mappings =============================
22 * Virtual memory Physical memory Mapping
 23 * 0x00000000 - 0x7fffffff 0x80000000 - 0xffffffff MUSUK (kuseg)
 24 * Flat 2GB physical memory
 25 *
 26 * 0x80000000 - 0x9fffffff 0x00000000 - 0x1fffffff MUSUK (kseg0)
 27 * 0xa0000000 - 0xbfffffff 0x00000000 - 0x1fffffff MUSUK (kseg1)
28 * 0xc0000000 - 0xdfffffff - MK (kseg2)
29 * 0xe0000000 - 0xffffffff - MK (kseg3)
30 *
31 *
32 * Lowmem is expanded to 2GB
33 */
34 .macro eva_entry
35 /*
36 * Get Config.K0 value and use it to program
37 * the segmentation registers
38 */
39 andi t1, 0x7 /* CCA */
40 move t2, t1
41 ins t2, t1, 16, 3
42 /* SegCtl0 */
43 li t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
44 (0 << MIPS_SEGCFG_PA_SHIFT) | \
45 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
46 (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
47 (0 << MIPS_SEGCFG_PA_SHIFT) | \
48 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
49 or t0, t2
50 mtc0 t0, $5, 2
51
52 /* SegCtl1 */
53 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
54 (0 << MIPS_SEGCFG_PA_SHIFT) | \
55 (2 << MIPS_SEGCFG_C_SHIFT) | \
56 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
57 (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
58 (0 << MIPS_SEGCFG_PA_SHIFT) | \
59 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
60 ins t0, t1, 16, 3
61 mtc0 t0, $5, 3
62
63 /* SegCtl2 */
64 li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
65 (6 << MIPS_SEGCFG_PA_SHIFT) | \
66 (1 << MIPS_SEGCFG_EU_SHIFT)) | \
67 (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
68 (4 << MIPS_SEGCFG_PA_SHIFT) | \
69 (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
70 or t0, t2
71 mtc0 t0, $5, 4
72
73 jal mips_ihb
74 mfc0 t0, $16, 5
75 li t2, 0x40000000 /* K bit */
76 or t0, t0, t2
77 mtc0 t0, $16, 5
78 sync
79 jal mips_ihb
80 .endm
81
12 .macro kernel_entry_setup 82 .macro kernel_entry_setup
13#ifdef CONFIG_MIPS_MT_SMTC 83#ifdef CONFIG_MIPS_MT_SMTC
14 mfc0 t0, CP0_CONFIG 84 mfc0 t0, CP0_CONFIG
@@ -39,14 +109,57 @@
39nonmt_processor: 109nonmt_processor:
40 .asciz "SMTC kernel requires the MT ASE to run\n" 110 .asciz "SMTC kernel requires the MT ASE to run\n"
41 __FINIT 111 __FINIT
420:
43#endif 112#endif
113
114#ifdef CONFIG_EVA
115 sync
116 ehb
117
118 mfc0 t1, CP0_CONFIG
119 bgez t1, 9f
120 mfc0 t0, CP0_CONFIG, 1
121 bgez t0, 9f
122 mfc0 t0, CP0_CONFIG, 2
123 bgez t0, 9f
124 mfc0 t0, CP0_CONFIG, 3
125 sll t0, t0, 6 /* SC bit */
126 bgez t0, 9f
127
128 eva_entry
129 b 0f
1309:
131 /* Assume we came from YAMON... */
132 PTR_LA v0, 0x9fc00534 /* YAMON print */
133 lw v0, (v0)
134 move a0, zero
135 PTR_LA a1, nonsc_processor
136 jal v0
137
138 PTR_LA v0, 0x9fc00520 /* YAMON exit */
139 lw v0, (v0)
140 li a0, 1
141 jal v0
142
1431: b 1b
144 nop
145 __INITDATA
146nonsc_processor:
147 .asciz "EVA kernel requires a MIPS core with Segment Control implemented\n"
148 __FINIT
149#endif /* CONFIG_EVA */
1500:
44 .endm 151 .endm
45 152
46/* 153/*
47 * Do SMP slave processor setup necessary before we can safely execute C code. 154 * Do SMP slave processor setup necessary before we can safely execute C code.
48 */ 155 */
49 .macro smp_slave_setup 156 .macro smp_slave_setup
157#ifdef CONFIG_EVA
158 sync
159 ehb
160 mfc0 t1, CP0_CONFIG
161 eva_entry
162#endif
50 .endm 163 .endm
51 164
52#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */ 165#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644
index 000000000000..d7e54971ec66
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/spaces.h
@@ -0,0 +1,46 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2014 Imagination Technologies Ltd.
7 */
8
9#ifndef _ASM_MALTA_SPACES_H
10#define _ASM_MALTA_SPACES_H
11
12#ifdef CONFIG_EVA
13
14/*
15 * Traditional Malta Board Memory Map for EVA
16 *
17 * 0x00000000 - 0x0fffffff: 1st RAM region, 256MB
18 * 0x10000000 - 0x1bffffff: GIC and CPC Control Registers
19 * 0x1c000000 - 0x1fffffff: I/O And Flash
20 * 0x20000000 - 0x7fffffff: 2nd RAM region, 1.5GB
21 * 0x80000000 - 0xffffffff: Physical memory aliases to 0x0 (2GB)
22 *
 23 * The kernel is still located at 0x80000000 (kseg0). However, the
 24 * physical mask has been shifted to 0x80000000, which exploits the alias
 25 * on the Malta board. As a result, we override __pa_symbol to perform a
 26 * direct mapping from virtual to physical addresses. In other words,
 27 * the virtual address 0x80000000 maps to the physical address 0x80000000,
 28 * which in turn aliases to 0x0. We do this to be able to use a flat
 29 * 2GB of memory (0x80000000 - 0xffffffff) and so avoid the I/O hole at
 30 * 0x10000000 - 0x1fffffff.
 31 * The last 64KB of physical memory are reserved for correct HIGHMEM
 32 * macro arithmetic.
33 *
34 */
35
36#define PAGE_OFFSET _AC(0x0, UL)
37#define PHYS_OFFSET _AC(0x80000000, UL)
38#define HIGHMEM_START _AC(0xffff0000, UL)
39
40#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
41
42#endif /* CONFIG_EVA */
43
44#include <asm/mach-generic/spaces.h>
45
46#endif /* _ASM_MALTA_SPACES_H */
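
Because of the alias described in the comment, __pa_symbol() above is an identity translation rather than the generic __pa()-based form. A small hedged illustration (the symbol used is only an example):

/* Illustration: under the EVA layout above a kernel symbol keeps its
 * numeric value when translated, and the board aliases it onto low RAM. */
unsigned long example_kernel_text_phys(void)
{
	extern char _text[];		/* start of the kernel image */
	return __pa_symbol(_text);	/* identity translation */
}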
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 2dbc7a8cec1a..fc946c835995 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -76,7 +76,7 @@ static inline void set_value_reg32(volatile u32 *const addr,
76 76
77 __asm__ __volatile__( 77 __asm__ __volatile__(
78 " .set push \n" 78 " .set push \n"
79 " .set mips3 \n" 79 " .set arch=r4000 \n"
80 "1: ll %0, %1 # set_value_reg32 \n" 80 "1: ll %0, %1 # set_value_reg32 \n"
81 " and %0, %2 \n" 81 " and %0, %2 \n"
82 " or %0, %3 \n" 82 " or %0, %3 \n"
@@ -98,7 +98,7 @@ static inline void set_reg32(volatile u32 *const addr,
98 98
99 __asm__ __volatile__( 99 __asm__ __volatile__(
100 " .set push \n" 100 " .set push \n"
101 " .set mips3 \n" 101 " .set arch=r4000 \n"
102 "1: ll %0, %1 # set_reg32 \n" 102 "1: ll %0, %1 # set_reg32 \n"
103 " or %0, %2 \n" 103 " or %0, %2 \n"
104 " sc %0, %1 \n" 104 " sc %0, %1 \n"
@@ -119,7 +119,7 @@ static inline void clear_reg32(volatile u32 *const addr,
119 119
120 __asm__ __volatile__( 120 __asm__ __volatile__(
121 " .set push \n" 121 " .set push \n"
122 " .set mips3 \n" 122 " .set arch=r4000 \n"
123 "1: ll %0, %1 # clear_reg32 \n" 123 "1: ll %0, %1 # clear_reg32 \n"
124 " and %0, %2 \n" 124 " and %0, %2 \n"
125 " sc %0, %1 \n" 125 " sc %0, %1 \n"
@@ -140,7 +140,7 @@ static inline void toggle_reg32(volatile u32 *const addr,
140 140
141 __asm__ __volatile__( 141 __asm__ __volatile__(
142 " .set push \n" 142 " .set push \n"
143 " .set mips3 \n" 143 " .set arch=r4000 \n"
144 "1: ll %0, %1 # toggle_reg32 \n" 144 "1: ll %0, %1 # toggle_reg32 \n"
145 " xor %0, %2 \n" 145 " xor %0, %2 \n"
146 " sc %0, %1 \n" 146 " sc %0, %1 \n"
@@ -216,7 +216,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
216#define custom_read_reg32(address, tmp) \ 216#define custom_read_reg32(address, tmp) \
217 __asm__ __volatile__( \ 217 __asm__ __volatile__( \
218 " .set push \n" \ 218 " .set push \n" \
219 " .set mips3 \n" \ 219 " .set arch=r4000 \n" \
220 "1: ll %0, %1 #custom_read_reg32 \n" \ 220 "1: ll %0, %1 #custom_read_reg32 \n" \
221 " .set pop \n" \ 221 " .set pop \n" \
222 : "=r" (tmp), "=m" (*address) \ 222 : "=r" (tmp), "=m" (*address) \
@@ -225,7 +225,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
225#define custom_write_reg32(address, tmp) \ 225#define custom_write_reg32(address, tmp) \
226 __asm__ __volatile__( \ 226 __asm__ __volatile__( \
227 " .set push \n" \ 227 " .set push \n" \
228 " .set mips3 \n" \ 228 " .set arch=r4000 \n" \
229 " sc %0, %1 #custom_write_reg32 \n" \ 229 " sc %0, %1 #custom_write_reg32 \n" \
230 " "__beqz"%0, 1b \n" \ 230 " "__beqz"%0, 1b \n" \
231 " nop \n" \ 231 " nop \n" \
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index 722bc889eab5..fd9774269a5e 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -64,6 +64,11 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
64#define GIC_ADDRSPACE_SZ (128 * 1024) 64#define GIC_ADDRSPACE_SZ (128 * 1024)
65 65
66/* 66/*
67 * CPC Specific definitions
68 */
69#define CPC_BASE_ADDR 0x1bde0000
70
71/*
67 * MSC01 BIU Specific definitions 72 * MSC01 BIU Specific definitions
68 * FIXME : These should be elsewhere ? 73 * FIXME : These should be elsewhere ?
69 */ 74 */
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 836e2ede24de..9cf54041d416 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -50,4 +50,9 @@
50#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43 50#define PIIX4_FUNC1_IDETIM_SECONDARY_HI 0x43
51#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7) 51#define PIIX4_FUNC1_IDETIM_SECONDARY_HI_IDE_DECODE_EN (1 << 7)
52 52
53/* Power Management Configuration Space */
54#define PIIX4_FUNC3_PMBA 0x40
55#define PIIX4_FUNC3_PMREGMISC 0x80
56#define PIIX4_FUNC3_PMREGMISC_EN (1 << 0)
57
53#endif /* __ASM_MIPS_BOARDS_PIIX4_H */ 58#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
new file mode 100644
index 000000000000..6a9d2dd005ca
--- /dev/null
+++ b/arch/mips/include/asm/mips-cm.h
@@ -0,0 +1,322 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_MIPS_CM_H__
12#define __MIPS_ASM_MIPS_CM_H__
13
14#include <linux/io.h>
15#include <linux/types.h>
16
17/* The base address of the CM GCR block */
18extern void __iomem *mips_cm_base;
19
20/* The base address of the CM L2-only sync region */
21extern void __iomem *mips_cm_l2sync_base;
22
23/**
24 * __mips_cm_phys_base - retrieve the physical base address of the CM
25 *
26 * This function returns the physical base address of the Coherence Manager
27 * global control block, or 0 if no Coherence Manager is present. It provides
28 * a default implementation which reads the CMGCRBase register where available,
 29 * and may be overridden by platforms which determine this address in a
30 * different way by defining a function with the same prototype except for the
31 * name mips_cm_phys_base (without underscores).
32 */
33extern phys_t __mips_cm_phys_base(void);
34
35/**
36 * mips_cm_probe - probe for a Coherence Manager
37 *
38 * Attempt to detect the presence of a Coherence Manager. Returns 0 if a CM
39 * is successfully detected, else -errno.
40 */
41#ifdef CONFIG_MIPS_CM
42extern int mips_cm_probe(void);
43#else
44static inline int mips_cm_probe(void)
45{
46 return -ENODEV;
47}
48#endif
49
50/**
51 * mips_cm_present - determine whether a Coherence Manager is present
52 *
53 * Returns true if a CM is present in the system, else false.
54 */
55static inline bool mips_cm_present(void)
56{
57#ifdef CONFIG_MIPS_CM
58 return mips_cm_base != NULL;
59#else
60 return false;
61#endif
62}
63
64/**
65 * mips_cm_has_l2sync - determine whether an L2-only sync region is present
66 *
67 * Returns true if the system implements an L2-only sync region, else false.
68 */
69static inline bool mips_cm_has_l2sync(void)
70{
71#ifdef CONFIG_MIPS_CM
72 return mips_cm_l2sync_base != NULL;
73#else
74 return false;
75#endif
76}
77
78/* Offsets to register blocks from the CM base address */
79#define MIPS_CM_GCB_OFS 0x0000 /* Global Control Block */
80#define MIPS_CM_CLCB_OFS 0x2000 /* Core Local Control Block */
81#define MIPS_CM_COCB_OFS 0x4000 /* Core Other Control Block */
82#define MIPS_CM_GDB_OFS 0x6000 /* Global Debug Block */
83
84/* Total size of the CM memory mapped registers */
85#define MIPS_CM_GCR_SIZE 0x8000
86
87/* Size of the L2-only sync region */
88#define MIPS_CM_L2SYNC_SIZE 0x1000
89
90/* Macros to ease the creation of register access functions */
91#define BUILD_CM_R_(name, off) \
92static inline u32 *addr_gcr_##name(void) \
93{ \
94 return (u32 *)(mips_cm_base + (off)); \
95} \
96 \
97static inline u32 read_gcr_##name(void) \
98{ \
99 return __raw_readl(addr_gcr_##name()); \
100}
101
102#define BUILD_CM__W(name, off) \
103static inline void write_gcr_##name(u32 value) \
104{ \
105 __raw_writel(value, addr_gcr_##name()); \
106}
107
108#define BUILD_CM_RW(name, off) \
109 BUILD_CM_R_(name, off) \
110 BUILD_CM__W(name, off)
111
112#define BUILD_CM_Cx_R_(name, off) \
113 BUILD_CM_R_(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
114 BUILD_CM_R_(co_##name, MIPS_CM_COCB_OFS + (off))
115
116#define BUILD_CM_Cx__W(name, off) \
117 BUILD_CM__W(cl_##name, MIPS_CM_CLCB_OFS + (off)) \
118 BUILD_CM__W(co_##name, MIPS_CM_COCB_OFS + (off))
119
120#define BUILD_CM_Cx_RW(name, off) \
121 BUILD_CM_Cx_R_(name, off) \
122 BUILD_CM_Cx__W(name, off)
123
124/* GCB register accessor functions */
125BUILD_CM_R_(config, MIPS_CM_GCB_OFS + 0x00)
126BUILD_CM_RW(base, MIPS_CM_GCB_OFS + 0x08)
127BUILD_CM_RW(access, MIPS_CM_GCB_OFS + 0x20)
128BUILD_CM_R_(rev, MIPS_CM_GCB_OFS + 0x30)
129BUILD_CM_RW(error_mask, MIPS_CM_GCB_OFS + 0x40)
130BUILD_CM_RW(error_cause, MIPS_CM_GCB_OFS + 0x48)
131BUILD_CM_RW(error_addr, MIPS_CM_GCB_OFS + 0x50)
132BUILD_CM_RW(error_mult, MIPS_CM_GCB_OFS + 0x58)
133BUILD_CM_RW(l2_only_sync_base, MIPS_CM_GCB_OFS + 0x70)
134BUILD_CM_RW(gic_base, MIPS_CM_GCB_OFS + 0x80)
135BUILD_CM_RW(cpc_base, MIPS_CM_GCB_OFS + 0x88)
136BUILD_CM_RW(reg0_base, MIPS_CM_GCB_OFS + 0x90)
137BUILD_CM_RW(reg0_mask, MIPS_CM_GCB_OFS + 0x98)
138BUILD_CM_RW(reg1_base, MIPS_CM_GCB_OFS + 0xa0)
139BUILD_CM_RW(reg1_mask, MIPS_CM_GCB_OFS + 0xa8)
140BUILD_CM_RW(reg2_base, MIPS_CM_GCB_OFS + 0xb0)
141BUILD_CM_RW(reg2_mask, MIPS_CM_GCB_OFS + 0xb8)
142BUILD_CM_RW(reg3_base, MIPS_CM_GCB_OFS + 0xc0)
143BUILD_CM_RW(reg3_mask, MIPS_CM_GCB_OFS + 0xc8)
144BUILD_CM_R_(gic_status, MIPS_CM_GCB_OFS + 0xd0)
145BUILD_CM_R_(cpc_status, MIPS_CM_GCB_OFS + 0xf0)
146
147/* Core Local & Core Other register accessor functions */
148BUILD_CM_Cx_RW(reset_release, 0x00)
149BUILD_CM_Cx_RW(coherence, 0x08)
150BUILD_CM_Cx_R_(config, 0x10)
151BUILD_CM_Cx_RW(other, 0x18)
152BUILD_CM_Cx_RW(reset_base, 0x20)
153BUILD_CM_Cx_R_(id, 0x28)
154BUILD_CM_Cx_RW(reset_ext_base, 0x30)
155BUILD_CM_Cx_R_(tcid_0_priority, 0x40)
156BUILD_CM_Cx_R_(tcid_1_priority, 0x48)
157BUILD_CM_Cx_R_(tcid_2_priority, 0x50)
158BUILD_CM_Cx_R_(tcid_3_priority, 0x58)
159BUILD_CM_Cx_R_(tcid_4_priority, 0x60)
160BUILD_CM_Cx_R_(tcid_5_priority, 0x68)
161BUILD_CM_Cx_R_(tcid_6_priority, 0x70)
162BUILD_CM_Cx_R_(tcid_7_priority, 0x78)
163BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
164
165/* GCR_CONFIG register fields */
166#define CM_GCR_CONFIG_NUMIOCU_SHF 8
167#define CM_GCR_CONFIG_NUMIOCU_MSK (_ULCAST_(0xf) << 8)
168#define CM_GCR_CONFIG_PCORES_SHF 0
169#define CM_GCR_CONFIG_PCORES_MSK (_ULCAST_(0xff) << 0)
170
171/* GCR_BASE register fields */
172#define CM_GCR_BASE_GCRBASE_SHF 15
173#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
174#define CM_GCR_BASE_CMDEFTGT_SHF 0
175#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
176#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
177#define CM_GCR_BASE_CMDEFTGT_MEM 1
178#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
179#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
180
181/* GCR_ACCESS register fields */
182#define CM_GCR_ACCESS_ACCESSEN_SHF 0
183#define CM_GCR_ACCESS_ACCESSEN_MSK (_ULCAST_(0xff) << 0)
184
185/* GCR_REV register fields */
186#define CM_GCR_REV_MAJOR_SHF 8
187#define CM_GCR_REV_MAJOR_MSK (_ULCAST_(0xff) << 8)
188#define CM_GCR_REV_MINOR_SHF 0
189#define CM_GCR_REV_MINOR_MSK (_ULCAST_(0xff) << 0)
190
191/* GCR_ERROR_CAUSE register fields */
192#define CM_GCR_ERROR_CAUSE_ERRTYPE_SHF 27
193#define CM_GCR_ERROR_CAUSE_ERRTYPE_MSK (_ULCAST_(0x1f) << 27)
194#define CM_GCR_ERROR_CAUSE_ERRINFO_SHF 0
195#define CM_GCR_ERROR_CAUSE_ERRINGO_MSK (_ULCAST_(0x7ffffff) << 0)
196
197/* GCR_ERROR_MULT register fields */
198#define CM_GCR_ERROR_MULT_ERR2ND_SHF 0
199#define CM_GCR_ERROR_MULT_ERR2ND_MSK (_ULCAST_(0x1f) << 0)
200
201/* GCR_L2_ONLY_SYNC_BASE register fields */
202#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_SHF 12
203#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK (_ULCAST_(0xfffff) << 12)
204#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_SHF 0
205#define CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK (_ULCAST_(0x1) << 0)
206
207/* GCR_GIC_BASE register fields */
208#define CM_GCR_GIC_BASE_GICBASE_SHF 17
209#define CM_GCR_GIC_BASE_GICBASE_MSK (_ULCAST_(0x7fff) << 17)
210#define CM_GCR_GIC_BASE_GICEN_SHF 0
211#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0)
212
213/* GCR_CPC_BASE register fields */
214#define CM_GCR_CPC_BASE_CPCBASE_SHF 17
215#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x7fff) << 17)
216#define CM_GCR_CPC_BASE_CPCEN_SHF 0
217#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0)
218
219/* GCR_REGn_BASE register fields */
220#define CM_GCR_REGn_BASE_BASEADDR_SHF 16
221#define CM_GCR_REGn_BASE_BASEADDR_MSK (_ULCAST_(0xffff) << 16)
222
223/* GCR_REGn_MASK register fields */
224#define CM_GCR_REGn_MASK_ADDRMASK_SHF 16
225#define CM_GCR_REGn_MASK_ADDRMASK_MSK (_ULCAST_(0xffff) << 16)
226#define CM_GCR_REGn_MASK_CCAOVR_SHF 5
227#define CM_GCR_REGn_MASK_CCAOVR_MSK (_ULCAST_(0x3) << 5)
228#define CM_GCR_REGn_MASK_CCAOVREN_SHF 4
229#define CM_GCR_REGn_MASK_CCAOVREN_MSK (_ULCAST_(0x1) << 4)
230#define CM_GCR_REGn_MASK_DROPL2_SHF 2
231#define CM_GCR_REGn_MASK_DROPL2_MSK (_ULCAST_(0x1) << 2)
232#define CM_GCR_REGn_MASK_CMTGT_SHF 0
233#define CM_GCR_REGn_MASK_CMTGT_MSK (_ULCAST_(0x3) << 0)
234#define CM_GCR_REGn_MASK_CMTGT_DISABLED (_ULCAST_(0x0) << 0)
235#define CM_GCR_REGn_MASK_CMTGT_MEM (_ULCAST_(0x1) << 0)
236#define CM_GCR_REGn_MASK_CMTGT_IOCU0 (_ULCAST_(0x2) << 0)
237#define CM_GCR_REGn_MASK_CMTGT_IOCU1 (_ULCAST_(0x3) << 0)
238
239/* GCR_GIC_STATUS register fields */
240#define CM_GCR_GIC_STATUS_EX_SHF 0
241#define CM_GCR_GIC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
242
243/* GCR_CPC_STATUS register fields */
244#define CM_GCR_CPC_STATUS_EX_SHF 0
245#define CM_GCR_CPC_STATUS_EX_MSK (_ULCAST_(0x1) << 0)
246
247/* GCR_Cx_COHERENCE register fields */
248#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_SHF 0
249#define CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK (_ULCAST_(0xff) << 0)
250
251/* GCR_Cx_CONFIG register fields */
252#define CM_GCR_Cx_CONFIG_IOCUTYPE_SHF 10
253#define CM_GCR_Cx_CONFIG_IOCUTYPE_MSK (_ULCAST_(0x3) << 10)
254#define CM_GCR_Cx_CONFIG_PVPE_SHF 0
255#define CM_GCR_Cx_CONFIG_PVPE_MSK (_ULCAST_(0x1ff) << 0)
256
257/* GCR_Cx_OTHER register fields */
258#define CM_GCR_Cx_OTHER_CORENUM_SHF 16
259#define CM_GCR_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xffff) << 16)
260
261/* GCR_Cx_RESET_BASE register fields */
262#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_SHF 12
263#define CM_GCR_Cx_RESET_BASE_BEVEXCBASE_MSK (_ULCAST_(0xfffff) << 12)
264
265/* GCR_Cx_RESET_EXT_BASE register fields */
266#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_SHF 31
267#define CM_GCR_Cx_RESET_EXT_BASE_EVARESET_MSK (_ULCAST_(0x1) << 31)
268#define CM_GCR_Cx_RESET_EXT_BASE_UEB_SHF 30
269#define CM_GCR_Cx_RESET_EXT_BASE_UEB_MSK (_ULCAST_(0x1) << 30)
270#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_SHF 20
271#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCMASK_MSK (_ULCAST_(0xff) << 20)
272#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_SHF 1
273#define CM_GCR_Cx_RESET_EXT_BASE_BEVEXCPA_MSK (_ULCAST_(0x7f) << 1)
274#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_SHF 0
275#define CM_GCR_Cx_RESET_EXT_BASE_PRESENT_MSK (_ULCAST_(0x1) << 0)
276
277/**
278 * mips_cm_numcores - return the number of cores present in the system
279 *
280 * Returns the value of the PCORES field of the GCR_CONFIG register plus 1, or
281 * zero if no Coherence Manager is present.
282 */
283static inline unsigned mips_cm_numcores(void)
284{
285 if (!mips_cm_present())
286 return 0;
287
288 return ((read_gcr_config() & CM_GCR_CONFIG_PCORES_MSK)
289 >> CM_GCR_CONFIG_PCORES_SHF) + 1;
290}
291
292/**
293 * mips_cm_numiocu - return the number of IOCUs present in the system
294 *
295 * Returns the value of the NUMIOCU field of the GCR_CONFIG register, or zero
296 * if no Coherence Manager is present.
297 */
298static inline unsigned mips_cm_numiocu(void)
299{
300 if (!mips_cm_present())
301 return 0;
302
303 return (read_gcr_config() & CM_GCR_CONFIG_NUMIOCU_MSK)
304 >> CM_GCR_CONFIG_NUMIOCU_SHF;
305}
306
307/**
308 * mips_cm_l2sync - perform an L2-only sync operation
309 *
310 * If an L2-only sync region is present in the system then this function
 311 * performs an L2-only sync and returns zero. Otherwise it returns -ENODEV.
312 */
313static inline int mips_cm_l2sync(void)
314{
315 if (!mips_cm_has_l2sync())
316 return -ENODEV;
317
318 writel(0, mips_cm_l2sync_base);
319 return 0;
320}
321
322#endif /* __MIPS_ASM_MIPS_CM_H__ */
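
A hedged usage sketch of the probe and query helpers above, roughly as platform setup code might call them (the function name and the printout are illustrative only):

/* Sketch: detect the Coherence Manager during platform setup and report
 * how many cores and IOCUs its GCR_CONFIG register describes. */
void __init example_cm_setup(void)
{
	if (mips_cm_probe())
		return;				/* no CM present */

	pr_info("CM: %u core(s), %u IOCU(s)\n",
		mips_cm_numcores(), mips_cm_numiocu());

	if (mips_cm_has_l2sync())
		mips_cm_l2sync();		/* L2-only barrier */
}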
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
new file mode 100644
index 000000000000..988507e46d42
--- /dev/null
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_MIPS_CPC_H__
12#define __MIPS_ASM_MIPS_CPC_H__
13
14#include <linux/io.h>
15#include <linux/types.h>
16
17/* The base address of the CPC registers */
18extern void __iomem *mips_cpc_base;
19
20/**
21 * mips_cpc_default_phys_base - retrieve the default physical base address of
22 * the CPC
23 *
24 * Returns the default physical base address of the Cluster Power Controller
25 * memory mapped registers. This is platform dependant & must therefore be
26 * implemented per-platform.
27 */
28extern phys_t mips_cpc_default_phys_base(void);
29
30/**
31 * mips_cpc_phys_base - retrieve the physical base address of the CPC
32 *
33 * This function returns the physical base address of the Cluster Power
34 * Controller memory mapped registers, or 0 if no Cluster Power Controller
 35 * is present. It may be overridden by individual platforms which determine
36 * this address in a different way.
37 */
38extern phys_t __weak mips_cpc_phys_base(void);
39
40/**
41 * mips_cpc_probe - probe for a Cluster Power Controller
42 *
43 * Attempt to detect the presence of a Cluster Power Controller. Returns 0 if
44 * a CPC is successfully detected, else -errno.
45 */
46#ifdef CONFIG_MIPS_CPC
47extern int mips_cpc_probe(void);
48#else
49static inline int mips_cpc_probe(void)
50{
51 return -ENODEV;
52}
53#endif
54
55/**
56 * mips_cpc_present - determine whether a Cluster Power Controller is present
57 *
58 * Returns true if a CPC is present in the system, else false.
59 */
60static inline bool mips_cpc_present(void)
61{
62#ifdef CONFIG_MIPS_CPC
63 return mips_cpc_base != NULL;
64#else
65 return false;
66#endif
67}
68
69/* Offsets from the CPC base address to various control blocks */
70#define MIPS_CPC_GCB_OFS 0x0000
71#define MIPS_CPC_CLCB_OFS 0x2000
72#define MIPS_CPC_COCB_OFS 0x4000
73
74/* Macros to ease the creation of register access functions */
75#define BUILD_CPC_R_(name, off) \
76static inline u32 read_cpc_##name(void) \
77{ \
78 return __raw_readl(mips_cpc_base + (off)); \
79}
80
81#define BUILD_CPC__W(name, off) \
82static inline void write_cpc_##name(u32 value) \
83{ \
84 __raw_writel(value, mips_cpc_base + (off)); \
85}
86
87#define BUILD_CPC_RW(name, off) \
88 BUILD_CPC_R_(name, off) \
89 BUILD_CPC__W(name, off)
90
91#define BUILD_CPC_Cx_R_(name, off) \
92 BUILD_CPC_R_(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
93 BUILD_CPC_R_(co_##name, MIPS_CPC_COCB_OFS + (off))
94
95#define BUILD_CPC_Cx__W(name, off) \
96 BUILD_CPC__W(cl_##name, MIPS_CPC_CLCB_OFS + (off)) \
97 BUILD_CPC__W(co_##name, MIPS_CPC_COCB_OFS + (off))
98
99#define BUILD_CPC_Cx_RW(name, off) \
100 BUILD_CPC_Cx_R_(name, off) \
101 BUILD_CPC_Cx__W(name, off)
102
103/* GCB register accessor functions */
104BUILD_CPC_RW(access, MIPS_CPC_GCB_OFS + 0x00)
105BUILD_CPC_RW(seqdel, MIPS_CPC_GCB_OFS + 0x08)
106BUILD_CPC_RW(rail, MIPS_CPC_GCB_OFS + 0x10)
107BUILD_CPC_RW(resetlen, MIPS_CPC_GCB_OFS + 0x18)
108BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20)
109
110/* Core Local & Core Other accessor functions */
111BUILD_CPC_Cx_RW(cmd, 0x00)
112BUILD_CPC_Cx_RW(stat_conf, 0x08)
113BUILD_CPC_Cx_RW(other, 0x10)
114
115/* CPC_Cx_CMD register fields */
116#define CPC_Cx_CMD_SHF 0
117#define CPC_Cx_CMD_MSK (_ULCAST_(0xf) << 0)
118#define CPC_Cx_CMD_CLOCKOFF (_ULCAST_(0x1) << 0)
119#define CPC_Cx_CMD_PWRDOWN (_ULCAST_(0x2) << 0)
120#define CPC_Cx_CMD_PWRUP (_ULCAST_(0x3) << 0)
121#define CPC_Cx_CMD_RESET (_ULCAST_(0x4) << 0)
122
123/* CPC_Cx_STAT_CONF register fields */
124#define CPC_Cx_STAT_CONF_PWRUPE_SHF 23
125#define CPC_Cx_STAT_CONF_PWRUPE_MSK (_ULCAST_(0x1) << 23)
126#define CPC_Cx_STAT_CONF_SEQSTATE_SHF 19
127#define CPC_Cx_STAT_CONF_SEQSTATE_MSK (_ULCAST_(0xf) << 19)
128#define CPC_Cx_STAT_CONF_SEQSTATE_D0 (_ULCAST_(0x0) << 19)
129#define CPC_Cx_STAT_CONF_SEQSTATE_U0 (_ULCAST_(0x1) << 19)
130#define CPC_Cx_STAT_CONF_SEQSTATE_U1 (_ULCAST_(0x2) << 19)
131#define CPC_Cx_STAT_CONF_SEQSTATE_U2 (_ULCAST_(0x3) << 19)
132#define CPC_Cx_STAT_CONF_SEQSTATE_U3 (_ULCAST_(0x4) << 19)
133#define CPC_Cx_STAT_CONF_SEQSTATE_U4 (_ULCAST_(0x5) << 19)
134#define CPC_Cx_STAT_CONF_SEQSTATE_U5 (_ULCAST_(0x6) << 19)
135#define CPC_Cx_STAT_CONF_SEQSTATE_U6 (_ULCAST_(0x7) << 19)
136#define CPC_Cx_STAT_CONF_SEQSTATE_D1 (_ULCAST_(0x8) << 19)
137#define CPC_Cx_STAT_CONF_SEQSTATE_D3 (_ULCAST_(0x9) << 19)
138#define CPC_Cx_STAT_CONF_SEQSTATE_D2 (_ULCAST_(0xa) << 19)
139#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_SHF 17
140#define CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK (_ULCAST_(0x1) << 17)
141#define CPC_Cx_STAT_CONF_PWRDN_IMPL_SHF 16
142#define CPC_Cx_STAT_CONF_PWRDN_IMPL_MSK (_ULCAST_(0x1) << 16)
143#define CPC_Cx_STAT_CONF_EJTAG_PROBE_SHF 15
144#define CPC_Cx_STAT_CONF_EJTAG_PROBE_MSK (_ULCAST_(0x1) << 15)
145
146/* CPC_Cx_OTHER register fields */
147#define CPC_Cx_OTHER_CORENUM_SHF 16
148#define CPC_Cx_OTHER_CORENUM_MSK (_ULCAST_(0xff) << 16)
149
150#endif /* __MIPS_ASM_MIPS_CPC_H__ */
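
The Core Other block lets the running core address another core's CPC registers. A hedged sketch of a power-up request (locking and the rest of the boot protocol, such as programming the reset vector through the CM, are deliberately omitted):

/* Sketch: select core 'core' via the Core Other register, then issue a
 * power-up command. Real code must serialise access to the CO block. */
static void example_cpc_power_up(unsigned int core)
{
	if (!mips_cpc_present())
		return;

	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
}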
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index ac7935203f89..a3df0c3faa0e 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -18,7 +18,12 @@ extern cpumask_t mt_fpu_cpumask;
18extern unsigned long mt_fpemul_threshold; 18extern unsigned long mt_fpemul_threshold;
19 19
20extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value); 20extern void mips_mt_regdump(unsigned long previous_mvpcontrol_value);
21
22#ifdef CONFIG_MIPS_MT
21extern void mips_mt_set_cpuoptions(void); 23extern void mips_mt_set_cpuoptions(void);
24#else
25static inline void mips_mt_set_cpuoptions(void) { }
26#endif
22 27
23struct class; 28struct class;
24extern struct class *mt_class; 29extern struct class *mt_class;
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 38b7704ee376..6efa79a27b6a 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -176,6 +176,17 @@
176 176
177#ifndef __ASSEMBLY__ 177#ifndef __ASSEMBLY__
178 178
179static inline unsigned core_nvpes(void)
180{
181 unsigned conf0;
182
183 if (!cpu_has_mipsmt)
184 return 1;
185
186 conf0 = read_c0_mvpconf0();
187 return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
188}
189
179static inline unsigned int dvpe(void) 190static inline unsigned int dvpe(void)
180{ 191{
181 int res = 0; 192 int res = 0;
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index bbc3dd4294bc..3e025b5311db 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -568,11 +568,23 @@
568#define MIPS_CONF1_PC (_ULCAST_(1) << 4) 568#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
569#define MIPS_CONF1_MD (_ULCAST_(1) << 5) 569#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
570#define MIPS_CONF1_C2 (_ULCAST_(1) << 6) 570#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
571#define MIPS_CONF1_DA_SHF 7
572#define MIPS_CONF1_DA_SZ 3
571#define MIPS_CONF1_DA (_ULCAST_(7) << 7) 573#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
574#define MIPS_CONF1_DL_SHF 10
575#define MIPS_CONF1_DL_SZ 3
572#define MIPS_CONF1_DL (_ULCAST_(7) << 10) 576#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
577#define MIPS_CONF1_DS_SHF 13
578#define MIPS_CONF1_DS_SZ 3
573#define MIPS_CONF1_DS (_ULCAST_(7) << 13) 579#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
580#define MIPS_CONF1_IA_SHF 16
581#define MIPS_CONF1_IA_SZ 3
574#define MIPS_CONF1_IA (_ULCAST_(7) << 16) 582#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
583#define MIPS_CONF1_IL_SHF 19
584#define MIPS_CONF1_IL_SZ 3
575#define MIPS_CONF1_IL (_ULCAST_(7) << 19) 585#define MIPS_CONF1_IL (_ULCAST_(7) << 19)
586#define MIPS_CONF1_IS_SHF 22
587#define MIPS_CONF1_IS_SZ 3
576#define MIPS_CONF1_IS (_ULCAST_(7) << 22) 588#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
577#define MIPS_CONF1_TLBS_SHIFT (25) 589#define MIPS_CONF1_TLBS_SHIFT (25)
578#define MIPS_CONF1_TLBS_SIZE (6) 590#define MIPS_CONF1_TLBS_SIZE (6)
@@ -653,9 +665,16 @@
653 665
654#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) 666#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
655 667
668#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
669#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
670
656/* EntryHI bit definition */ 671/* EntryHI bit definition */
657#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10) 672#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
658 673
674/* CMGCRBase bit definitions */
675#define MIPS_CMGCRB_BASE 11
676#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
677
659/* 678/*
660 * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register. 679 * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
661 */ 680 */
@@ -1010,6 +1029,8 @@ do { \
1010 1029
1011#define read_c0_prid() __read_32bit_c0_register($15, 0) 1030#define read_c0_prid() __read_32bit_c0_register($15, 0)
1012 1031
1032#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
1033
1013#define read_c0_config() __read_32bit_c0_register($16, 0) 1034#define read_c0_config() __read_32bit_c0_register($16, 0)
1014#define read_c0_config1() __read_32bit_c0_register($16, 1) 1035#define read_c0_config1() __read_32bit_c0_register($16, 1)
1015#define read_c0_config2() __read_32bit_c0_register($16, 2) 1036#define read_c0_config2() __read_32bit_c0_register($16, 2)
@@ -1883,6 +1904,7 @@ change_c0_##name(unsigned int change, unsigned int newbits) \
1883__BUILD_SET_C0(status) 1904__BUILD_SET_C0(status)
1884__BUILD_SET_C0(cause) 1905__BUILD_SET_C0(cause)
1885__BUILD_SET_C0(config) 1906__BUILD_SET_C0(config)
1907__BUILD_SET_C0(config5)
1886__BUILD_SET_C0(intcontrol) 1908__BUILD_SET_C0(intcontrol)
1887__BUILD_SET_C0(intctl) 1909__BUILD_SET_C0(intctl)
1888__BUILD_SET_C0(srsmap) 1910__BUILD_SET_C0(srsmap)
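
The new MIPS_CONF1_*_SHF/_SZ definitions make the Config1 cache fields easy to decode programmatically. A hedged sketch of reading the data-cache geometry (encodings per the MIPS32 PRA: sets = 64 << DS, line size = 2 << DL with 0 meaning no cache, ways = DA + 1; the special DS value 7 is ignored here):

/* Sketch: decode the primary data cache geometry from Config1. */
static void example_decode_dcache(void)
{
	unsigned int conf1 = read_c0_config1();
	unsigned int sets  = 64 << ((conf1 & MIPS_CONF1_DS) >> MIPS_CONF1_DS_SHF);
	unsigned int line  =  2 << ((conf1 & MIPS_CONF1_DL) >> MIPS_CONF1_DL_SHF);
	unsigned int ways  =  1 +  ((conf1 & MIPS_CONF1_DA) >> MIPS_CONF1_DA_SHF);

	pr_info("dcache: %u sets x %u ways x %u byte lines\n", sets, ways, line);
}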
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 44b705d08262..c2edae382d5d 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -126,6 +126,8 @@ search_module_dbetables(unsigned long addr)
126#define MODULE_PROC_FAMILY "LOONGSON1 " 126#define MODULE_PROC_FAMILY "LOONGSON1 "
127#elif defined CONFIG_CPU_LOONGSON2 127#elif defined CONFIG_CPU_LOONGSON2
128#define MODULE_PROC_FAMILY "LOONGSON2 " 128#define MODULE_PROC_FAMILY "LOONGSON2 "
129#elif defined CONFIG_CPU_LOONGSON3
130#define MODULE_PROC_FAMILY "LOONGSON3 "
129#elif defined CONFIG_CPU_CAVIUM_OCTEON 131#elif defined CONFIG_CPU_CAVIUM_OCTEON
130#define MODULE_PROC_FAMILY "OCTEON " 132#define MODULE_PROC_FAMILY "OCTEON "
131#elif defined CONFIG_CPU_XLR 133#elif defined CONFIG_CPU_XLR
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644
index 000000000000..a2aba6c3ec05
--- /dev/null
+++ b/arch/mips/include/asm/msa.h
@@ -0,0 +1,203 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10#ifndef _ASM_MSA_H
11#define _ASM_MSA_H
12
13#include <asm/mipsregs.h>
14
15extern void _save_msa(struct task_struct *);
16extern void _restore_msa(struct task_struct *);
17
18static inline void enable_msa(void)
19{
20 if (cpu_has_msa) {
21 set_c0_config5(MIPS_CONF5_MSAEN);
22 enable_fpu_hazard();
23 }
24}
25
26static inline void disable_msa(void)
27{
28 if (cpu_has_msa) {
29 clear_c0_config5(MIPS_CONF5_MSAEN);
30 disable_fpu_hazard();
31 }
32}
33
34static inline int is_msa_enabled(void)
35{
36 if (!cpu_has_msa)
37 return 0;
38
39 return read_c0_config5() & MIPS_CONF5_MSAEN;
40}
41
42static inline int thread_msa_context_live(void)
43{
44 /*
45 * Check cpu_has_msa only if it's a constant. This will allow the
46 * compiler to optimise out code for CPUs without MSA without adding
47 * an extra redundant check for CPUs with MSA.
48 */
49 if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
50 return 0;
51
52 return test_thread_flag(TIF_MSA_CTX_LIVE);
53}
54
55static inline void save_msa(struct task_struct *t)
56{
57 if (cpu_has_msa)
58 _save_msa(t);
59}
60
61static inline void restore_msa(struct task_struct *t)
62{
63 if (cpu_has_msa)
64 _restore_msa(t);
65}
66
67#ifdef TOOLCHAIN_SUPPORTS_MSA
68
69#define __BUILD_MSA_CTL_REG(name, cs) \
70static inline unsigned int read_msa_##name(void) \
71{ \
72 unsigned int reg; \
73 __asm__ __volatile__( \
74 " .set push\n" \
75 " .set msa\n" \
76 " cfcmsa %0, $" #cs "\n" \
77 " .set pop\n" \
78 : "=r"(reg)); \
79 return reg; \
80} \
81 \
82static inline void write_msa_##name(unsigned int val) \
83{ \
84 __asm__ __volatile__( \
85 " .set push\n" \
86 " .set msa\n" \
 87 " ctcmsa $" #cs ", %0\n" \
88 " .set pop\n" \
89 : : "r"(val)); \
90}
91
92#else /* !TOOLCHAIN_SUPPORTS_MSA */
93
94/*
95 * Define functions using .word for the c[ft]cmsa instructions in order to
96 * allow compilation with toolchains that do not support MSA. Once all
97 * toolchains in use support MSA these can be removed.
98 */
99
100#define __BUILD_MSA_CTL_REG(name, cs) \
101static inline unsigned int read_msa_##name(void) \
102{ \
103 unsigned int reg; \
104 __asm__ __volatile__( \
105 " .set push\n" \
106 " .set noat\n" \
107 " .word 0x787e0059 | (" #cs " << 11)\n" \
108 " move %0, $1\n" \
109 " .set pop\n" \
110 : "=r"(reg)); \
111 return reg; \
112} \
113 \
114static inline void write_msa_##name(unsigned int val) \
115{ \
116 __asm__ __volatile__( \
117 " .set push\n" \
118 " .set noat\n" \
119 " move $1, %0\n" \
120 " .word 0x783e0819 | (" #cs " << 6)\n" \
121 " .set pop\n" \
122 : : "r"(val)); \
123}
124
125#endif /* !TOOLCHAIN_SUPPORTS_MSA */
126
127#define MSA_IR 0
128#define MSA_CSR 1
129#define MSA_ACCESS 2
130#define MSA_SAVE 3
131#define MSA_MODIFY 4
132#define MSA_REQUEST 5
133#define MSA_MAP 6
134#define MSA_UNMAP 7
135
136__BUILD_MSA_CTL_REG(ir, 0)
137__BUILD_MSA_CTL_REG(csr, 1)
138__BUILD_MSA_CTL_REG(access, 2)
139__BUILD_MSA_CTL_REG(save, 3)
140__BUILD_MSA_CTL_REG(modify, 4)
141__BUILD_MSA_CTL_REG(request, 5)
142__BUILD_MSA_CTL_REG(map, 6)
143__BUILD_MSA_CTL_REG(unmap, 7)
144
145/* MSA Implementation Register (MSAIR) */
146#define MSA_IR_REVB 0
147#define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB)
148#define MSA_IR_PROCB 8
149#define MSA_IR_PROCF (_ULCAST_(0xff) << MSA_IR_PROCB)
150#define MSA_IR_WRPB 16
151#define MSA_IR_WRPF (_ULCAST_(0x1) << MSA_IR_WRPB)
152
153/* MSA Control & Status Register (MSACSR) */
154#define MSA_CSR_RMB 0
155#define MSA_CSR_RMF (_ULCAST_(0x3) << MSA_CSR_RMB)
156#define MSA_CSR_RM_NEAREST 0
157#define MSA_CSR_RM_TO_ZERO 1
158#define MSA_CSR_RM_TO_POS 2
159#define MSA_CSR_RM_TO_NEG 3
160#define MSA_CSR_FLAGSB 2
161#define MSA_CSR_FLAGSF (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
162#define MSA_CSR_FLAGS_IB 2
163#define MSA_CSR_FLAGS_IF (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
164#define MSA_CSR_FLAGS_UB 3
165#define MSA_CSR_FLAGS_UF (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
166#define MSA_CSR_FLAGS_OB 4
167#define MSA_CSR_FLAGS_OF (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
168#define MSA_CSR_FLAGS_ZB 5
169#define MSA_CSR_FLAGS_ZF (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
170#define MSA_CSR_FLAGS_VB 6
171#define MSA_CSR_FLAGS_VF (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
172#define MSA_CSR_ENABLESB 7
173#define MSA_CSR_ENABLESF (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
174#define MSA_CSR_ENABLES_IB 7
175#define MSA_CSR_ENABLES_IF (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
176#define MSA_CSR_ENABLES_UB 8
177#define MSA_CSR_ENABLES_UF (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
178#define MSA_CSR_ENABLES_OB 9
179#define MSA_CSR_ENABLES_OF (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
180#define MSA_CSR_ENABLES_ZB 10
181#define MSA_CSR_ENABLES_ZF (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
182#define MSA_CSR_ENABLES_VB 11
183#define MSA_CSR_ENABLES_VF (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
184#define MSA_CSR_CAUSEB 12
185#define MSA_CSR_CAUSEF (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
186#define MSA_CSR_CAUSE_IB 12
187#define MSA_CSR_CAUSE_IF (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
188#define MSA_CSR_CAUSE_UB 13
189#define MSA_CSR_CAUSE_UF (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
190#define MSA_CSR_CAUSE_OB 14
191#define MSA_CSR_CAUSE_OF (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
192#define MSA_CSR_CAUSE_ZB 15
193#define MSA_CSR_CAUSE_ZF (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
194#define MSA_CSR_CAUSE_VB 16
195#define MSA_CSR_CAUSE_VF (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
196#define MSA_CSR_CAUSE_EB 17
197#define MSA_CSR_CAUSE_EF (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
198#define MSA_CSR_NXB 18
199#define MSA_CSR_NXF (_ULCAST_(0x1) << MSA_CSR_NXB)
200#define MSA_CSR_FSB 24
201#define MSA_CSR_FSF (_ULCAST_(0x1) << MSA_CSR_FSB)
202
203#endif /* _ASM_MSA_H */
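
A hedged sketch of using the accessors above to report the MSA implementation register once cpu_has_msa has been established (illustrative only; real code must also manage ownership of the MSA context):

/* Sketch: temporarily enable MSA and print the MSAIR revision fields. */
static void example_report_msa(void)
{
	unsigned int ir;

	enable_msa();
	ir = read_msa_ir();
	pr_info("MSA rev %lu, proc %lu\n",
		(ir & MSA_IR_REVF) >> MSA_IR_REVB,
		(ir & MSA_IR_PROCF) >> MSA_IR_PROCB);
	disable_msa();
}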
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 5e08bcc74897..5699ec3a71af 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -190,7 +190,9 @@ typedef struct { unsigned long pgprot; } pgprot_t;
190 * https://patchwork.linux-mips.org/patch/1541/ 190 * https://patchwork.linux-mips.org/patch/1541/
191 */ 191 */
192 192
193#ifndef __pa_symbol
193#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 194#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
195#endif
194 196
195#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 197#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
196 198
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 32aea4852fb0..e592f3687d6f 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -235,6 +235,15 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
235#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) 235#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
236#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) 236#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
237 237
238#elif defined(CONFIG_CPU_LOONGSON3)
239
240/* Using COHERENT flag for NONCOHERENT doesn't hurt. */
241
242#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* LOONGSON */
243#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */
244#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */
245#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* LOONGSON */
246
238#else 247#else
239 248
240#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */ 249#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3605b844ad87..ad70cba8daff 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -97,18 +97,48 @@ extern unsigned int vced_count, vcei_count;
97 97
98#define NUM_FPU_REGS 32 98#define NUM_FPU_REGS 32
99 99
100typedef __u64 fpureg_t; 100#ifdef CONFIG_CPU_HAS_MSA
101# define FPU_REG_WIDTH 128
102#else
103# define FPU_REG_WIDTH 64
104#endif
105
106union fpureg {
107 __u32 val32[FPU_REG_WIDTH / 32];
108 __u64 val64[FPU_REG_WIDTH / 64];
109};
110
111#ifdef CONFIG_CPU_LITTLE_ENDIAN
112# define FPR_IDX(width, idx) (idx)
113#else
114# define FPR_IDX(width, idx) ((FPU_REG_WIDTH / (width)) - 1 - (idx))
115#endif
116
117#define BUILD_FPR_ACCESS(width) \
118static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
119{ \
120 return fpr->val##width[FPR_IDX(width, idx)]; \
121} \
122 \
123static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
124 u##width val) \
125{ \
126 fpr->val##width[FPR_IDX(width, idx)] = val; \
127}
128
129BUILD_FPR_ACCESS(32)
130BUILD_FPR_ACCESS(64)
101 131
102/* 132/*
103 * It would be nice to add some more fields for emulator statistics, but there 133 * It would be nice to add some more fields for emulator statistics,
104 * are a number of fixed offsets in offset.h and elsewhere that would have to 134 * the additional information is private to the FPU emulator for now.
105 * be recalculated by hand. So the additional information will be private to 135 * See arch/mips/include/asm/fpu_emulator.h.
106 * the FPU emulator for now. See asm-mips/fpu_emulator.h.
107 */ 136 */
108 137
109struct mips_fpu_struct { 138struct mips_fpu_struct {
110 fpureg_t fpr[NUM_FPU_REGS]; 139 union fpureg fpr[NUM_FPU_REGS];
111 unsigned int fcr31; 140 unsigned int fcr31;
141 unsigned int msacsr;
112}; 142};
113 143
114#define NUM_DSP_REGS 6 144#define NUM_DSP_REGS 6
@@ -284,8 +314,9 @@ struct thread_struct {
284 * Saved FPU/FPU emulator stuff \ 314 * Saved FPU/FPU emulator stuff \
285 */ \ 315 */ \
286 .fpu = { \ 316 .fpu = { \
287 .fpr = {0,}, \ 317 .fpr = {{{0,},},}, \
288 .fcr31 = 0, \ 318 .fcr31 = 0, \
319 .msacsr = 0, \
289 }, \ 320 }, \
290 /* \ 321 /* \
291 * FPU affinity state (null if not FPAFF) \ 322 * FPU affinity state (null if not FPAFF) \
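
The generated get_fprNN()/set_fprNN() helpers hide the fact that, on a big-endian CPU, element 0 of a 128-bit register sits at the top of the val32/val64 arrays. A self-contained user-space rendition of the little-endian case, using a fixed 128-bit width and standard integer types instead of the kernel's:

#include <stdio.h>
#include <stdint.h>

#define FPU_REG_WIDTH 128		/* the CONFIG_CPU_HAS_MSA case */

union fpureg {
	uint32_t val32[FPU_REG_WIDTH / 32];
	uint64_t val64[FPU_REG_WIDTH / 64];
};

/* Little-endian indexing, as in the CONFIG_CPU_LITTLE_ENDIAN branch above;
 * the big-endian variant counts elements down from the top of the register. */
#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width)						\
static uint##width##_t get_fpr##width(union fpureg *fpr, unsigned idx)	\
{									\
	return fpr->val##width[FPR_IDX(width, idx)];			\
}									\
									\
static void set_fpr##width(union fpureg *fpr, unsigned idx,		\
			   uint##width##_t val)				\
{									\
	fpr->val##width[FPR_IDX(width, idx)] = val;			\
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)

int main(void)
{
	union fpureg r = { .val64 = { 0, 0 } };

	set_fpr64(&r, 0, 0x1122334455667788ULL);	/* scalar FP double       */
	set_fpr64(&r, 1, 0xdeadbeefcafef00dULL);	/* MSA-only upper 64 bits */
	set_fpr32(&r, 0, 0x55667788u);			/* rewrite just one word  */

	printf("upper half %#llx, low word %#x\n",
	       (unsigned long long)get_fpr64(&r, 1), get_fpr32(&r, 0));
	return 0;
}
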
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h
index 7bba9da110af..bf1ac8d35783 100644
--- a/arch/mips/include/asm/ptrace.h
+++ b/arch/mips/include/asm/ptrace.h
@@ -82,7 +82,7 @@ static inline long regs_return_value(struct pt_regs *regs)
82#define instruction_pointer(regs) ((regs)->cp0_epc) 82#define instruction_pointer(regs) ((regs)->cp0_epc)
83#define profile_pc(regs) instruction_pointer(regs) 83#define profile_pc(regs) instruction_pointer(regs)
84 84
85extern asmlinkage void syscall_trace_enter(struct pt_regs *regs); 85extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
86extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); 86extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
87 87
88extern void die(const char *, struct pt_regs *) __noreturn; 88extern void die(const char *, struct pt_regs *) __noreturn;
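
syscall_trace_enter() now returns the (possibly vetoed) syscall number instead of void, so the entry path can skip dispatch when a tracer or seccomp filter rejects the call. A hedged, compilable sketch of that control flow with stubbed-out helpers; this is not the kernel's actual entry code:

#include <stdio.h>

struct pt_regs { long regs[32]; };

static long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	(void)regs;
	return syscall == 4242 ? -1L : syscall;	/* pretend 4242 is filtered out */
}

static void dispatch(struct pt_regs *regs, long nr)
{
	(void)regs;
	printf("dispatching syscall %ld\n", nr);
}

int main(void)
{
	struct pt_regs regs = { { 0 } };
	long nr;

	nr = syscall_trace_enter(&regs, 4003);	/* traced but allowed */
	if (nr >= 0)
		dispatch(&regs, nr);

	nr = syscall_trace_enter(&regs, 4242);	/* vetoed: skip dispatch */
	if (nr < 0)
		printf("syscall skipped\n");
	return 0;
}
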
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index c84caddb8bde..ca64cbe44493 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,7 @@
17#include <asm/cpu-features.h> 17#include <asm/cpu-features.h>
18#include <asm/cpu-type.h> 18#include <asm/cpu-type.h>
19#include <asm/mipsmtregs.h> 19#include <asm/mipsmtregs.h>
20#include <asm/uaccess.h> /* for segment_eq() */
20 21
21/* 22/*
22 * This macro return a properly sign-extended address suitable as base address 23 * This macro return a properly sign-extended address suitable as base address
@@ -35,7 +36,7 @@
35 __asm__ __volatile__( \ 36 __asm__ __volatile__( \
36 " .set push \n" \ 37 " .set push \n" \
37 " .set noreorder \n" \ 38 " .set noreorder \n" \
38 " .set mips3\n\t \n" \ 39 " .set arch=r4000 \n" \
39 " cache %0, %1 \n" \ 40 " cache %0, %1 \n" \
40 " .set pop \n" \ 41 " .set pop \n" \
41 : \ 42 : \
@@ -203,7 +204,7 @@ static inline void flush_scache_line(unsigned long addr)
203 __asm__ __volatile__( \ 204 __asm__ __volatile__( \
204 " .set push \n" \ 205 " .set push \n" \
205 " .set noreorder \n" \ 206 " .set noreorder \n" \
206 " .set mips3 \n" \ 207 " .set arch=r4000 \n" \
207 "1: cache %0, (%1) \n" \ 208 "1: cache %0, (%1) \n" \
208 "2: .set pop \n" \ 209 "2: .set pop \n" \
209 " .section __ex_table,\"a\" \n" \ 210 " .section __ex_table,\"a\" \n" \
@@ -212,6 +213,20 @@ static inline void flush_scache_line(unsigned long addr)
212 : \ 213 : \
213 : "i" (op), "r" (addr)) 214 : "i" (op), "r" (addr))
214 215
216#define protected_cachee_op(op,addr) \
217 __asm__ __volatile__( \
218 " .set push \n" \
219 " .set noreorder \n" \
220 " .set mips0 \n" \
221 " .set eva \n" \
222 "1: cachee %0, (%1) \n" \
223 "2: .set pop \n" \
224 " .section __ex_table,\"a\" \n" \
225 " "STR(PTR)" 1b, 2b \n" \
226 " .previous" \
227 : \
228 : "i" (op), "r" (addr))
229
215/* 230/*
216 * The next two are for badland addresses like signal trampolines. 231 * The next two are for badland addresses like signal trampolines.
217 */ 232 */
@@ -223,7 +238,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
223 break; 238 break;
224 239
225 default: 240 default:
241#ifdef CONFIG_EVA
242 protected_cachee_op(Hit_Invalidate_I, addr);
243#else
226 protected_cache_op(Hit_Invalidate_I, addr); 244 protected_cache_op(Hit_Invalidate_I, addr);
245#endif
227 break; 246 break;
228 } 247 }
229} 248}
@@ -356,6 +375,91 @@ static inline void invalidate_tcache_page(unsigned long addr)
356 : "r" (base), \ 375 : "r" (base), \
357 "i" (op)); 376 "i" (op));
358 377
378/*
379 * Perform the cache operation specified by op using a user mode virtual
380 * address while in kernel mode.
381 */
382#define cache16_unroll32_user(base,op) \
383 __asm__ __volatile__( \
384 " .set push \n" \
385 " .set noreorder \n" \
386 " .set mips0 \n" \
387 " .set eva \n" \
388 " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
389 " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
390 " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
391 " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
392 " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
393 " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
394 " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
395 " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
396 " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
397 " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
398 " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
399 " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
400 " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
401 " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
402 " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
403 " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
404 " .set pop \n" \
405 : \
406 : "r" (base), \
407 "i" (op));
408
409#define cache32_unroll32_user(base, op) \
410 __asm__ __volatile__( \
411 " .set push \n" \
412 " .set noreorder \n" \
413 " .set mips0 \n" \
414 " .set eva \n" \
415 " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
416 " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
417 " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
418 " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
419 " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
420 " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
421 " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
422 " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
423 " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
424 " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
425 " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
426 " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
427 " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
428 " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
429 " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
430 " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
431 " .set pop \n" \
432 : \
433 : "r" (base), \
434 "i" (op));
435
436#define cache64_unroll32_user(base, op) \
437 __asm__ __volatile__( \
438 " .set push \n" \
439 " .set noreorder \n" \
440 " .set mips0 \n" \
441 " .set eva \n" \
442 " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
443 " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
444 " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
445 " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
446 " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
447 " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
448 " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
449 " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
450 " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
451 " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
452 " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
453 " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
454 " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
455 " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
456 " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
457 " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
458 " .set pop \n" \
459 : \
460 : "r" (base), \
461 "i" (op));
462
359/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */ 463/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
360#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \ 464#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
361static inline void extra##blast_##pfx##cache##lsize(void) \ 465static inline void extra##blast_##pfx##cache##lsize(void) \
@@ -429,6 +533,32 @@ __BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32
429__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, ) 533__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
430__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, ) 534__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
431 535
536#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
537static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
538{ \
539 unsigned long start = page; \
540 unsigned long end = page + PAGE_SIZE; \
541 \
542 __##pfx##flush_prologue \
543 \
544 do { \
545 cache##lsize##_unroll32_user(start, hitop); \
546 start += lsize * 32; \
547 } while (start < end); \
548 \
549 __##pfx##flush_epilogue \
550}
551
552__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
553 16)
554__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
555__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
556 32)
557__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
558__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
559 64)
560__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
561
432/* build blast_xxx_range, protected_blast_xxx_range */ 562/* build blast_xxx_range, protected_blast_xxx_range */
433#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \ 563#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
434static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ 564static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
@@ -450,12 +580,51 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
450 __##pfx##flush_epilogue \ 580 __##pfx##flush_epilogue \
451} 581}
452 582
583#ifndef CONFIG_EVA
584
453__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, ) 585__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
454__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
455__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, ) 586__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
587
588#else
589
590#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
591static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
592 unsigned long end) \
593{ \
594 unsigned long lsize = cpu_##desc##_line_size(); \
595 unsigned long addr = start & ~(lsize - 1); \
596 unsigned long aend = (end - 1) & ~(lsize - 1); \
597 \
598 __##pfx##flush_prologue \
599 \
600 if (segment_eq(get_fs(), USER_DS)) { \
601 while (1) { \
602 protected_cachee_op(hitop, addr); \
603 if (addr == aend) \
604 break; \
605 addr += lsize; \
606 } \
607 } else { \
608 while (1) { \
609 protected_cache_op(hitop, addr); \
610 if (addr == aend) \
611 break; \
612 addr += lsize; \
613 } \
614 \
615 } \
616 __##pfx##flush_epilogue \
617}
618
619__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
620__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
621
622#endif
623__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
456__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \ 624__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
457 protected_, loongson2_) 625 protected_, loongson2_)
458__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , ) 626__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
627__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
459__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , ) 628__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
460/* blast_inv_dcache_range */ 629/* blast_inv_dcache_range */
461__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , ) 630__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
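
All of the range-based blast helpers share the same endpoint arithmetic: round both addresses down to a cache-line boundary and stop once the line containing end-1 has been processed. A stand-alone illustration of just that arithmetic, assuming a 32-byte line purely for the example:

#include <stdio.h>

static void show_range(unsigned long start, unsigned long end)
{
	unsigned long lsize = 32;			/* assumed line size */
	unsigned long addr = start & ~(lsize - 1);
	unsigned long aend = (end - 1) & ~(lsize - 1);
	unsigned int lines = 0;

	for (;;) {
		lines++;				/* a cache op would go here */
		if (addr == aend)
			break;
		addr += lsize;
	}
	printf("[%#lx, %#lx) -> %u line ops\n", start, end, lines);
}

int main(void)
{
	show_range(0x1004, 0x1005);	/* 1 op: both ends in the same line */
	show_range(0x1004, 0x1061);	/* 4 ops: lines 0x1000..0x1060      */
	return 0;
}
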
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index eeeb0f48c767..f54bdbe85c0d 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,6 +32,8 @@ struct sigcontext32 {
32 __u32 sc_lo2; 32 __u32 sc_lo2;
33 __u32 sc_hi3; 33 __u32 sc_hi3;
34 __u32 sc_lo3; 34 __u32 sc_lo3;
35 __u64 sc_msaregs[32]; /* Most significant 64 bits */
36 __u32 sc_msa_csr;
35}; 37};
36#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 38#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
37#endif /* _ASM_SIGCONTEXT_H */ 39#endif /* _ASM_SIGCONTEXT_H */
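
Only the upper 64 bits of each MSA vector register need new slots here because the low halves alias the scalar FP registers already saved in the sigcontext. A layout-only sketch of the added fields, sized with standard integer types:

#include <stdio.h>
#include <stdint.h>

/* Layout-only model of the fields added to struct sigcontext32. */
struct msa_sigcontext_extra {
	uint64_t sc_msaregs[32];	/* most significant 64 bits of w0..w31 */
	uint32_t sc_msa_csr;
};

int main(void)
{
	printf("extra signal-frame bytes for MSA: %zu\n",
	       sizeof(struct msa_sigcontext_extra));
	return 0;
}
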
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
new file mode 100644
index 000000000000..d60d1a2180d1
--- /dev/null
+++ b/arch/mips/include/asm/smp-cps.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#ifndef __MIPS_ASM_SMP_CPS_H__
12#define __MIPS_ASM_SMP_CPS_H__
13
14#ifndef __ASSEMBLY__
15
16struct boot_config {
17 unsigned int core;
18 unsigned int vpe;
19 unsigned long pc;
20 unsigned long sp;
21 unsigned long gp;
22};
23
24extern struct boot_config mips_cps_bootcfg;
25
26extern void mips_cps_core_entry(void);
27
28#else /* __ASSEMBLY__ */
29
30.extern mips_cps_bootcfg;
31
32#endif /* __ASSEMBLY__ */
33#endif /* __MIPS_ASM_SMP_CPS_H__ */
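
struct boot_config carries the initial register state a secondary core or VPE needs before it can run mips_cps_core_entry(). A purely illustrative, compilable sketch of filling it in; the addresses and the surrounding scaffolding are invented and are not taken from the SMP/CPS code:

#include <stdio.h>

struct boot_config {
	unsigned int core;
	unsigned int vpe;
	unsigned long pc;
	unsigned long sp;
	unsigned long gp;
};

static struct boot_config mips_cps_bootcfg;

static void mips_cps_core_entry(void) { /* secondary entry point (stub) */ }

int main(void)
{
	/* Describe where core 1 / VPE 0 should start executing. */
	mips_cps_bootcfg.core = 1;
	mips_cps_bootcfg.vpe  = 0;
	mips_cps_bootcfg.pc   = (unsigned long)&mips_cps_core_entry;
	mips_cps_bootcfg.sp   = 0x81000000UL;	/* hypothetical stack top      */
	mips_cps_bootcfg.gp   = 0x80800000UL;	/* hypothetical global pointer */

	printf("core %u/vpe %u will enter at %#lx\n",
	       mips_cps_bootcfg.core, mips_cps_bootcfg.vpe, mips_cps_bootcfg.pc);
	return 0;
}
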
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index ef2a8041e78b..73d35b18fb64 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -13,6 +13,8 @@
13 13
14#include <linux/errno.h> 14#include <linux/errno.h>
15 15
16#include <asm/mips-cm.h>
17
16#ifdef CONFIG_SMP 18#ifdef CONFIG_SMP
17 19
18#include <linux/cpumask.h> 20#include <linux/cpumask.h>
@@ -43,6 +45,9 @@ static inline void plat_smp_setup(void)
43 mp_ops->smp_setup(); 45 mp_ops->smp_setup();
44} 46}
45 47
48extern void gic_send_ipi_single(int cpu, unsigned int action);
49extern void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action);
50
46#else /* !CONFIG_SMP */ 51#else /* !CONFIG_SMP */
47 52
48struct plat_smp_ops; 53struct plat_smp_ops;
@@ -76,6 +81,9 @@ static inline int register_cmp_smp_ops(void)
76#ifdef CONFIG_MIPS_CMP 81#ifdef CONFIG_MIPS_CMP
77 extern struct plat_smp_ops cmp_smp_ops; 82 extern struct plat_smp_ops cmp_smp_ops;
78 83
84 if (!mips_cm_present())
85 return -ENODEV;
86
79 register_smp_ops(&cmp_smp_ops); 87 register_smp_ops(&cmp_smp_ops);
80 88
81 return 0; 89 return 0;
@@ -97,4 +105,13 @@ static inline int register_vsmp_smp_ops(void)
97#endif 105#endif
98} 106}
99 107
108#ifdef CONFIG_MIPS_CPS
109extern int register_cps_smp_ops(void);
110#else
111static inline int register_cps_smp_ops(void)
112{
113 return -ENODEV;
114}
115#endif
116
100#endif /* __ASM_SMP_OPS_H */ 117#endif /* __ASM_SMP_OPS_H */
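
register_cps_smp_ops() follows the pattern of the existing helpers: return 0 once a suitable plat_smp_ops has been registered, -ENODEV otherwise, so platform code can try the methods in order of preference. A mocked-up sketch of that fallback chain; the probe results are hard-coded for the demo:

#include <errno.h>
#include <stdio.h>

/* Mock registration helpers so the pattern can be shown standalone; the real
 * ones live in <asm/smp-ops.h> and fail when hardware/config support is absent. */
static int register_cps_smp_ops(void)  { return -ENODEV; }	/* no CPS here  */
static int register_cmp_smp_ops(void)  { return -ENODEV; }	/* no CM either */
static int register_vsmp_smp_ops(void) { printf("using VSMP ops\n"); return 0; }

int main(void)
{
	/* A typical probe order: most capable mechanism first. */
	if (!register_cps_smp_ops())
		return 0;
	if (!register_cmp_smp_ops())
		return 0;
	return register_vsmp_smp_ops();
}
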
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eb6008758484..efa02acd3dd5 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -42,6 +42,7 @@ extern int __cpu_logical_map[NR_CPUS];
42#define SMP_ICACHE_FLUSH 0x4 42#define SMP_ICACHE_FLUSH 0x4
43/* Used by kexec crashdump to save all cpu's state */ 43/* Used by kexec crashdump to save all cpu's state */
44#define SMP_DUMP 0x8 44#define SMP_DUMP 0x8
45#define SMP_ASK_C0COUNT 0x10
45 46
46extern volatile cpumask_t cpu_callin_map; 47extern volatile cpumask_t cpu_callin_map;
47 48
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 4857e2c8df5a..d301e108d5b8 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -435,7 +435,7 @@
435 435
436 .macro RESTORE_SP_AND_RET 436 .macro RESTORE_SP_AND_RET
437 LONG_L sp, PT_R29(sp) 437 LONG_L sp, PT_R29(sp)
438 .set mips3 438 .set arch=r4000
439 eret 439 eret
440 .set mips0 440 .set mips0
441 .endm 441 .endm
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 278d45a09728..495c1041a2cc 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -16,22 +16,29 @@
16#include <asm/watch.h> 16#include <asm/watch.h>
17#include <asm/dsp.h> 17#include <asm/dsp.h>
18#include <asm/cop2.h> 18#include <asm/cop2.h>
19#include <asm/msa.h>
19 20
20struct task_struct; 21struct task_struct;
21 22
23enum {
24 FP_SAVE_NONE = 0,
25 FP_SAVE_VECTOR = -1,
26 FP_SAVE_SCALAR = 1,
27};
28
22/** 29/**
23 * resume - resume execution of a task 30 * resume - resume execution of a task
24 * @prev: The task previously executed. 31 * @prev: The task previously executed.
25 * @next: The task to begin executing. 32 * @next: The task to begin executing.
26 * @next_ti: task_thread_info(next). 33 * @next_ti: task_thread_info(next).
27 * @usedfpu: Non-zero if prev's FP context should be saved. 34 * @fp_save: Which, if any, FP context to save for prev.
28 * 35 *
29 * This function is used whilst scheduling to save the context of prev & load 36 * This function is used whilst scheduling to save the context of prev & load
30 * the context of next. Returns prev. 37 * the context of next. Returns prev.
31 */ 38 */
32extern asmlinkage struct task_struct *resume(struct task_struct *prev, 39extern asmlinkage struct task_struct *resume(struct task_struct *prev,
33 struct task_struct *next, struct thread_info *next_ti, 40 struct task_struct *next, struct thread_info *next_ti,
34 u32 usedfpu); 41 s32 fp_save);
35 42
36extern unsigned int ll_bit; 43extern unsigned int ll_bit;
37extern struct task_struct *ll_task; 44extern struct task_struct *ll_task;
@@ -75,7 +82,8 @@ do { \
75 82
76#define switch_to(prev, next, last) \ 83#define switch_to(prev, next, last) \
77do { \ 84do { \
78 u32 __usedfpu, __c0_stat; \ 85 u32 __c0_stat; \
86 s32 __fpsave = FP_SAVE_NONE; \
79 __mips_mt_fpaff_switch_to(prev); \ 87 __mips_mt_fpaff_switch_to(prev); \
80 if (cpu_has_dsp) \ 88 if (cpu_has_dsp) \
81 __save_dsp(prev); \ 89 __save_dsp(prev); \
@@ -88,8 +96,12 @@ do { \
88 write_c0_status(__c0_stat & ~ST0_CU2); \ 96 write_c0_status(__c0_stat & ~ST0_CU2); \
89 } \ 97 } \
90 __clear_software_ll_bit(); \ 98 __clear_software_ll_bit(); \
91 __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \ 99 if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
92 (last) = resume(prev, next, task_thread_info(next), __usedfpu); \ 100 __fpsave = FP_SAVE_SCALAR; \
101 if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
102 __fpsave = FP_SAVE_VECTOR; \
103 (last) = resume(prev, next, task_thread_info(next), __fpsave); \
104 disable_msa(); \
93} while (0) 105} while (0)
94 106
95#define finish_arch_switch(prev) \ 107#define finish_arch_switch(prev) \
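
The switch_to() change folds TIF_USEDFPU and TIF_USEDMSA into a single three-valued argument for resume(), with the vector case taking precedence because the MSA context is a superset of the scalar one. A small stand-alone model of that selection:

#include <stdio.h>

enum {
	FP_SAVE_NONE   = 0,
	FP_SAVE_VECTOR = -1,
	FP_SAVE_SCALAR = 1,
};

static int pick_fp_save(int used_fpu, int used_msa)
{
	int fpsave = FP_SAVE_NONE;

	if (used_fpu)
		fpsave = FP_SAVE_SCALAR;	/* 64-bit scalar FP context        */
	if (used_msa)
		fpsave = FP_SAVE_VECTOR;	/* full 128-bit vector context wins */
	return fpsave;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_fp_save(0, 0),	/* nothing to save         */
	       pick_fp_save(1, 0),	/* scalar FP only          */
	       pick_fp_save(1, 1));	/* MSA implies vector save */
	return 0;
}
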
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index f35b131977e6..c6e9cd2bca8d 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -14,17 +14,28 @@
14#define __ASM_MIPS_SYSCALL_H 14#define __ASM_MIPS_SYSCALL_H
15 15
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/audit.h> 17#include <uapi/linux/audit.h>
18#include <linux/elf-em.h> 18#include <linux/elf-em.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <asm/ptrace.h> 22#include <asm/ptrace.h>
23#include <asm/unistd.h>
24
25#ifndef __NR_syscall /* Only defined if _MIPS_SIM == _MIPS_SIM_ABI32 */
26#define __NR_syscall 4000
27#endif
23 28
24static inline long syscall_get_nr(struct task_struct *task, 29static inline long syscall_get_nr(struct task_struct *task,
25 struct pt_regs *regs) 30 struct pt_regs *regs)
26{ 31{
27 return regs->regs[2]; 32 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
33 if ((config_enabled(CONFIG_32BIT) ||
34 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
35 (regs->regs[2] == __NR_syscall))
36 return regs->regs[4];
37 else
38 return regs->regs[2];
28} 39}
29 40
30static inline unsigned long mips_get_syscall_arg(unsigned long *arg, 41static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
@@ -68,6 +79,12 @@ static inline long syscall_get_return_value(struct task_struct *task,
68 return regs->regs[2]; 79 return regs->regs[2];
69} 80}
70 81
82static inline void syscall_rollback(struct task_struct *task,
83 struct pt_regs *regs)
84{
85 /* Do nothing */
86}
87
71static inline void syscall_set_return_value(struct task_struct *task, 88static inline void syscall_set_return_value(struct task_struct *task,
72 struct pt_regs *regs, 89 struct pt_regs *regs,
73 int error, long val) 90 int error, long val)
@@ -87,6 +104,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
87 unsigned long *args) 104 unsigned long *args)
88{ 105{
89 int ret; 106 int ret;
107 /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
108 if ((config_enabled(CONFIG_32BIT) ||
109 test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
110 (regs->regs[2] == __NR_syscall)) {
111 i++;
112 n++;
113 }
90 114
91 while (n--) 115 while (n--)
92 ret |= mips_get_syscall_arg(args++, task, regs, i++); 116 ret |= mips_get_syscall_arg(args++, task, regs, i++);
@@ -103,11 +127,12 @@ extern const unsigned long sys_call_table[];
103extern const unsigned long sys32_call_table[]; 127extern const unsigned long sys32_call_table[];
104extern const unsigned long sysn32_call_table[]; 128extern const unsigned long sysn32_call_table[];
105 129
106static inline int __syscall_get_arch(void) 130static inline int syscall_get_arch(void)
107{ 131{
108 int arch = EM_MIPS; 132 int arch = EM_MIPS;
109#ifdef CONFIG_64BIT 133#ifdef CONFIG_64BIT
110 arch |= __AUDIT_ARCH_64BIT; 134 if (!test_thread_flag(TIF_32BIT_REGS))
135 arch |= __AUDIT_ARCH_64BIT;
111#endif 136#endif
112#if defined(__LITTLE_ENDIAN) 137#if defined(__LITTLE_ENDIAN)
113 arch |= __AUDIT_ARCH_LE; 138 arch |= __AUDIT_ARCH_LE;
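
On O32, the syscall(2) wrapper reaches the kernel as __NR_syscall (4000) with the real number in a0, which is why both syscall_get_nr() and syscall_get_arguments() above shift everything along by one register. A compilable toy model of the number lookup; 4003 is used as the O32 read number for the example:

#include <stdio.h>

/* Minimal model of the O32 indirect-syscall handling added above: when v0
 * holds __NR_syscall, the real number was passed as the first argument. */
#define __NR_syscall 4000

struct fake_regs { long v0, a0; };

static long syscall_get_nr_model(const struct fake_regs *r)
{
	return r->v0 == __NR_syscall ? r->a0 : r->v0;
}

int main(void)
{
	struct fake_regs direct   = { .v0 = 4003, .a0 = 0 };	/* read(...)          */
	struct fake_regs indirect = { .v0 = 4000, .a0 = 4003 };	/* syscall(__NR_read) */

	printf("%ld %ld\n", syscall_get_nr_model(&direct),
	       syscall_get_nr_model(&indirect));
	return 0;
}
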
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 24846f9053fe..d2d961d6cb86 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
116#define TIF_LOAD_WATCH 25 /* If set, load watch registers */ 116#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
117#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */ 117#define TIF_SYSCALL_TRACEPOINT 26 /* syscall tracepoint instrumentation */
118#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */ 118#define TIF_32BIT_FPREGS 27 /* 32-bit floating point registers */
119#define TIF_USEDMSA 29 /* MSA has been used this quantum */
120#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
119#define TIF_SYSCALL_TRACE 31 /* syscall trace active */ 121#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
120 122
121#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 123#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -133,10 +135,13 @@ static inline struct thread_info *current_thread_info(void)
133#define _TIF_FPUBOUND (1<<TIF_FPUBOUND) 135#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
134#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH) 136#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
135#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS) 137#define _TIF_32BIT_FPREGS (1<<TIF_32BIT_FPREGS)
138#define _TIF_USEDMSA (1<<TIF_USEDMSA)
139#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
136#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 140#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
137 141
138#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ 142#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
139 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT) 143 _TIF_SYSCALL_AUDIT | \
144 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
140 145
141/* work to do in syscall_trace_leave() */ 146/* work to do in syscall_trace_leave() */
142#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \ 147#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3fa3750f577..a10951090234 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -6,6 +6,7 @@
6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle 6 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2007 Maciej W. Rozycki 8 * Copyright (C) 2007 Maciej W. Rozycki
9 * Copyright (C) 2014, Imagination Technologies Ltd.
9 */ 10 */
10#ifndef _ASM_UACCESS_H 11#ifndef _ASM_UACCESS_H
11#define _ASM_UACCESS_H 12#define _ASM_UACCESS_H
@@ -13,6 +14,7 @@
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/errno.h> 15#include <linux/errno.h>
15#include <linux/thread_info.h> 16#include <linux/thread_info.h>
17#include <asm/asm-eva.h>
16 18
17/* 19/*
18 * The fs value determines whether argument validity checking should be 20 * The fs value determines whether argument validity checking should be
@@ -222,11 +224,44 @@ struct __large_struct { unsigned long buf[100]; };
222 * Yuck. We need two variants, one for 64bit operation and one 224 * Yuck. We need two variants, one for 64bit operation and one
223 * for 32 bit mode and old iron. 225 * for 32 bit mode and old iron.
224 */ 226 */
227#ifndef CONFIG_EVA
228#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
229#else
230/*
231 * Kernel specific functions for EVA. We need to use normal load instructions
232 * to read data from kernel when operating in EVA mode. We use these macros to
233 * avoid redefining __get_user_asm for EVA.
234 */
235#undef _loadd
236#undef _loadw
237#undef _loadh
238#undef _loadb
225#ifdef CONFIG_32BIT 239#ifdef CONFIG_32BIT
226#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr) 240#define _loadd _loadw
241#else
242#define _loadd(reg, addr) "ld " reg ", " addr
243#endif
244#define _loadw(reg, addr) "lw " reg ", " addr
245#define _loadh(reg, addr) "lh " reg ", " addr
246#define _loadb(reg, addr) "lb " reg ", " addr
247
248#define __get_kernel_common(val, size, ptr) \
249do { \
250 switch (size) { \
251 case 1: __get_data_asm(val, _loadb, ptr); break; \
252 case 2: __get_data_asm(val, _loadh, ptr); break; \
253 case 4: __get_data_asm(val, _loadw, ptr); break; \
254 case 8: __GET_DW(val, _loadd, ptr); break; \
255 default: __get_user_unknown(); break; \
256 } \
257} while (0)
258#endif
259
260#ifdef CONFIG_32BIT
261#define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
227#endif 262#endif
228#ifdef CONFIG_64BIT 263#ifdef CONFIG_64BIT
229#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr) 264#define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
230#endif 265#endif
231 266
232extern void __get_user_unknown(void); 267extern void __get_user_unknown(void);
@@ -234,10 +269,10 @@ extern void __get_user_unknown(void);
234#define __get_user_common(val, size, ptr) \ 269#define __get_user_common(val, size, ptr) \
235do { \ 270do { \
236 switch (size) { \ 271 switch (size) { \
237 case 1: __get_user_asm(val, "lb", ptr); break; \ 272 case 1: __get_data_asm(val, user_lb, ptr); break; \
238 case 2: __get_user_asm(val, "lh", ptr); break; \ 273 case 2: __get_data_asm(val, user_lh, ptr); break; \
239 case 4: __get_user_asm(val, "lw", ptr); break; \ 274 case 4: __get_data_asm(val, user_lw, ptr); break; \
240 case 8: __GET_USER_DW(val, ptr); break; \ 275 case 8: __GET_DW(val, user_ld, ptr); break; \
241 default: __get_user_unknown(); break; \ 276 default: __get_user_unknown(); break; \
242 } \ 277 } \
243} while (0) 278} while (0)
@@ -246,8 +281,12 @@ do { \
246({ \ 281({ \
247 int __gu_err; \ 282 int __gu_err; \
248 \ 283 \
249 __chk_user_ptr(ptr); \ 284 if (segment_eq(get_fs(), get_ds())) { \
250 __get_user_common((x), size, ptr); \ 285 __get_kernel_common((x), size, ptr); \
286 } else { \
287 __chk_user_ptr(ptr); \
288 __get_user_common((x), size, ptr); \
289 } \
251 __gu_err; \ 290 __gu_err; \
252}) 291})
253 292
@@ -257,18 +296,22 @@ do { \
257 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ 296 const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
258 \ 297 \
259 might_fault(); \ 298 might_fault(); \
260 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ 299 if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) { \
261 __get_user_common((x), size, __gu_ptr); \ 300 if (segment_eq(get_fs(), get_ds())) \
301 __get_kernel_common((x), size, __gu_ptr); \
302 else \
303 __get_user_common((x), size, __gu_ptr); \
304 } \
262 \ 305 \
263 __gu_err; \ 306 __gu_err; \
264}) 307})
265 308
266#define __get_user_asm(val, insn, addr) \ 309#define __get_data_asm(val, insn, addr) \
267{ \ 310{ \
268 long __gu_tmp; \ 311 long __gu_tmp; \
269 \ 312 \
270 __asm__ __volatile__( \ 313 __asm__ __volatile__( \
271 "1: " insn " %1, %3 \n" \ 314 "1: "insn("%1", "%3")" \n" \
272 "2: \n" \ 315 "2: \n" \
273 " .insn \n" \ 316 " .insn \n" \
274 " .section .fixup,\"ax\" \n" \ 317 " .section .fixup,\"ax\" \n" \
@@ -287,7 +330,7 @@ do { \
287/* 330/*
288 * Get a long long 64 using 32 bit registers. 331 * Get a long long 64 using 32 bit registers.
289 */ 332 */
290#define __get_user_asm_ll32(val, addr) \ 333#define __get_data_asm_ll32(val, insn, addr) \
291{ \ 334{ \
292 union { \ 335 union { \
293 unsigned long long l; \ 336 unsigned long long l; \
@@ -295,8 +338,8 @@ do { \
295 } __gu_tmp; \ 338 } __gu_tmp; \
296 \ 339 \
297 __asm__ __volatile__( \ 340 __asm__ __volatile__( \
298 "1: lw %1, (%3) \n" \ 341 "1: " insn("%1", "(%3)")" \n" \
299 "2: lw %D1, 4(%3) \n" \ 342 "2: " insn("%D1", "4(%3)")" \n" \
300 "3: \n" \ 343 "3: \n" \
301 " .insn \n" \ 344 " .insn \n" \
302 " .section .fixup,\"ax\" \n" \ 345 " .section .fixup,\"ax\" \n" \
@@ -315,30 +358,73 @@ do { \
315 (val) = __gu_tmp.t; \ 358 (val) = __gu_tmp.t; \
316} 359}
317 360
361#ifndef CONFIG_EVA
362#define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
363#else
364/*
365 * Kernel specific functions for EVA. We need to use normal store instructions
366 * to write data to kernel space when operating in EVA mode. We use these
367 * macros to avoid redefining __put_data_asm for EVA.
368 */
369#undef _stored
370#undef _storew
371#undef _storeh
372#undef _storeb
373#ifdef CONFIG_32BIT
374#define _stored _storew
375#else
376#define _stored(reg, addr) "ld " reg ", " addr
377#endif
378
379#define _storew(reg, addr) "sw " reg ", " addr
380#define _storeh(reg, addr) "sh " reg ", " addr
381#define _storeb(reg, addr) "sb " reg ", " addr
382
383#define __put_kernel_common(ptr, size) \
384do { \
385 switch (size) { \
386 case 1: __put_data_asm(_storeb, ptr); break; \
387 case 2: __put_data_asm(_storeh, ptr); break; \
388 case 4: __put_data_asm(_storew, ptr); break; \
389 case 8: __PUT_DW(_stored, ptr); break; \
390 default: __put_user_unknown(); break; \
391 } \
392} while(0)
393#endif
394
318/* 395/*
319 * Yuck. We need two variants, one for 64bit operation and one 396 * Yuck. We need two variants, one for 64bit operation and one
320 * for 32 bit mode and old iron. 397 * for 32 bit mode and old iron.
321 */ 398 */
322#ifdef CONFIG_32BIT 399#ifdef CONFIG_32BIT
323#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr) 400#define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
324#endif 401#endif
325#ifdef CONFIG_64BIT 402#ifdef CONFIG_64BIT
326#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr) 403#define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
327#endif 404#endif
328 405
406#define __put_user_common(ptr, size) \
407do { \
408 switch (size) { \
409 case 1: __put_data_asm(user_sb, ptr); break; \
410 case 2: __put_data_asm(user_sh, ptr); break; \
411 case 4: __put_data_asm(user_sw, ptr); break; \
412 case 8: __PUT_DW(user_sd, ptr); break; \
413 default: __put_user_unknown(); break; \
414 } \
415} while (0)
416
329#define __put_user_nocheck(x, ptr, size) \ 417#define __put_user_nocheck(x, ptr, size) \
330({ \ 418({ \
331 __typeof__(*(ptr)) __pu_val; \ 419 __typeof__(*(ptr)) __pu_val; \
332 int __pu_err = 0; \ 420 int __pu_err = 0; \
333 \ 421 \
334 __chk_user_ptr(ptr); \
335 __pu_val = (x); \ 422 __pu_val = (x); \
336 switch (size) { \ 423 if (segment_eq(get_fs(), get_ds())) { \
337 case 1: __put_user_asm("sb", ptr); break; \ 424 __put_kernel_common(ptr, size); \
338 case 2: __put_user_asm("sh", ptr); break; \ 425 } else { \
339 case 4: __put_user_asm("sw", ptr); break; \ 426 __chk_user_ptr(ptr); \
340 case 8: __PUT_USER_DW(ptr); break; \ 427 __put_user_common(ptr, size); \
341 default: __put_user_unknown(); break; \
342 } \ 428 } \
343 __pu_err; \ 429 __pu_err; \
344}) 430})
@@ -351,21 +437,19 @@ do { \
351 \ 437 \
352 might_fault(); \ 438 might_fault(); \
353 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 439 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
354 switch (size) { \ 440 if (segment_eq(get_fs(), get_ds())) \
355 case 1: __put_user_asm("sb", __pu_addr); break; \ 441 __put_kernel_common(__pu_addr, size); \
356 case 2: __put_user_asm("sh", __pu_addr); break; \ 442 else \
357 case 4: __put_user_asm("sw", __pu_addr); break; \ 443 __put_user_common(__pu_addr, size); \
358 case 8: __PUT_USER_DW(__pu_addr); break; \
359 default: __put_user_unknown(); break; \
360 } \
361 } \ 444 } \
445 \
362 __pu_err; \ 446 __pu_err; \
363}) 447})
364 448
365#define __put_user_asm(insn, ptr) \ 449#define __put_data_asm(insn, ptr) \
366{ \ 450{ \
367 __asm__ __volatile__( \ 451 __asm__ __volatile__( \
368 "1: " insn " %z2, %3 # __put_user_asm\n" \ 452 "1: "insn("%z2", "%3")" # __put_data_asm \n" \
369 "2: \n" \ 453 "2: \n" \
370 " .insn \n" \ 454 " .insn \n" \
371 " .section .fixup,\"ax\" \n" \ 455 " .section .fixup,\"ax\" \n" \
@@ -380,11 +464,11 @@ do { \
380 "i" (-EFAULT)); \ 464 "i" (-EFAULT)); \
381} 465}
382 466
383#define __put_user_asm_ll32(ptr) \ 467#define __put_data_asm_ll32(insn, ptr) \
384{ \ 468{ \
385 __asm__ __volatile__( \ 469 __asm__ __volatile__( \
386 "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ 470 "1: "insn("%2", "(%3)")" # __put_data_asm_ll32 \n" \
387 "2: sw %D2, 4(%3) \n" \ 471 "2: "insn("%D2", "4(%3)")" \n" \
388 "3: \n" \ 472 "3: \n" \
389 " .insn \n" \ 473 " .insn \n" \
390 " .section .fixup,\"ax\" \n" \ 474 " .section .fixup,\"ax\" \n" \
@@ -403,6 +487,11 @@ do { \
403extern void __put_user_unknown(void); 487extern void __put_user_unknown(void);
404 488
405/* 489/*
490 * ul{b,h,w} are macros and there are no equivalent macros for EVA.
491 * EVA unaligned access is handled in the ADE exception handler.
492 */
493#ifndef CONFIG_EVA
494/*
406 * put_user_unaligned: - Write a simple value into user space. 495 * put_user_unaligned: - Write a simple value into user space.
407 * @x: Value to copy to user space. 496 * @x: Value to copy to user space.
408 * @ptr: Destination address, in user space. 497 * @ptr: Destination address, in user space.
@@ -504,7 +593,7 @@ extern void __get_user_unaligned_unknown(void);
504#define __get_user_unaligned_common(val, size, ptr) \ 593#define __get_user_unaligned_common(val, size, ptr) \
505do { \ 594do { \
506 switch (size) { \ 595 switch (size) { \
507 case 1: __get_user_asm(val, "lb", ptr); break; \ 596 case 1: __get_data_asm(val, "lb", ptr); break; \
508 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ 597 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
509 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ 598 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
510 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ 599 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
@@ -531,7 +620,7 @@ do { \
531 __gu_err; \ 620 __gu_err; \
532}) 621})
533 622
534#define __get_user_unaligned_asm(val, insn, addr) \ 623#define __get_data_unaligned_asm(val, insn, addr) \
535{ \ 624{ \
536 long __gu_tmp; \ 625 long __gu_tmp; \
537 \ 626 \
@@ -594,19 +683,23 @@ do { \
594#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr) 683#define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
595#endif 684#endif
596 685
686#define __put_user_unaligned_common(ptr, size) \
687do { \
688 switch (size) { \
689 case 1: __put_data_asm("sb", ptr); break; \
690 case 2: __put_user_unaligned_asm("ush", ptr); break; \
691 case 4: __put_user_unaligned_asm("usw", ptr); break; \
692 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
693 default: __put_user_unaligned_unknown(); break; } \
694} while (0)
695
597#define __put_user_unaligned_nocheck(x,ptr,size) \ 696#define __put_user_unaligned_nocheck(x,ptr,size) \
598({ \ 697({ \
599 __typeof__(*(ptr)) __pu_val; \ 698 __typeof__(*(ptr)) __pu_val; \
600 int __pu_err = 0; \ 699 int __pu_err = 0; \
601 \ 700 \
602 __pu_val = (x); \ 701 __pu_val = (x); \
603 switch (size) { \ 702 __put_user_unaligned_common(ptr, size); \
604 case 1: __put_user_asm("sb", ptr); break; \
605 case 2: __put_user_unaligned_asm("ush", ptr); break; \
606 case 4: __put_user_unaligned_asm("usw", ptr); break; \
607 case 8: __PUT_USER_UNALIGNED_DW(ptr); break; \
608 default: __put_user_unaligned_unknown(); break; \
609 } \
610 __pu_err; \ 703 __pu_err; \
611}) 704})
612 705
@@ -616,15 +709,9 @@ do { \
616 __typeof__(*(ptr)) __pu_val = (x); \ 709 __typeof__(*(ptr)) __pu_val = (x); \
617 int __pu_err = -EFAULT; \ 710 int __pu_err = -EFAULT; \
618 \ 711 \
619 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \ 712 if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
620 switch (size) { \ 713 __put_user_unaligned_common(__pu_addr, size); \
621 case 1: __put_user_asm("sb", __pu_addr); break; \ 714 \
622 case 2: __put_user_unaligned_asm("ush", __pu_addr); break; \
623 case 4: __put_user_unaligned_asm("usw", __pu_addr); break; \
624 case 8: __PUT_USER_UNALGINED_DW(__pu_addr); break; \
625 default: __put_user_unaligned_unknown(); break; \
626 } \
627 } \
628 __pu_err; \ 715 __pu_err; \
629}) 716})
630 717
@@ -669,6 +756,7 @@ do { \
669} 756}
670 757
671extern void __put_user_unaligned_unknown(void); 758extern void __put_user_unaligned_unknown(void);
759#endif
672 760
673/* 761/*
674 * We're generating jump to subroutines which will be outside the range of 762 * We're generating jump to subroutines which will be outside the range of
@@ -693,6 +781,7 @@ extern void __put_user_unaligned_unknown(void);
693 781
694extern size_t __copy_user(void *__to, const void *__from, size_t __n); 782extern size_t __copy_user(void *__to, const void *__from, size_t __n);
695 783
784#ifndef CONFIG_EVA
696#define __invoke_copy_to_user(to, from, n) \ 785#define __invoke_copy_to_user(to, from, n) \
697({ \ 786({ \
698 register void __user *__cu_to_r __asm__("$4"); \ 787 register void __user *__cu_to_r __asm__("$4"); \
@@ -711,6 +800,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
711 __cu_len_r; \ 800 __cu_len_r; \
712}) 801})
713 802
803#define __invoke_copy_to_kernel(to, from, n) \
804 __invoke_copy_to_user(to, from, n)
805
806#endif
807
714/* 808/*
715 * __copy_to_user: - Copy a block of data into user space, with less checking. 809 * __copy_to_user: - Copy a block of data into user space, with less checking.
716 * @to: Destination address, in user space. 810 * @to: Destination address, in user space.
@@ -735,7 +829,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
735 __cu_from = (from); \ 829 __cu_from = (from); \
736 __cu_len = (n); \ 830 __cu_len = (n); \
737 might_fault(); \ 831 might_fault(); \
738 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ 832 if (segment_eq(get_fs(), get_ds())) \
833 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
834 __cu_len); \
835 else \
836 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
837 __cu_len); \
739 __cu_len; \ 838 __cu_len; \
740}) 839})
741 840
@@ -750,7 +849,12 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
750 __cu_to = (to); \ 849 __cu_to = (to); \
751 __cu_from = (from); \ 850 __cu_from = (from); \
752 __cu_len = (n); \ 851 __cu_len = (n); \
753 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \ 852 if (segment_eq(get_fs(), get_ds())) \
853 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
854 __cu_len); \
855 else \
856 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
857 __cu_len); \
754 __cu_len; \ 858 __cu_len; \
755}) 859})
756 860
@@ -763,8 +867,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
763 __cu_to = (to); \ 867 __cu_to = (to); \
764 __cu_from = (from); \ 868 __cu_from = (from); \
765 __cu_len = (n); \ 869 __cu_len = (n); \
766 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \ 870 if (segment_eq(get_fs(), get_ds())) \
767 __cu_len); \ 871 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \
872 __cu_from,\
873 __cu_len);\
874 else \
875 __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \
876 __cu_from, \
877 __cu_len); \
768 __cu_len; \ 878 __cu_len; \
769}) 879})
770 880
@@ -790,14 +900,23 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
790 __cu_to = (to); \ 900 __cu_to = (to); \
791 __cu_from = (from); \ 901 __cu_from = (from); \
792 __cu_len = (n); \ 902 __cu_len = (n); \
793 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ 903 if (segment_eq(get_fs(), get_ds())) { \
794 might_fault(); \ 904 __cu_len = __invoke_copy_to_kernel(__cu_to, \
795 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ 905 __cu_from, \
796 __cu_len); \ 906 __cu_len); \
907 } else { \
908 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
909 might_fault(); \
910 __cu_len = __invoke_copy_to_user(__cu_to, \
911 __cu_from, \
912 __cu_len); \
913 } \
797 } \ 914 } \
798 __cu_len; \ 915 __cu_len; \
799}) 916})
800 917
918#ifndef CONFIG_EVA
919
801#define __invoke_copy_from_user(to, from, n) \ 920#define __invoke_copy_from_user(to, from, n) \
802({ \ 921({ \
803 register void *__cu_to_r __asm__("$4"); \ 922 register void *__cu_to_r __asm__("$4"); \
@@ -821,6 +940,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
821 __cu_len_r; \ 940 __cu_len_r; \
822}) 941})
823 942
943#define __invoke_copy_from_kernel(to, from, n) \
944 __invoke_copy_from_user(to, from, n)
945
946/* For userland <-> userland operations */
947#define ___invoke_copy_in_user(to, from, n) \
948 __invoke_copy_from_user(to, from, n)
949
950/* For kernel <-> kernel operations */
951#define ___invoke_copy_in_kernel(to, from, n) \
952 __invoke_copy_from_user(to, from, n)
953
824#define __invoke_copy_from_user_inatomic(to, from, n) \ 954#define __invoke_copy_from_user_inatomic(to, from, n) \
825({ \ 955({ \
826 register void *__cu_to_r __asm__("$4"); \ 956 register void *__cu_to_r __asm__("$4"); \
@@ -844,6 +974,97 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
844 __cu_len_r; \ 974 __cu_len_r; \
845}) 975})
846 976
977#define __invoke_copy_from_kernel_inatomic(to, from, n) \
978 __invoke_copy_from_user_inatomic(to, from, n)
979
980#else
981
982/* EVA specific functions */
983
984extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
985 size_t __n);
986extern size_t __copy_from_user_eva(void *__to, const void *__from,
987 size_t __n);
988extern size_t __copy_to_user_eva(void *__to, const void *__from,
989 size_t __n);
990extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
991
992#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \
993({ \
994 register void *__cu_to_r __asm__("$4"); \
995 register const void __user *__cu_from_r __asm__("$5"); \
996 register long __cu_len_r __asm__("$6"); \
997 \
998 __cu_to_r = (to); \
999 __cu_from_r = (from); \
1000 __cu_len_r = (n); \
1001 __asm__ __volatile__( \
1002 ".set\tnoreorder\n\t" \
1003 __MODULE_JAL(func_ptr) \
1004 ".set\tnoat\n\t" \
1005 __UA_ADDU "\t$1, %1, %2\n\t" \
1006 ".set\tat\n\t" \
1007 ".set\treorder" \
1008 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
1009 : \
1010 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
1011 DADDI_SCRATCH, "memory"); \
1012 __cu_len_r; \
1013})
1014
1015#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \
1016({ \
1017 register void *__cu_to_r __asm__("$4"); \
1018 register const void __user *__cu_from_r __asm__("$5"); \
1019 register long __cu_len_r __asm__("$6"); \
1020 \
1021 __cu_to_r = (to); \
1022 __cu_from_r = (from); \
1023 __cu_len_r = (n); \
1024 __asm__ __volatile__( \
1025 __MODULE_JAL(func_ptr) \
1026 : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
1027 : \
1028 : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
1029 DADDI_SCRATCH, "memory"); \
1030 __cu_len_r; \
1031})
1032
1033/*
1034 * Source or destination address is in userland. We need to go through
1035 * the TLB
1036 */
1037#define __invoke_copy_from_user(to, from, n) \
1038 __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1039
1040#define __invoke_copy_from_user_inatomic(to, from, n) \
1041 __invoke_copy_from_user_eva_generic(to, from, n, \
1042 __copy_user_inatomic_eva)
1043
1044#define __invoke_copy_to_user(to, from, n) \
1045 __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1046
1047#define ___invoke_copy_in_user(to, from, n) \
1048 __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1049
1050/*
1051 * Source or destination address in the kernel. We are not going through
1052 * the TLB
1053 */
1054#define __invoke_copy_from_kernel(to, from, n) \
1055 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1056
1057#define __invoke_copy_from_kernel_inatomic(to, from, n) \
1058 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1059
1060#define __invoke_copy_to_kernel(to, from, n) \
1061 __invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1062
1063#define ___invoke_copy_in_kernel(to, from, n) \
1064 __invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1065
1066#endif /* CONFIG_EVA */
1067
847/* 1068/*
848 * __copy_from_user: - Copy a block of data from user space, with less checking. 1069 * __copy_from_user: - Copy a block of data from user space, with less checking.
849 * @to: Destination address, in kernel space. 1070 * @to: Destination address, in kernel space.
@@ -901,10 +1122,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
901 __cu_to = (to); \ 1122 __cu_to = (to); \
902 __cu_from = (from); \ 1123 __cu_from = (from); \
903 __cu_len = (n); \ 1124 __cu_len = (n); \
904 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ 1125 if (segment_eq(get_fs(), get_ds())) { \
905 might_fault(); \ 1126 __cu_len = __invoke_copy_from_kernel(__cu_to, \
906 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1127 __cu_from, \
907 __cu_len); \ 1128 __cu_len); \
1129 } else { \
1130 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
1131 might_fault(); \
1132 __cu_len = __invoke_copy_from_user(__cu_to, \
1133 __cu_from, \
1134 __cu_len); \
1135 } \
908 } \ 1136 } \
909 __cu_len; \ 1137 __cu_len; \
910}) 1138})
@@ -918,9 +1146,14 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
918 __cu_to = (to); \ 1146 __cu_to = (to); \
919 __cu_from = (from); \ 1147 __cu_from = (from); \
920 __cu_len = (n); \ 1148 __cu_len = (n); \
921 might_fault(); \ 1149 if (segment_eq(get_fs(), get_ds())) { \
922 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1150 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
923 __cu_len); \ 1151 __cu_len); \
1152 } else { \
1153 might_fault(); \
1154 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \
1155 __cu_len); \
1156 } \
924 __cu_len; \ 1157 __cu_len; \
925}) 1158})
926 1159
@@ -933,11 +1166,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
933 __cu_to = (to); \ 1166 __cu_to = (to); \
934 __cu_from = (from); \ 1167 __cu_from = (from); \
935 __cu_len = (n); \ 1168 __cu_len = (n); \
936 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \ 1169 if (segment_eq(get_fs(), get_ds())) { \
937 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \ 1170 __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \
938 might_fault(); \ 1171 __cu_len); \
939 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1172 } else { \
940 __cu_len); \ 1173 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1174 access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1175 might_fault(); \
1176 __cu_len = ___invoke_copy_in_user(__cu_to, \
1177 __cu_from, \
1178 __cu_len); \
1179 } \
941 } \ 1180 } \
942 __cu_len; \ 1181 __cu_len; \
943}) 1182})
@@ -1007,16 +1246,28 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
1007{ 1246{
1008 long res; 1247 long res;
1009 1248
1010 might_fault(); 1249 if (segment_eq(get_fs(), get_ds())) {
1011 __asm__ __volatile__( 1250 __asm__ __volatile__(
1012 "move\t$4, %1\n\t" 1251 "move\t$4, %1\n\t"
1013 "move\t$5, %2\n\t" 1252 "move\t$5, %2\n\t"
1014 "move\t$6, %3\n\t" 1253 "move\t$6, %3\n\t"
1015 __MODULE_JAL(__strncpy_from_user_nocheck_asm) 1254 __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1016 "move\t%0, $2" 1255 "move\t%0, $2"
1017 : "=r" (res) 1256 : "=r" (res)
1018 : "r" (__to), "r" (__from), "r" (__len) 1257 : "r" (__to), "r" (__from), "r" (__len)
1019 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1258 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1259 } else {
1260 might_fault();
1261 __asm__ __volatile__(
1262 "move\t$4, %1\n\t"
1263 "move\t$5, %2\n\t"
1264 "move\t$6, %3\n\t"
1265 __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1266 "move\t%0, $2"
1267 : "=r" (res)
1268 : "r" (__to), "r" (__from), "r" (__len)
1269 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1270 }
1020 1271
1021 return res; 1272 return res;
1022} 1273}
@@ -1044,16 +1295,28 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
1044{ 1295{
1045 long res; 1296 long res;
1046 1297
1047 might_fault(); 1298 if (segment_eq(get_fs(), get_ds())) {
1048 __asm__ __volatile__( 1299 __asm__ __volatile__(
1049 "move\t$4, %1\n\t" 1300 "move\t$4, %1\n\t"
1050 "move\t$5, %2\n\t" 1301 "move\t$5, %2\n\t"
1051 "move\t$6, %3\n\t" 1302 "move\t$6, %3\n\t"
1052 __MODULE_JAL(__strncpy_from_user_asm) 1303 __MODULE_JAL(__strncpy_from_kernel_asm)
1053 "move\t%0, $2" 1304 "move\t%0, $2"
1054 : "=r" (res) 1305 : "=r" (res)
1055 : "r" (__to), "r" (__from), "r" (__len) 1306 : "r" (__to), "r" (__from), "r" (__len)
1056 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); 1307 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1308 } else {
1309 might_fault();
1310 __asm__ __volatile__(
1311 "move\t$4, %1\n\t"
1312 "move\t$5, %2\n\t"
1313 "move\t$6, %3\n\t"
1314 __MODULE_JAL(__strncpy_from_user_asm)
1315 "move\t%0, $2"
1316 : "=r" (res)
1317 : "r" (__to), "r" (__from), "r" (__len)
1318 : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1319 }
1057 1320
1058 return res; 1321 return res;
1059} 1322}
@@ -1063,14 +1326,24 @@ static inline long __strlen_user(const char __user *s)
1063{ 1326{
1064 long res; 1327 long res;
1065 1328
1066 might_fault(); 1329 if (segment_eq(get_fs(), get_ds())) {
1067 __asm__ __volatile__( 1330 __asm__ __volatile__(
1068 "move\t$4, %1\n\t" 1331 "move\t$4, %1\n\t"
1069 __MODULE_JAL(__strlen_user_nocheck_asm) 1332 __MODULE_JAL(__strlen_kernel_nocheck_asm)
1070 "move\t%0, $2" 1333 "move\t%0, $2"
1071 : "=r" (res) 1334 : "=r" (res)
1072 : "r" (s) 1335 : "r" (s)
1073 : "$2", "$4", __UA_t0, "$31"); 1336 : "$2", "$4", __UA_t0, "$31");
1337 } else {
1338 might_fault();
1339 __asm__ __volatile__(
1340 "move\t$4, %1\n\t"
1341 __MODULE_JAL(__strlen_user_nocheck_asm)
1342 "move\t%0, $2"
1343 : "=r" (res)
1344 : "r" (s)
1345 : "$2", "$4", __UA_t0, "$31");
1346 }
1074 1347
1075 return res; 1348 return res;
1076} 1349}
@@ -1093,14 +1366,24 @@ static inline long strlen_user(const char __user *s)
1093{ 1366{
1094 long res; 1367 long res;
1095 1368
1096 might_fault(); 1369 if (segment_eq(get_fs(), get_ds())) {
1097 __asm__ __volatile__( 1370 __asm__ __volatile__(
1098 "move\t$4, %1\n\t" 1371 "move\t$4, %1\n\t"
1099 __MODULE_JAL(__strlen_user_asm) 1372 __MODULE_JAL(__strlen_kernel_asm)
1100 "move\t%0, $2" 1373 "move\t%0, $2"
1101 : "=r" (res) 1374 : "=r" (res)
1102 : "r" (s) 1375 : "r" (s)
1103 : "$2", "$4", __UA_t0, "$31"); 1376 : "$2", "$4", __UA_t0, "$31");
1377 } else {
1378 might_fault();
1379 __asm__ __volatile__(
1380 "move\t$4, %1\n\t"
1381 __MODULE_JAL(__strlen_kernel_asm)
1382 "move\t%0, $2"
1383 : "=r" (res)
1384 : "r" (s)
1385 : "$2", "$4", __UA_t0, "$31");
1386 }
1104 1387
1105 return res; 1388 return res;
1106} 1389}
@@ -1110,15 +1393,26 @@ static inline long __strnlen_user(const char __user *s, long n)
1110{ 1393{
1111 long res; 1394 long res;
1112 1395
1113 might_fault(); 1396 if (segment_eq(get_fs(), get_ds())) {
1114 __asm__ __volatile__( 1397 __asm__ __volatile__(
1115 "move\t$4, %1\n\t" 1398 "move\t$4, %1\n\t"
1116 "move\t$5, %2\n\t" 1399 "move\t$5, %2\n\t"
1117 __MODULE_JAL(__strnlen_user_nocheck_asm) 1400 __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1118 "move\t%0, $2" 1401 "move\t%0, $2"
1119 : "=r" (res) 1402 : "=r" (res)
1120 : "r" (s), "r" (n) 1403 : "r" (s), "r" (n)
1121 : "$2", "$4", "$5", __UA_t0, "$31"); 1404 : "$2", "$4", "$5", __UA_t0, "$31");
1405 } else {
1406 might_fault();
1407 __asm__ __volatile__(
1408 "move\t$4, %1\n\t"
1409 "move\t$5, %2\n\t"
1410 __MODULE_JAL(__strnlen_user_nocheck_asm)
1411 "move\t%0, $2"
1412 : "=r" (res)
1413 : "r" (s), "r" (n)
1414 : "$2", "$4", "$5", __UA_t0, "$31");
1415 }
1122 1416
1123 return res; 1417 return res;
1124} 1418}
@@ -1142,14 +1436,25 @@ static inline long strnlen_user(const char __user *s, long n)
1142 long res; 1436 long res;
1143 1437
1144 might_fault(); 1438 might_fault();
1145 __asm__ __volatile__( 1439 if (segment_eq(get_fs(), get_ds())) {
1146 "move\t$4, %1\n\t" 1440 __asm__ __volatile__(
1147 "move\t$5, %2\n\t" 1441 "move\t$4, %1\n\t"
1148 __MODULE_JAL(__strnlen_user_asm) 1442 "move\t$5, %2\n\t"
1149 "move\t%0, $2" 1443 __MODULE_JAL(__strnlen_kernel_asm)
1150 : "=r" (res) 1444 "move\t%0, $2"
1151 : "r" (s), "r" (n) 1445 : "=r" (res)
1152 : "$2", "$4", "$5", __UA_t0, "$31"); 1446 : "r" (s), "r" (n)
1447 : "$2", "$4", "$5", __UA_t0, "$31");
1448 } else {
1449 __asm__ __volatile__(
1450 "move\t$4, %1\n\t"
1451 "move\t$5, %2\n\t"
1452 __MODULE_JAL(__strnlen_user_asm)
1453 "move\t%0, $2"
1454 : "=r" (res)
1455 : "r" (s), "r" (n)
1456 : "$2", "$4", "$5", __UA_t0, "$31");
1457 }
1153 1458
1154 return res; 1459 return res;
1155} 1460}
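
The common thread through these uaccess hunks is a run-time check of the current address limit: callers running under set_fs(KERNEL_DS) keep the ordinary load/store path, everything else goes through the EVA accessors. A self-contained model of that dispatch; every type and helper below is a stand-in rather than the kernel's:

#include <stdio.h>

typedef struct { unsigned long seg; } mm_segment_t;
static mm_segment_t current_fs = { 0x7fffffffUL };	/* start out as USER_DS */

#define KERNEL_DS        ((mm_segment_t){ ~0UL })
#define get_fs()         (current_fs)
#define get_ds()         (KERNEL_DS)
#define set_fs(x)        (current_fs = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)

static long copy_with_kernel_insns(void *to, const void *from, long n)
{
	(void)to; (void)from;
	printf("plain lw/sw copy, %ld bytes\n", n);
	return 0;
}

static long copy_with_eva_insns(void *to, const void *from, long n)
{
	(void)to; (void)from;
	printf("EVA lwe/swe copy, %ld bytes\n", n);
	return 0;
}

static long copy_from_user_model(void *to, const void *from, long n)
{
	if (segment_eq(get_fs(), get_ds()))
		return copy_with_kernel_insns(to, from, n);	/* set_fs(KERNEL_DS) caller */
	return copy_with_eva_insns(to, from, n);		/* ordinary user access     */
}

int main(void)
{
	char buf[16];

	copy_from_user_model(buf, (const void *)0x400000UL, sizeof(buf));
	set_fs(KERNEL_DS);
	copy_from_user_model(buf, buf, sizeof(buf));
	return 0;
}
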
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index f25181b19941..df6e775f3fef 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -8,6 +8,7 @@
8 * Copyright (C) 1996, 2000 by Ralf Baechle 8 * Copyright (C) 1996, 2000 by Ralf Baechle
9 * Copyright (C) 2006 by Thiemo Seufer 9 * Copyright (C) 2006 by Thiemo Seufer
10 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 10 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
11 * Copyright (C) 2014 Imagination Technologies Ltd.
11 */ 12 */
12#ifndef _UAPI_ASM_INST_H 13#ifndef _UAPI_ASM_INST_H
13#define _UAPI_ASM_INST_H 14#define _UAPI_ASM_INST_H
@@ -73,10 +74,16 @@ enum spec2_op {
73enum spec3_op { 74enum spec3_op {
74 ext_op, dextm_op, dextu_op, dext_op, 75 ext_op, dextm_op, dextu_op, dext_op,
75 ins_op, dinsm_op, dinsu_op, dins_op, 76 ins_op, dinsm_op, dinsu_op, dins_op,
76 lx_op = 0x0a, 77 lx_op = 0x0a, lwle_op = 0x19,
77 bshfl_op = 0x20, 78 lwre_op = 0x1a, cachee_op = 0x1b,
78 dbshfl_op = 0x24, 79 sbe_op = 0x1c, she_op = 0x1d,
79 rdhwr_op = 0x3b 80 sce_op = 0x1e, swe_op = 0x1f,
81 bshfl_op = 0x20, swle_op = 0x21,
82 swre_op = 0x22, prefe_op = 0x23,
83 dbshfl_op = 0x24, lbue_op = 0x28,
84 lhue_op = 0x29, lbe_op = 0x2c,
85 lhe_op = 0x2d, lle_op = 0x2e,
86 lwe_op = 0x2f, rdhwr_op = 0x3b
80}; 87};
81 88
82/* 89/*
@@ -592,6 +599,15 @@ struct v_format { /* MDMX vector format */
592 ;))))))) 599 ;)))))))
593}; 600};
594 601
602struct spec3_format { /* SPEC3 */
603 BITFIELD_FIELD(unsigned int opcode:6,
604 BITFIELD_FIELD(unsigned int rs:5,
605 BITFIELD_FIELD(unsigned int rt:5,
606 BITFIELD_FIELD(signed int simmediate:9,
607 BITFIELD_FIELD(unsigned int func:7,
608 ;)))))
609};
610
595/* 611/*
596 * microMIPS instruction formats (32-bit length) 612 * microMIPS instruction formats (32-bit length)
597 * 613 *
@@ -863,6 +879,7 @@ union mips_instruction {
863 struct b_format b_format; 879 struct b_format b_format;
864 struct ps_format ps_format; 880 struct ps_format ps_format;
865 struct v_format v_format; 881 struct v_format v_format;
882 struct spec3_format spec3_format;
866 struct fb_format fb_format; 883 struct fb_format fb_format;
867 struct fp0_format fp0_format; 884 struct fp0_format fp0_format;
868 struct mm_fp0_format mm_fp0_format; 885 struct mm_fp0_format mm_fp0_format;
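
The new spec3_format describes the SPEC3/EVA encodings added to the opcode list above: a 6-bit opcode, 5-bit rs and rt, a signed 9-bit offset and a 7-bit function field. A small, endianness-neutral sketch of decoding those fields with shifts and masks (field widths taken from the struct; the decoder and its result struct are illustrative, not part of the patch):

	#include <stdint.h>

	struct spec3_fields {
		unsigned opcode, rs, rt, func;
		int simm9;			/* sign-extended 9-bit offset */
	};

	static struct spec3_fields decode_spec3(uint32_t insn)
	{
		struct spec3_fields f;
		int simm = (insn >> 7) & 0x1ff;	/* bits 15..7 */

		f.opcode = (insn >> 26) & 0x3f;	/* bits 31..26 */
		f.rs     = (insn >> 21) & 0x1f;	/* bits 25..21 */
		f.rt     = (insn >> 16) & 0x1f;	/* bits 20..16 */
		f.func   =  insn        & 0x7f;	/* bits  6..0  */
		f.simm9  = (simm & 0x100) ? simm - 0x200 : simm;
		return f;
	}
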
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f59c6e..681c17603a48 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,6 +12,10 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/sgidefs.h> 13#include <asm/sgidefs.h>
14 14
15/* Bits which may be set in sc_used_math */
16#define USEDMATH_FP (1 << 0)
17#define USEDMATH_MSA (1 << 1)
18
15#if _MIPS_SIM == _MIPS_SIM_ABI32 19#if _MIPS_SIM == _MIPS_SIM_ABI32
16 20
17/* 21/*
@@ -37,6 +41,8 @@ struct sigcontext {
37 unsigned long sc_lo2; 41 unsigned long sc_lo2;
38 unsigned long sc_hi3; 42 unsigned long sc_hi3;
39 unsigned long sc_lo3; 43 unsigned long sc_lo3;
44 unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
45 unsigned long sc_msa_csr;
40}; 46};
41 47
42#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 48#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -70,6 +76,8 @@ struct sigcontext {
70 __u32 sc_used_math; 76 __u32 sc_used_math;
71 __u32 sc_dsp; 77 __u32 sc_dsp;
72 __u32 sc_reserved; 78 __u32 sc_reserved;
79 __u64 sc_msaregs[32];
80 __u32 sc_msa_csr;
73}; 81};
74 82
75 83
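
The sc_msaregs[] array added above carries only the most significant 64 bits of each 128-bit MSA vector register; the least significant half is aliased to the existing FP registers, and the data is only meaningful when sc_used_math has USEDMATH_MSA set. A sketch, under those documented assumptions, of how user code might reassemble one full vector register from a sigcontext (struct vreg128 and the helper are illustrative only):

	struct vreg128 {
		unsigned long long lo;	/* least significant 64 bits (FP register) */
		unsigned long long hi;	/* most significant 64 bits (sc_msaregs) */
	};

	static struct vreg128 read_msa_vreg(const struct sigcontext *sc, int i)
	{
		struct vreg128 v;

		v.lo = sc->sc_fpregs[i];
		v.hi = sc->sc_msaregs[i];
		return v;
	}
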
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 26c6175e1379..277dab301cea 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -53,6 +53,8 @@ obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
53obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o 53obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
54obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o 54obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
55obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 55obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
56obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
57obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o
56obj-$(CONFIG_CPU_MIPSR2) += spram.o 58obj-$(CONFIG_CPU_MIPSR2) += spram.o
57 59
58obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 60obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
@@ -102,6 +104,9 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
102 104
103obj-$(CONFIG_JUMP_LABEL) += jump_label.o 105obj-$(CONFIG_JUMP_LABEL) += jump_label.o
104 106
107obj-$(CONFIG_MIPS_CM) += mips-cm.o
108obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
109
105# 110#
106# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not 111# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
107# safe to unconditionally use the assembler -mdsp / -mdspr2 switches 112# safe to unconditionally use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0c2e853c3db4..0ea75c244b48 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -16,6 +16,7 @@
16#include <linux/suspend.h> 16#include <linux/suspend.h>
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/processor.h> 18#include <asm/processor.h>
19#include <asm/smp-cps.h>
19 20
20#include <linux/kvm_host.h> 21#include <linux/kvm_host.h>
21 22
@@ -168,6 +169,72 @@ void output_thread_fpu_defines(void)
168 OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); 169 OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
169 OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); 170 OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
170 171
172 /* the least significant 64 bits of each FP register */
173 OFFSET(THREAD_FPR0_LS64, task_struct,
174 thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
175 OFFSET(THREAD_FPR1_LS64, task_struct,
176 thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
177 OFFSET(THREAD_FPR2_LS64, task_struct,
178 thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
179 OFFSET(THREAD_FPR3_LS64, task_struct,
180 thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
181 OFFSET(THREAD_FPR4_LS64, task_struct,
182 thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
183 OFFSET(THREAD_FPR5_LS64, task_struct,
184 thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
185 OFFSET(THREAD_FPR6_LS64, task_struct,
186 thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
187 OFFSET(THREAD_FPR7_LS64, task_struct,
188 thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
189 OFFSET(THREAD_FPR8_LS64, task_struct,
190 thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
191 OFFSET(THREAD_FPR9_LS64, task_struct,
192 thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
193 OFFSET(THREAD_FPR10_LS64, task_struct,
194 thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
195 OFFSET(THREAD_FPR11_LS64, task_struct,
196 thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
197 OFFSET(THREAD_FPR12_LS64, task_struct,
198 thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
199 OFFSET(THREAD_FPR13_LS64, task_struct,
200 thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
201 OFFSET(THREAD_FPR14_LS64, task_struct,
202 thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
203 OFFSET(THREAD_FPR15_LS64, task_struct,
204 thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
205 OFFSET(THREAD_FPR16_LS64, task_struct,
206 thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
207 OFFSET(THREAD_FPR17_LS64, task_struct,
208 thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
209 OFFSET(THREAD_FPR18_LS64, task_struct,
210 thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
211 OFFSET(THREAD_FPR19_LS64, task_struct,
212 thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
213 OFFSET(THREAD_FPR20_LS64, task_struct,
214 thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
215 OFFSET(THREAD_FPR21_LS64, task_struct,
216 thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
217 OFFSET(THREAD_FPR22_LS64, task_struct,
218 thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
219 OFFSET(THREAD_FPR23_LS64, task_struct,
220 thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
221 OFFSET(THREAD_FPR24_LS64, task_struct,
222 thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
223 OFFSET(THREAD_FPR25_LS64, task_struct,
224 thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
225 OFFSET(THREAD_FPR26_LS64, task_struct,
226 thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
227 OFFSET(THREAD_FPR27_LS64, task_struct,
228 thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
229 OFFSET(THREAD_FPR28_LS64, task_struct,
230 thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
231 OFFSET(THREAD_FPR29_LS64, task_struct,
232 thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
233 OFFSET(THREAD_FPR30_LS64, task_struct,
234 thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
235 OFFSET(THREAD_FPR31_LS64, task_struct,
236 thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
237
171 OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); 238 OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
172 BLANK(); 239 BLANK();
173} 240}
@@ -228,6 +295,7 @@ void output_sc_defines(void)
228 OFFSET(SC_LO2, sigcontext, sc_lo2); 295 OFFSET(SC_LO2, sigcontext, sc_lo2);
229 OFFSET(SC_HI3, sigcontext, sc_hi3); 296 OFFSET(SC_HI3, sigcontext, sc_hi3);
230 OFFSET(SC_LO3, sigcontext, sc_lo3); 297 OFFSET(SC_LO3, sigcontext, sc_lo3);
298 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
231 BLANK(); 299 BLANK();
232} 300}
233#endif 301#endif
@@ -242,6 +310,7 @@ void output_sc_defines(void)
242 OFFSET(SC_MDLO, sigcontext, sc_mdlo); 310 OFFSET(SC_MDLO, sigcontext, sc_mdlo);
243 OFFSET(SC_PC, sigcontext, sc_pc); 311 OFFSET(SC_PC, sigcontext, sc_pc);
244 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); 312 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
313 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
245 BLANK(); 314 BLANK();
246} 315}
247#endif 316#endif
@@ -253,6 +322,7 @@ void output_sc32_defines(void)
253 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); 322 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
254 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); 323 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
255 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); 324 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
325 OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
256 BLANK(); 326 BLANK();
257} 327}
258#endif 328#endif
@@ -397,3 +467,15 @@ void output_kvm_defines(void)
397 OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]); 467 OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
398 BLANK(); 468 BLANK();
399} 469}
470
471#ifdef CONFIG_MIPS_CPS
472void output_cps_defines(void)
473{
474 COMMENT(" MIPS CPS offsets. ");
475 OFFSET(BOOTCFG_CORE, boot_config, core);
476 OFFSET(BOOTCFG_VPE, boot_config, vpe);
477 OFFSET(BOOTCFG_PC, boot_config, pc);
478 OFFSET(BOOTCFG_SP, boot_config, sp);
479 OFFSET(BOOTCFG_GP, boot_config, gp);
480}
481#endif
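
output_cps_defines() above feeds the asm-offsets OFFSET() machinery, which turns offsetof() values into constants the assembler can use (BOOTCFG_PC and friends are consumed by cps-vec.S below). A standalone sketch of the idea, using printf instead of the kbuild asm-marker trick and a guessed boot_config layout, just to show where the numbers come from:

	#include <stddef.h>
	#include <stdio.h>

	/* Hypothetical layout; the real struct boot_config lives in asm/smp-cps.h. */
	struct boot_config {
		unsigned core;
		unsigned vpe;
		unsigned long pc;
		unsigned long sp;
		unsigned long gp;
	};

	/* Stand-in for kbuild's OFFSET(): emit an assembler-consumable constant. */
	#define OFFSET(sym, type, member) \
		printf("#define %s %zu\n", #sym, offsetof(struct type, member))

	int main(void)
	{
		OFFSET(BOOTCFG_CORE, boot_config, core);
		OFFSET(BOOTCFG_VPE,  boot_config, vpe);
		OFFSET(BOOTCFG_PC,   boot_config, pc);
		OFFSET(BOOTCFG_SP,   boot_config, sp);
		OFFSET(BOOTCFG_GP,   boot_config, gp);
		return 0;
	}
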
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index a5bf73d22fcc..290c23b51678 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -122,7 +122,7 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
122 jr k0 122 jr k0
123 123
124 RESTORE_ALL 124 RESTORE_ALL
125 .set mips3 125 .set arch=r4000
126 eret 126 eret
127 127
128/*********************************************************************** 128/***********************************************************************
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 000000000000..f7a46db4b161
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,191 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <asm/addrspace.h>
12#include <asm/asm.h>
13#include <asm/asm-offsets.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/mipsregs.h>
17
18#define GCR_CL_COHERENCE_OFS 0x2008
19
20.section .text.cps-vec
21.balign 0x1000
22.set noreorder
23
24LEAF(mips_cps_core_entry)
25 /*
26 * These first 8 bytes will be patched by cps_smp_setup to load the
27 * base address of the CM GCRs into register v1.
28 */
29 .quad 0
30
31 /* Check whether we're here due to an NMI */
32 mfc0 k0, CP0_STATUS
33 and k0, k0, ST0_NMI
34 beqz k0, not_nmi
35 nop
36
37 /* This is an NMI */
38 la k0, nmi_handler
39 jr k0
40 nop
41
42not_nmi:
43 /* Setup Cause */
44 li t0, CAUSEF_IV
45 mtc0 t0, CP0_CAUSE
46
47 /* Setup Status */
48 li t0, ST0_CU1 | ST0_CU0
49 mtc0 t0, CP0_STATUS
50
51 /*
52 * Clear the bits used to index the caches. Note that the architecture
53 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
54 * be valid for all MIPS32 CPUs, even those for which said writes are
55 * unnecessary.
56 */
57 mtc0 zero, CP0_TAGLO, 0
58 mtc0 zero, CP0_TAGHI, 0
59 mtc0 zero, CP0_TAGLO, 2
60 mtc0 zero, CP0_TAGHI, 2
61 ehb
62
63 /* Primary cache configuration is indicated by Config1 */
64 mfc0 v0, CP0_CONFIG, 1
65
66 /* Detect I-cache line size */
67 _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
68 beqz t0, icache_done
69 li t1, 2
70 sllv t0, t1, t0
71
72 /* Detect I-cache size */
73 _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
74 xori t2, t1, 0x7
75 beqz t2, 1f
76 li t3, 32
77 addi t1, t1, 1
78 sllv t1, t3, t1
791: /* At this point t1 == I-cache sets per way */
80 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
81 addi t2, t2, 1
82 mul t1, t1, t0
83 mul t1, t1, t2
84
85 li a0, KSEG0
86 add a1, a0, t1
871: cache Index_Store_Tag_I, 0(a0)
88 add a0, a0, t0
89 bne a0, a1, 1b
90 nop
91icache_done:
92
93 /* Detect D-cache line size */
94 _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
95 beqz t0, dcache_done
96 li t1, 2
97 sllv t0, t1, t0
98
99 /* Detect D-cache size */
100 _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
101 xori t2, t1, 0x7
102 beqz t2, 1f
103 li t3, 32
104 addi t1, t1, 1
105 sllv t1, t3, t1
1061: /* At this point t1 == D-cache sets per way */
107 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
108 addi t2, t2, 1
109 mul t1, t1, t0
110 mul t1, t1, t2
111
112 li a0, KSEG0
113 addu a1, a0, t1
114 subu a1, a1, t0
1151: cache Index_Store_Tag_D, 0(a0)
116 bne a0, a1, 1b
117 add a0, a0, t0
118dcache_done:
119
120 /* Set Kseg0 cacheable, coherent, write-back, write-allocate */
121 mfc0 t0, CP0_CONFIG
122 ori t0, 0x7
123 xori t0, 0x2
124 mtc0 t0, CP0_CONFIG
125 ehb
126
127 /* Enter the coherent domain */
128 li t0, 0xff
129 sw t0, GCR_CL_COHERENCE_OFS(v1)
130 ehb
131
132 /* Jump to kseg0 */
133 la t0, 1f
134 jr t0
135 nop
136
1371: /* We're up, cached & coherent */
138
139 /*
140 * TODO: We should check the VPE number we intended to boot here, and
141 * if non-zero we should start that VPE and stop this one. For
142 * the moment this doesn't matter since CPUs are brought up
143 * sequentially and in order, but once hotplug is implemented
144 * this will need revisiting.
145 */
146
147 /* Off we go! */
148 la t0, mips_cps_bootcfg
149 lw t1, BOOTCFG_PC(t0)
150 lw gp, BOOTCFG_GP(t0)
151 lw sp, BOOTCFG_SP(t0)
152 jr t1
153 nop
154 END(mips_cps_core_entry)
155
156.org 0x200
157LEAF(excep_tlbfill)
158 b .
159 nop
160 END(excep_tlbfill)
161
162.org 0x280
163LEAF(excep_xtlbfill)
164 b .
165 nop
166 END(excep_xtlbfill)
167
168.org 0x300
169LEAF(excep_cache)
170 b .
171 nop
172 END(excep_cache)
173
174.org 0x380
175LEAF(excep_genex)
176 b .
177 nop
178 END(excep_genex)
179
180.org 0x400
181LEAF(excep_intex)
182 b .
183 nop
184 END(excep_intex)
185
186.org 0x480
187LEAF(excep_ejtag)
188 la k0, ejtag_debug_handler
189 jr k0
190 nop
191 END(excep_ejtag)
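
The cache-init loop in mips_cps_core_entry() derives the primary cache geometry from Config1: line size is 2 << IL, sets per way come from IS, associativity is IA + 1, and their product gives the number of bytes to index-invalidate. A C sketch of the same arithmetic for the I-cache, using the MIPS32 Config1 bit positions the code relies on (a convenience function, not part of the patch):

	#include <stdint.h>

	static uint32_t icache_bytes(uint32_t config1)
	{
		uint32_t is = (config1 >> 22) & 0x7;	/* sets-per-way encoding */
		uint32_t il = (config1 >> 19) & 0x7;	/* line-size encoding */
		uint32_t ia = (config1 >> 16) & 0x7;	/* associativity - 1 */
		uint32_t line, sets, ways;

		if (il == 0)
			return 0;			/* no I-cache present */

		line = 2u << il;			/* bytes per line */
		sets = (is == 7) ? 32 : (64u << is);	/* IS = 7 encodes 32 sets */
		ways = ia + 1;
		return line * sets * ways;
	}
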
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 530f832de02c..6e8fb85ce7c3 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -23,6 +23,8 @@
23#include <asm/cpu-type.h> 23#include <asm/cpu-type.h>
24#include <asm/fpu.h> 24#include <asm/fpu.h>
25#include <asm/mipsregs.h> 25#include <asm/mipsregs.h>
26#include <asm/mipsmtregs.h>
27#include <asm/msa.h>
26#include <asm/watch.h> 28#include <asm/watch.h>
27#include <asm/elf.h> 29#include <asm/elf.h>
28#include <asm/spram.h> 30#include <asm/spram.h>
@@ -126,6 +128,20 @@ static inline int __cpu_has_fpu(void)
126 return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE); 128 return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
127} 129}
128 130
131static inline unsigned long cpu_get_msa_id(void)
132{
133 unsigned long status, conf5, msa_id;
134
135 status = read_c0_status();
136 __enable_fpu(FPU_64BIT);
137 conf5 = read_c0_config5();
138 enable_msa();
139 msa_id = read_msa_ir();
140 write_c0_config5(conf5);
141 write_c0_status(status);
142 return msa_id;
143}
144
129static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) 145static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
130{ 146{
131#ifdef __NEED_VMBITS_PROBE 147#ifdef __NEED_VMBITS_PROBE
@@ -166,11 +182,12 @@ static char unknown_isa[] = KERN_ERR \
166static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) 182static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
167{ 183{
168 unsigned int config6; 184 unsigned int config6;
169 /* 185
170 * Config6 is implementation dependent and it's currently only 186 /* It's implementation dependent how the FTLB can be enabled */
171 * used by proAptiv 187 switch (c->cputype) {
172 */ 188 case CPU_PROAPTIV:
173 if (c->cputype == CPU_PROAPTIV) { 189 case CPU_P5600:
190 /* proAptiv & related cores use Config6 to enable the FTLB */
174 config6 = read_c0_config6(); 191 config6 = read_c0_config6();
175 if (enable) 192 if (enable)
176 /* Enable FTLB */ 193 /* Enable FTLB */
@@ -179,6 +196,7 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
179 /* Disable FTLB */ 196 /* Disable FTLB */
180 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); 197 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
181 back_to_back_c0_hazard(); 198 back_to_back_c0_hazard();
199 break;
182 } 200 }
183} 201}
184 202
@@ -301,6 +319,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
301 c->ases |= MIPS_ASE_VZ; 319 c->ases |= MIPS_ASE_VZ;
302 if (config3 & MIPS_CONF3_SC) 320 if (config3 & MIPS_CONF3_SC)
303 c->options |= MIPS_CPU_SEGMENTS; 321 c->options |= MIPS_CPU_SEGMENTS;
322 if (config3 & MIPS_CONF3_MSA)
323 c->ases |= MIPS_ASE_MSA;
304 324
305 return config3 & MIPS_CONF_M; 325 return config3 & MIPS_CONF_M;
306} 326}
@@ -367,6 +387,9 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
367 config5 &= ~MIPS_CONF5_UFR; 387 config5 &= ~MIPS_CONF5_UFR;
368 write_c0_config5(config5); 388 write_c0_config5(config5);
369 389
390 if (config5 & MIPS_CONF5_EVA)
391 c->options |= MIPS_CPU_EVA;
392
370 return config5 & MIPS_CONF_M; 393 return config5 & MIPS_CONF_M;
371} 394}
372 395
@@ -398,8 +421,13 @@ static void decode_configs(struct cpuinfo_mips *c)
398 421
399 mips_probe_watch_registers(c); 422 mips_probe_watch_registers(c);
400 423
401 if (cpu_has_mips_r2) 424#ifndef CONFIG_MIPS_CPS
425 if (cpu_has_mips_r2) {
402 c->core = read_c0_ebase() & 0x3ff; 426 c->core = read_c0_ebase() & 0x3ff;
427 if (cpu_has_mipsmt)
428 c->core >>= fls(core_nvpes()) - 1;
429 }
430#endif
403} 431}
404 432
405#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ 433#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -710,17 +738,23 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
710 MIPS_CPU_LLSC; 738 MIPS_CPU_LLSC;
711 c->tlbsize = 64; 739 c->tlbsize = 64;
712 break; 740 break;
713 case PRID_IMP_LOONGSON2: 741 case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
714 c->cputype = CPU_LOONGSON2;
715 __cpu_name[cpu] = "ICT Loongson-2";
716
717 switch (c->processor_id & PRID_REV_MASK) { 742 switch (c->processor_id & PRID_REV_MASK) {
718 case PRID_REV_LOONGSON2E: 743 case PRID_REV_LOONGSON2E:
744 c->cputype = CPU_LOONGSON2;
745 __cpu_name[cpu] = "ICT Loongson-2";
719 set_elf_platform(cpu, "loongson2e"); 746 set_elf_platform(cpu, "loongson2e");
720 break; 747 break;
721 case PRID_REV_LOONGSON2F: 748 case PRID_REV_LOONGSON2F:
749 c->cputype = CPU_LOONGSON2;
750 __cpu_name[cpu] = "ICT Loongson-2";
722 set_elf_platform(cpu, "loongson2f"); 751 set_elf_platform(cpu, "loongson2f");
723 break; 752 break;
753 case PRID_REV_LOONGSON3A:
754 c->cputype = CPU_LOONGSON3;
755 __cpu_name[cpu] = "ICT Loongson-3";
756 set_elf_platform(cpu, "loongson3a");
757 break;
724 } 758 }
725 759
726 set_isa(c, MIPS_CPU_ISA_III); 760 set_isa(c, MIPS_CPU_ISA_III);
@@ -729,7 +763,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
729 MIPS_CPU_32FPR; 763 MIPS_CPU_32FPR;
730 c->tlbsize = 64; 764 c->tlbsize = 64;
731 break; 765 break;
732 case PRID_IMP_LOONGSON1: 766 case PRID_IMP_LOONGSON_32: /* Loongson-1 */
733 decode_configs(c); 767 decode_configs(c);
734 768
735 c->cputype = CPU_LOONGSON1; 769 c->cputype = CPU_LOONGSON1;
@@ -806,7 +840,7 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
806 __cpu_name[cpu] = "MIPS 1004Kc"; 840 __cpu_name[cpu] = "MIPS 1004Kc";
807 break; 841 break;
808 case PRID_IMP_1074K: 842 case PRID_IMP_1074K:
809 c->cputype = CPU_74K; 843 c->cputype = CPU_1074K;
810 __cpu_name[cpu] = "MIPS 1074Kc"; 844 __cpu_name[cpu] = "MIPS 1074Kc";
811 break; 845 break;
812 case PRID_IMP_INTERAPTIV_UP: 846 case PRID_IMP_INTERAPTIV_UP:
@@ -825,6 +859,14 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
825 c->cputype = CPU_PROAPTIV; 859 c->cputype = CPU_PROAPTIV;
826 __cpu_name[cpu] = "MIPS proAptiv (multi)"; 860 __cpu_name[cpu] = "MIPS proAptiv (multi)";
827 break; 861 break;
862 case PRID_IMP_P5600:
863 c->cputype = CPU_P5600;
864 __cpu_name[cpu] = "MIPS P5600";
865 break;
866 case PRID_IMP_M5150:
867 c->cputype = CPU_M5150;
868 __cpu_name[cpu] = "MIPS M5150";
869 break;
828 } 870 }
829 871
830 decode_configs(c); 872 decode_configs(c);
@@ -1176,6 +1218,12 @@ void cpu_probe(void)
1176 else 1218 else
1177 c->srsets = 1; 1219 c->srsets = 1;
1178 1220
1221 if (cpu_has_msa) {
1222 c->msa_id = cpu_get_msa_id();
1223 WARN(c->msa_id & MSA_IR_WRPF,
1224 "Vector register partitioning unimplemented!");
1225 }
1226
1179 cpu_probe_vmbits(c); 1227 cpu_probe_vmbits(c);
1180 1228
1181#ifdef CONFIG_64BIT 1229#ifdef CONFIG_64BIT
@@ -1192,4 +1240,6 @@ void cpu_report(void)
1192 smp_processor_id(), c->processor_id, cpu_name_string()); 1240 smp_processor_id(), c->processor_id, cpu_name_string());
1193 if (c->options & MIPS_CPU_FPU) 1241 if (c->options & MIPS_CPU_FPU)
1194 printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); 1242 printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
1243 if (cpu_has_msa)
1244 pr_info("MSA revision is: %08x\n", c->msa_id);
1195} 1245}
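
The Loongson and 1074K changes above all key off fields of the Coprocessor 0 PRId register: bits 23:16 identify the company, bits 15:8 the implementation (PRID_IMP_*), and bits 7:0 the revision (PRID_REV_*). A small sketch splitting a raw processor_id into those fields (mask positions follow that conventional layout; the struct is illustrative):

	#include <stdint.h>

	struct prid_fields {
		unsigned company;	/* PRId[23:16] */
		unsigned imp;		/* PRId[15:8], e.g. PRID_IMP_LOONGSON_64 */
		unsigned rev;		/* PRId[7:0],  e.g. PRID_REV_LOONGSON3A */
	};

	static struct prid_fields split_prid(uint32_t prid)
	{
		struct prid_fields f;

		f.company = (prid >> 16) & 0xff;
		f.imp     = (prid >> 8)  & 0xff;
		f.rev     =  prid        & 0xff;
		return f;
	}
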
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 374ed74cd516..60e7e5e45af1 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -90,6 +90,7 @@ static inline void ftrace_dyn_arch_init_insns(void)
90static int ftrace_modify_code(unsigned long ip, unsigned int new_code) 90static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
91{ 91{
92 int faulted; 92 int faulted;
93 mm_segment_t old_fs;
93 94
94 /* *(unsigned int *)ip = new_code; */ 95 /* *(unsigned int *)ip = new_code; */
95 safe_store_code(new_code, ip, faulted); 96 safe_store_code(new_code, ip, faulted);
@@ -97,7 +98,10 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
97 if (unlikely(faulted)) 98 if (unlikely(faulted))
98 return -EFAULT; 99 return -EFAULT;
99 100
101 old_fs = get_fs();
102 set_fs(get_ds());
100 flush_icache_range(ip, ip + 8); 103 flush_icache_range(ip, ip + 8);
104 set_fs(old_fs);
101 105
102 return 0; 106 return 0;
103} 107}
@@ -197,7 +201,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
197 return ftrace_modify_code(FTRACE_CALL_IP, new); 201 return ftrace_modify_code(FTRACE_CALL_IP, new);
198} 202}
199 203
200int __init ftrace_dyn_arch_init(void *data) 204int __init ftrace_dyn_arch_init(void)
201{ 205{
202 /* Encode the instructions when booting */ 206 /* Encode the instructions when booting */
203 ftrace_dyn_arch_init_insns(); 207 ftrace_dyn_arch_init_insns();
@@ -205,9 +209,6 @@ int __init ftrace_dyn_arch_init(void *data)
205 /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */ 209 /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
206 ftrace_modify_code(MCOUNT_ADDR, INSN_NOP); 210 ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
207 211
208 /* The return code is returned via data */
209 *(unsigned long *)data = 0;
210
211 return 0; 212 return 0;
212} 213}
213#endif /* CONFIG_DYNAMIC_FTRACE */ 214#endif /* CONFIG_DYNAMIC_FTRACE */
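
ftrace_modify_code() above now widens the address limit to the kernel segment around flush_icache_range() and restores it afterwards; the kgdb changes further down apply the same pattern. A sketch of that save/widen/restore idiom as a generic wrapper (the wrapper itself is hypothetical; get_fs()/set_fs()/get_ds() are the usual <asm/uaccess.h> interfaces):

	/* Sketch only: run fn() with the kernel address limit, then restore. */
	static void call_with_kernel_fs(void (*fn)(void))
	{
		mm_segment_t old_fs = get_fs();

		set_fs(get_ds());	/* KERNEL_DS: kernel addresses allowed */
		fn();
		set_fs(old_fs);		/* always restore the caller's limit */
	}
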
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index d84f6a509502..a9ce3408be25 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -67,7 +67,7 @@ NESTED(except_vec3_generic, 0, sp)
67 */ 67 */
68NESTED(except_vec3_r4000, 0, sp) 68NESTED(except_vec3_r4000, 0, sp)
69 .set push 69 .set push
70 .set mips3 70 .set arch=r4000
71 .set noat 71 .set noat
72 mfc0 k1, CP0_CAUSE 72 mfc0 k1, CP0_CAUSE
73 li k0, 31<<2 73 li k0, 31<<2
@@ -139,7 +139,7 @@ LEAF(__r4k_wait)
139 nop 139 nop
140 nop 140 nop
141#endif 141#endif
142 .set mips3 142 .set arch=r4000
143 wait 143 wait
144 /* end of rollback region (the region size must be power of two) */ 144 /* end of rollback region (the region size must be power of two) */
1451: 1451:
@@ -475,8 +475,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
475 BUILD_HANDLER cpu cpu sti silent /* #11 */ 475 BUILD_HANDLER cpu cpu sti silent /* #11 */
476 BUILD_HANDLER ov ov sti silent /* #12 */ 476 BUILD_HANDLER ov ov sti silent /* #12 */
477 BUILD_HANDLER tr tr sti silent /* #13 */ 477 BUILD_HANDLER tr tr sti silent /* #13 */
478 BUILD_HANDLER msa_fpe msa_fpe sti silent /* #14 */
478 BUILD_HANDLER fpe fpe fpe silent /* #15 */ 479 BUILD_HANDLER fpe fpe fpe silent /* #15 */
479 BUILD_HANDLER ftlb ftlb none silent /* #16 */ 480 BUILD_HANDLER ftlb ftlb none silent /* #16 */
481 BUILD_HANDLER msa msa sti silent /* #21 */
480 BUILD_HANDLER mdmx mdmx sti silent /* #22 */ 482 BUILD_HANDLER mdmx mdmx sti silent /* #22 */
481#ifdef CONFIG_HARDWARE_WATCHPOINTS 483#ifdef CONFIG_HARDWARE_WATCHPOINTS
482 /* 484 /*
@@ -575,7 +577,7 @@ isrdhwr:
575 ori k1, _THREAD_MASK 577 ori k1, _THREAD_MASK
576 xori k1, _THREAD_MASK 578 xori k1, _THREAD_MASK
577 LONG_L v1, TI_TP_VALUE(k1) 579 LONG_L v1, TI_TP_VALUE(k1)
578 .set mips3 580 .set arch=r4000
579 eret 581 eret
580 .set mips0 582 .set mips0
581#endif 583#endif
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 7b6a5b3e3acf..e712dcf18b2d 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -175,8 +175,8 @@ NESTED(smp_bootstrap, 16, sp)
175 DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */ 175 DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
176 jal mips_ihb 176 jal mips_ihb
177#endif /* CONFIG_MIPS_MT_SMTC */ 177#endif /* CONFIG_MIPS_MT_SMTC */
178 setup_c0_status_sec
179 smp_slave_setup 178 smp_slave_setup
179 setup_c0_status_sec
180#ifdef CONFIG_MIPS_MT_SMTC 180#ifdef CONFIG_MIPS_MT_SMTC
181 andi t2, t2, VPECONTROL_TE 181 andi t2, t2, VPECONTROL_TE
182 beqz t2, 2f 182 beqz t2, 2f
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 3553243bf9d6..837ff27950bc 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -64,7 +64,7 @@ void r4k_wait_irqoff(void)
64 if (!need_resched()) 64 if (!need_resched())
65 __asm__( 65 __asm__(
66 " .set push \n" 66 " .set push \n"
67 " .set mips3 \n" 67 " .set arch=r4000 \n"
68 " wait \n" 68 " wait \n"
69 " .set pop \n"); 69 " .set pop \n");
70 local_irq_enable(); 70 local_irq_enable();
@@ -82,7 +82,7 @@ static void rm7k_wait_irqoff(void)
82 if (!need_resched()) 82 if (!need_resched())
83 __asm__( 83 __asm__(
84 " .set push \n" 84 " .set push \n"
85 " .set mips3 \n" 85 " .set arch=r4000 \n"
86 " .set noat \n" 86 " .set noat \n"
87 " mfc0 $1, $12 \n" 87 " mfc0 $1, $12 \n"
88 " sync \n" 88 " sync \n"
@@ -103,7 +103,7 @@ static void au1k_wait(void)
103 unsigned long c0status = read_c0_status() | 1; /* irqs on */ 103 unsigned long c0status = read_c0_status() | 1; /* irqs on */
104 104
105 __asm__( 105 __asm__(
106 " .set mips3 \n" 106 " .set arch=r4000 \n"
107 " cache 0x14, 0(%0) \n" 107 " cache 0x14, 0(%0) \n"
108 " cache 0x14, 32(%0) \n" 108 " cache 0x14, 32(%0) \n"
109 " sync \n" 109 " sync \n"
@@ -184,8 +184,11 @@ void __init check_wait(void)
184 case CPU_24K: 184 case CPU_24K:
185 case CPU_34K: 185 case CPU_34K:
186 case CPU_1004K: 186 case CPU_1004K:
187 case CPU_1074K:
187 case CPU_INTERAPTIV: 188 case CPU_INTERAPTIV:
188 case CPU_PROAPTIV: 189 case CPU_PROAPTIV:
190 case CPU_P5600:
191 case CPU_M5150:
189 cpu_wait = r4k_wait; 192 cpu_wait = r4k_wait;
190 if (read_c0_config7() & MIPS_CONF7_WII) 193 if (read_c0_config7() & MIPS_CONF7_WII)
191 cpu_wait = r4k_wait_irqoff; 194 cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 5b5ddb231f26..8520dad6d4e3 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -16,7 +16,6 @@
16#include <asm/gic.h> 16#include <asm/gic.h>
17#include <asm/setup.h> 17#include <asm/setup.h>
18#include <asm/traps.h> 18#include <asm/traps.h>
19#include <asm/gcmpregs.h>
20#include <linux/hardirq.h> 19#include <linux/hardirq.h>
21#include <asm-generic/bitops/find.h> 20#include <asm-generic/bitops/find.h>
22 21
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index fcaac2f132f0..7afcc2f22c0d 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -32,6 +32,7 @@
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/processor.h> 33#include <asm/processor.h>
34#include <asm/sigcontext.h> 34#include <asm/sigcontext.h>
35#include <asm/uaccess.h>
35 36
36static struct hard_trap_info { 37static struct hard_trap_info {
37 unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ 38 unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@@ -208,7 +209,14 @@ void arch_kgdb_breakpoint(void)
208 209
209static void kgdb_call_nmi_hook(void *ignored) 210static void kgdb_call_nmi_hook(void *ignored)
210{ 211{
212 mm_segment_t old_fs;
213
214 old_fs = get_fs();
215 set_fs(get_ds());
216
211 kgdb_nmicallback(raw_smp_processor_id(), NULL); 217 kgdb_nmicallback(raw_smp_processor_id(), NULL);
218
219 set_fs(old_fs);
212} 220}
213 221
214void kgdb_roundup_cpus(unsigned long flags) 222void kgdb_roundup_cpus(unsigned long flags)
@@ -282,6 +290,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
282 struct die_args *args = (struct die_args *)ptr; 290 struct die_args *args = (struct die_args *)ptr;
283 struct pt_regs *regs = args->regs; 291 struct pt_regs *regs = args->regs;
284 int trap = (regs->cp0_cause & 0x7c) >> 2; 292 int trap = (regs->cp0_cause & 0x7c) >> 2;
293 mm_segment_t old_fs;
285 294
286#ifdef CONFIG_KPROBES 295#ifdef CONFIG_KPROBES
287 /* 296 /*
@@ -296,11 +305,17 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
296 if (user_mode(regs)) 305 if (user_mode(regs))
297 return NOTIFY_DONE; 306 return NOTIFY_DONE;
298 307
308 /* Kernel mode. Set correct address limit */
309 old_fs = get_fs();
310 set_fs(get_ds());
311
299 if (atomic_read(&kgdb_active) != -1) 312 if (atomic_read(&kgdb_active) != -1)
300 kgdb_nmicallback(smp_processor_id(), regs); 313 kgdb_nmicallback(smp_processor_id(), regs);
301 314
302 if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) 315 if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
316 set_fs(old_fs);
303 return NOTIFY_DONE; 317 return NOTIFY_DONE;
318 }
304 319
305 if (atomic_read(&kgdb_setting_breakpoint)) 320 if (atomic_read(&kgdb_setting_breakpoint))
306 if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst)) 321 if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -310,6 +325,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
310 local_irq_enable(); 325 local_irq_enable();
311 __flush_cache_all(); 326 __flush_cache_all();
312 327
328 set_fs(old_fs);
313 return NOTIFY_STOP; 329 return NOTIFY_STOP;
314} 330}
315 331
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 000000000000..f76f7a08412d
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/errno.h>
12
13#include <asm/mips-cm.h>
14#include <asm/mipsregs.h>
15
16void __iomem *mips_cm_base;
17void __iomem *mips_cm_l2sync_base;
18
19phys_t __mips_cm_phys_base(void)
20{
21 u32 config3 = read_c0_config3();
22 u32 cmgcr;
23
24 /* Check the CMGCRBase register is implemented */
25 if (!(config3 & MIPS_CONF3_CMGCR))
26 return 0;
27
28 /* Read the address from CMGCRBase */
29 cmgcr = read_c0_cmgcrbase();
30 return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
31}
32
33phys_t mips_cm_phys_base(void)
34 __attribute__((weak, alias("__mips_cm_phys_base")));
35
36phys_t __mips_cm_l2sync_phys_base(void)
37{
38 u32 base_reg;
39
40 /*
41 * If the L2-only sync region is already enabled, then leave it at its
42 * current location.
43 */
44 base_reg = read_gcr_l2_only_sync_base();
45 if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK)
46 return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK;
47
48 /* Default to following the CM */
49 return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
50}
51
52phys_t mips_cm_l2sync_phys_base(void)
53 __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
54
55static void mips_cm_probe_l2sync(void)
56{
57 unsigned major_rev;
58 phys_t addr;
59
60 /* L2-only sync was introduced with CM major revision 6 */
61 major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
62 CM_GCR_REV_MAJOR_SHF;
63 if (major_rev < 6)
64 return;
65
66 /* Find a location for the L2 sync region */
67 addr = mips_cm_l2sync_phys_base();
68 BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE_MSK) != addr);
69 if (!addr)
70 return;
71
72 /* Set the region base address & enable it */
73 write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN_MSK);
74
75 /* Map the region */
76 mips_cm_l2sync_base = ioremap_nocache(addr, MIPS_CM_L2SYNC_SIZE);
77}
78
79int mips_cm_probe(void)
80{
81 phys_t addr;
82 u32 base_reg;
83
84 addr = mips_cm_phys_base();
85 BUG_ON((addr & CM_GCR_BASE_GCRBASE_MSK) != addr);
86 if (!addr)
87 return -ENODEV;
88
89 mips_cm_base = ioremap_nocache(addr, MIPS_CM_GCR_SIZE);
90 if (!mips_cm_base)
91 return -ENXIO;
92
93 /* sanity check that we're looking at a CM */
94 base_reg = read_gcr_base();
95 if ((base_reg & CM_GCR_BASE_GCRBASE_MSK) != addr) {
96 pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
97 (unsigned long)addr);
98 mips_cm_base = NULL;
99 return -ENODEV;
100 }
101
102 /* set default target to memory */
103 base_reg &= ~CM_GCR_BASE_CMDEFTGT_MSK;
104 base_reg |= CM_GCR_BASE_CMDEFTGT_MEM;
105 write_gcr_base(base_reg);
106
107 /* disable CM regions */
108 write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
109 write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
110 write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
111 write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
112 write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
113 write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
114 write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR_MSK);
115 write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK_MSK);
116
117 /* probe for an L2-only sync region */
118 mips_cm_probe_l2sync();
119
120 return 0;
121}
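
mips_cm_phys_base() and mips_cm_l2sync_phys_base() are published as weak aliases of the generic __ versions, so a platform can ship a strong definition and override the default without touching this file. A sketch of that GCC weak-alias pattern with made-up names and a made-up address:

	/* Generic fallback. */
	unsigned long __cm_phys_base(void)
	{
		return 0x1fbf8000;	/* hypothetical default GCR base */
	}

	/* Weak alias: any strong cm_phys_base() elsewhere wins at link time. */
	unsigned long cm_phys_base(void)
		__attribute__((weak, alias("__cm_phys_base")));
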
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 000000000000..c9dc67402969
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,52 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/errno.h>
12
13#include <asm/mips-cm.h>
14#include <asm/mips-cpc.h>
15
16void __iomem *mips_cpc_base;
17
18phys_t __weak mips_cpc_phys_base(void)
19{
20 u32 cpc_base;
21
22 if (!mips_cm_present())
23 return 0;
24
25 if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX_MSK))
26 return 0;
27
28 /* If the CPC is already enabled, leave it so */
29 cpc_base = read_gcr_cpc_base();
30 if (cpc_base & CM_GCR_CPC_BASE_CPCEN_MSK)
31 return cpc_base & CM_GCR_CPC_BASE_CPCBASE_MSK;
32
33 /* Otherwise, give it the default address & enable it */
34 cpc_base = mips_cpc_default_phys_base();
35 write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN_MSK);
36 return cpc_base;
37}
38
39int mips_cpc_probe(void)
40{
41 phys_t addr;
42
43 addr = mips_cpc_phys_base();
44 if (!addr)
45 return -ENODEV;
46
47 mips_cpc_base = ioremap_nocache(addr, 0x8000);
48 if (!mips_cpc_base)
49 return -ENXIO;
50
51 return 0;
52}
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 6e58e97fcd39..2607c3a4ff7e 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -16,12 +16,20 @@
16#include <asm/ftrace.h> 16#include <asm/ftrace.h>
17 17
18extern void *__bzero(void *__s, size_t __count); 18extern void *__bzero(void *__s, size_t __count);
19extern long __strncpy_from_kernel_nocheck_asm(char *__to,
20 const char *__from, long __len);
21extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
22 long __len);
19extern long __strncpy_from_user_nocheck_asm(char *__to, 23extern long __strncpy_from_user_nocheck_asm(char *__to,
20 const char *__from, long __len); 24 const char *__from, long __len);
21extern long __strncpy_from_user_asm(char *__to, const char *__from, 25extern long __strncpy_from_user_asm(char *__to, const char *__from,
22 long __len); 26 long __len);
27extern long __strlen_kernel_nocheck_asm(const char *s);
28extern long __strlen_kernel_asm(const char *s);
23extern long __strlen_user_nocheck_asm(const char *s); 29extern long __strlen_user_nocheck_asm(const char *s);
24extern long __strlen_user_asm(const char *s); 30extern long __strlen_user_asm(const char *s);
31extern long __strnlen_kernel_nocheck_asm(const char *s);
32extern long __strnlen_kernel_asm(const char *s);
25extern long __strnlen_user_nocheck_asm(const char *s); 33extern long __strnlen_user_nocheck_asm(const char *s);
26extern long __strnlen_user_asm(const char *s); 34extern long __strnlen_user_asm(const char *s);
27 35
@@ -43,17 +51,31 @@ EXPORT_SYMBOL(copy_page);
43 */ 51 */
44EXPORT_SYMBOL(__copy_user); 52EXPORT_SYMBOL(__copy_user);
45EXPORT_SYMBOL(__copy_user_inatomic); 53EXPORT_SYMBOL(__copy_user_inatomic);
54#ifdef CONFIG_EVA
55EXPORT_SYMBOL(__copy_from_user_eva);
56EXPORT_SYMBOL(__copy_in_user_eva);
57EXPORT_SYMBOL(__copy_to_user_eva);
58EXPORT_SYMBOL(__copy_user_inatomic_eva);
59#endif
46EXPORT_SYMBOL(__bzero); 60EXPORT_SYMBOL(__bzero);
61EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
62EXPORT_SYMBOL(__strncpy_from_kernel_asm);
47EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); 63EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
48EXPORT_SYMBOL(__strncpy_from_user_asm); 64EXPORT_SYMBOL(__strncpy_from_user_asm);
65EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
66EXPORT_SYMBOL(__strlen_kernel_asm);
49EXPORT_SYMBOL(__strlen_user_nocheck_asm); 67EXPORT_SYMBOL(__strlen_user_nocheck_asm);
50EXPORT_SYMBOL(__strlen_user_asm); 68EXPORT_SYMBOL(__strlen_user_asm);
69EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
70EXPORT_SYMBOL(__strnlen_kernel_asm);
51EXPORT_SYMBOL(__strnlen_user_nocheck_asm); 71EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
52EXPORT_SYMBOL(__strnlen_user_asm); 72EXPORT_SYMBOL(__strnlen_user_asm);
53 73
54EXPORT_SYMBOL(csum_partial); 74EXPORT_SYMBOL(csum_partial);
55EXPORT_SYMBOL(csum_partial_copy_nocheck); 75EXPORT_SYMBOL(csum_partial_copy_nocheck);
56EXPORT_SYMBOL(__csum_partial_copy_user); 76EXPORT_SYMBOL(__csum_partial_copy_kernel);
77EXPORT_SYMBOL(__csum_partial_copy_to_user);
78EXPORT_SYMBOL(__csum_partial_copy_from_user);
57 79
58EXPORT_SYMBOL(invalid_pte_table); 80EXPORT_SYMBOL(invalid_pte_table);
59#ifdef CONFIG_FUNCTION_TRACER 81#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 24cdf64789c3..4f2d9dece7ab 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -805,7 +805,7 @@ static void reset_counters(void *arg)
805 } 805 }
806} 806}
807 807
808/* 24K/34K/1004K cores can share the same event map. */ 808/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
809static const struct mips_perf_event mipsxxcore_event_map 809static const struct mips_perf_event mipsxxcore_event_map
810 [PERF_COUNT_HW_MAX] = { 810 [PERF_COUNT_HW_MAX] = {
811 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, 811 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
@@ -814,8 +814,8 @@ static const struct mips_perf_event mipsxxcore_event_map
814 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, 814 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
815}; 815};
816 816
817/* 74K core has different branch event code. */ 817/* 74K/proAptiv core has different branch event code. */
818static const struct mips_perf_event mipsxx74Kcore_event_map 818static const struct mips_perf_event mipsxxcore_event_map2
819 [PERF_COUNT_HW_MAX] = { 819 [PERF_COUNT_HW_MAX] = {
820 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, 820 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
821 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, 821 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
@@ -849,7 +849,7 @@ static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
849 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ 849 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
850}; 850};
851 851
852/* 24K/34K/1004K cores can share the same cache event map. */ 852/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
853static const struct mips_perf_event mipsxxcore_cache_map 853static const struct mips_perf_event mipsxxcore_cache_map
854 [PERF_COUNT_HW_CACHE_MAX] 854 [PERF_COUNT_HW_CACHE_MAX]
855 [PERF_COUNT_HW_CACHE_OP_MAX] 855 [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -930,8 +930,8 @@ static const struct mips_perf_event mipsxxcore_cache_map
930}, 930},
931}; 931};
932 932
933/* 74K core has completely different cache event map. */ 933/* 74K/proAptiv core has completely different cache event map. */
934static const struct mips_perf_event mipsxx74Kcore_cache_map 934static const struct mips_perf_event mipsxxcore_cache_map2
935 [PERF_COUNT_HW_CACHE_MAX] 935 [PERF_COUNT_HW_CACHE_MAX]
936 [PERF_COUNT_HW_CACHE_OP_MAX] 936 [PERF_COUNT_HW_CACHE_OP_MAX]
937 [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 937 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -978,6 +978,11 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
978 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, 978 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
979 }, 979 },
980}, 980},
981/*
982 * 74K core does not have specific DTLB events. proAptiv core has
983 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
984 * not included here. One can use raw events if really needed.
985 */
981[C(ITLB)] = { 986[C(ITLB)] = {
982 [C(OP_READ)] = { 987 [C(OP_READ)] = {
983 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, 988 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
@@ -1378,6 +1383,10 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1378#define IS_BOTH_COUNTERS_74K_EVENT(b) \ 1383#define IS_BOTH_COUNTERS_74K_EVENT(b) \
1379 ((b) == 0 || (b) == 1) 1384 ((b) == 0 || (b) == 1)
1380 1385
1386/* proAptiv */
1387#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
1388 ((b) == 0 || (b) == 1)
1389
1381/* 1004K */ 1390/* 1004K */
1382#define IS_BOTH_COUNTERS_1004K_EVENT(b) \ 1391#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
1383 ((b) == 0 || (b) == 1 || (b) == 11) 1392 ((b) == 0 || (b) == 1 || (b) == 11)
@@ -1391,6 +1400,20 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1391#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) 1400#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
1392#endif 1401#endif
1393 1402
1403/* interAptiv */
1404#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
1405 ((b) == 0 || (b) == 1 || (b) == 11)
1406#ifdef CONFIG_MIPS_MT_SMP
1407/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
1408#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
1409 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1410 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
1411 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
1412 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
1413 ((b) >= 64 && (b) <= 67))
1414#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
1415#endif
1416
1394/* BMIPS5000 */ 1417/* BMIPS5000 */
1395#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \ 1418#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
1396 ((b) == 0 || (b) == 1) 1419 ((b) == 0 || (b) == 1)
@@ -1442,6 +1465,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1442#endif 1465#endif
1443 break; 1466 break;
1444 case CPU_74K: 1467 case CPU_74K:
1468 case CPU_1074K:
1445 if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) 1469 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1446 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; 1470 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1447 else 1471 else
@@ -1451,6 +1475,16 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1451 raw_event.range = P; 1475 raw_event.range = P;
1452#endif 1476#endif
1453 break; 1477 break;
1478 case CPU_PROAPTIV:
1479 if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
1480 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1481 else
1482 raw_event.cntr_mask =
1483 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1484#ifdef CONFIG_MIPS_MT_SMP
1485 raw_event.range = P;
1486#endif
1487 break;
1454 case CPU_1004K: 1488 case CPU_1004K:
1455 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) 1489 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1456 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; 1490 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1466,6 +1500,21 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1466 raw_event.range = T; 1500 raw_event.range = T;
1467#endif 1501#endif
1468 break; 1502 break;
1503 case CPU_INTERAPTIV:
1504 if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
1505 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1506 else
1507 raw_event.cntr_mask =
1508 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1509#ifdef CONFIG_MIPS_MT_SMP
1510 if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
1511 raw_event.range = P;
1512 else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
1513 raw_event.range = V;
1514 else
1515 raw_event.range = T;
1516#endif
1517 break;
1469 case CPU_BMIPS5000: 1518 case CPU_BMIPS5000:
1470 if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id)) 1519 if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1471 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; 1520 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1576,14 +1625,29 @@ init_hw_perf_events(void)
1576 break; 1625 break;
1577 case CPU_74K: 1626 case CPU_74K:
1578 mipspmu.name = "mips/74K"; 1627 mipspmu.name = "mips/74K";
1579 mipspmu.general_event_map = &mipsxx74Kcore_event_map; 1628 mipspmu.general_event_map = &mipsxxcore_event_map2;
1580 mipspmu.cache_event_map = &mipsxx74Kcore_cache_map; 1629 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1630 break;
1631 case CPU_PROAPTIV:
1632 mipspmu.name = "mips/proAptiv";
1633 mipspmu.general_event_map = &mipsxxcore_event_map2;
1634 mipspmu.cache_event_map = &mipsxxcore_cache_map2;
1581 break; 1635 break;
1582 case CPU_1004K: 1636 case CPU_1004K:
1583 mipspmu.name = "mips/1004K"; 1637 mipspmu.name = "mips/1004K";
1584 mipspmu.general_event_map = &mipsxxcore_event_map; 1638 mipspmu.general_event_map = &mipsxxcore_event_map;
1585 mipspmu.cache_event_map = &mipsxxcore_cache_map; 1639 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1586 break; 1640 break;
1641 case CPU_1074K:
1642 mipspmu.name = "mips/1074K";
1643 mipspmu.general_event_map = &mipsxxcore_event_map;
1644 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1645 break;
1646 case CPU_INTERAPTIV:
1647 mipspmu.name = "mips/interAptiv";
1648 mipspmu.general_event_map = &mipsxxcore_event_map;
1649 mipspmu.cache_event_map = &mipsxxcore_cache_map;
1650 break;
1587 case CPU_LOONGSON1: 1651 case CPU_LOONGSON1:
1588 mipspmu.name = "mips/loongson1"; 1652 mipspmu.name = "mips/loongson1";
1589 mipspmu.general_event_map = &mipsxxcore_event_map; 1653 mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 00d20974b3e7..e40971b51d2f 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -17,8 +17,24 @@
17 17
18unsigned int vced_count, vcei_count; 18unsigned int vced_count, vcei_count;
19 19
20/*
21 * No lock; only written during early bootup by CPU 0.
22 */
23static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
24
25int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
26{
27 return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
28}
29
30int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
31{
32 return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
33}
34
20static int show_cpuinfo(struct seq_file *m, void *v) 35static int show_cpuinfo(struct seq_file *m, void *v)
21{ 36{
37 struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
22 unsigned long n = (unsigned long) v - 1; 38 unsigned long n = (unsigned long) v - 1;
23 unsigned int version = cpu_data[n].processor_id; 39 unsigned int version = cpu_data[n].processor_id;
24 unsigned int fp_vers = cpu_data[n].fpu_id; 40 unsigned int fp_vers = cpu_data[n].fpu_id;
@@ -95,6 +111,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
95 if (cpu_has_mipsmt) seq_printf(m, "%s", " mt"); 111 if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
96 if (cpu_has_mmips) seq_printf(m, "%s", " micromips"); 112 if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
97 if (cpu_has_vz) seq_printf(m, "%s", " vz"); 113 if (cpu_has_vz) seq_printf(m, "%s", " vz");
114 if (cpu_has_msa) seq_printf(m, "%s", " msa");
115 if (cpu_has_eva) seq_printf(m, "%s", " eva");
98 seq_printf(m, "\n"); 116 seq_printf(m, "\n");
99 117
100 if (cpu_has_mmips) { 118 if (cpu_has_mmips) {
@@ -118,6 +136,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
118 cpu_has_vce ? "%u" : "not available"); 136 cpu_has_vce ? "%u" : "not available");
119 seq_printf(m, fmt, 'D', vced_count); 137 seq_printf(m, fmt, 'D', vced_count);
120 seq_printf(m, fmt, 'I', vcei_count); 138 seq_printf(m, fmt, 'I', vcei_count);
139
140 proc_cpuinfo_notifier_args.m = m;
141 proc_cpuinfo_notifier_args.n = n;
142
143 raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
144 &proc_cpuinfo_notifier_args);
145
121 seq_printf(m, "\n"); 146 seq_printf(m, "\n");
122 147
123 return 0; 148 return 0;
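
The new proc_cpuinfo notifier chain lets platform code append its own lines to /proc/cpuinfo: show_cpuinfo() packs the seq_file and CPU number into proc_cpuinfo_notifier_args and walks the chain. A sketch of a hypothetical board hook, assuming only the registration helper and argument layout visible above (plus <linux/notifier.h> and <linux/seq_file.h>); the board name and fields printed are invented:

	static int myboard_cpuinfo(struct notifier_block *nb, unsigned long action,
				   void *data)
	{
		struct proc_cpuinfo_notifier_args *args = data;

		/* args->m is the seq_file, args->n the CPU index being printed. */
		seq_printf(args->m, "board\t\t\t: myboard (cpu%lu)\n", args->n);
		return NOTIFY_OK;
	}

	static struct notifier_block myboard_cpuinfo_nb = {
		.notifier_call = myboard_cpuinfo,
	};

	static int __init myboard_cpuinfo_init(void)
	{
		return register_proc_cpuinfo_notifier(&myboard_cpuinfo_nb);
	}
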
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 6ae540e133b2..60e39dc7f1eb 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
32#include <asm/cpu.h> 32#include <asm/cpu.h>
33#include <asm/dsp.h> 33#include <asm/dsp.h>
34#include <asm/fpu.h> 34#include <asm/fpu.h>
35#include <asm/msa.h>
35#include <asm/pgtable.h> 36#include <asm/pgtable.h>
36#include <asm/mipsregs.h> 37#include <asm/mipsregs.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
@@ -65,6 +66,8 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
65 clear_used_math(); 66 clear_used_math();
66 clear_fpu_owner(); 67 clear_fpu_owner();
67 init_dsp(); 68 init_dsp();
69 clear_thread_flag(TIF_MSA_CTX_LIVE);
70 disable_msa();
68 regs->cp0_epc = pc; 71 regs->cp0_epc = pc;
69 regs->regs[29] = sp; 72 regs->regs[29] = sp;
70} 73}
@@ -89,7 +92,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
89 92
90 preempt_disable(); 93 preempt_disable();
91 94
92 if (is_fpu_owner()) 95 if (is_msa_enabled())
96 save_msa(p);
97 else if (is_fpu_owner())
93 save_fp(p); 98 save_fp(p);
94 99
95 if (cpu_has_dsp) 100 if (cpu_has_dsp)
@@ -157,7 +162,13 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
157/* Fill in the fpu structure for a core dump.. */ 162/* Fill in the fpu structure for a core dump.. */
158int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r) 163int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
159{ 164{
160 memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu)); 165 int i;
166
167 for (i = 0; i < NUM_FPU_REGS; i++)
168 memcpy(&r[i], &current->thread.fpu.fpr[i], sizeof(*r));
169
170 memcpy(&r[NUM_FPU_REGS], &current->thread.fpu.fcr31,
171 sizeof(current->thread.fpu.fcr31));
161 172
162 return 1; 173 return 1;
163} 174}
@@ -192,7 +203,13 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
192 203
193int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr) 204int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
194{ 205{
195 memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu)); 206 int i;
207
208 for (i = 0; i < NUM_FPU_REGS; i++)
209 memcpy(&fpr[i], &t->thread.fpu.fpr[i], sizeof(*fpr));
210
211 memcpy(&fpr[NUM_FPU_REGS], &t->thread.fpu.fcr31,
212 sizeof(t->thread.fpu.fcr31));
196 213
197 return 1; 214 return 1;
198} 215}
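
dump_fpu() and dump_task_fpu() above stop memcpy()ing the whole thread.fpu block because, with MSA, each architectural FP register now lives in a wider union and only its least significant 64 bits belong in the ELF FP note, with fcr31 following the 32 registers. A simplified sketch of that per-register copy with stand-in types (the real union fpureg, FPR_IDX() and elf_fpregset_t differ in detail):

	#include <stdint.h>
	#include <string.h>

	#define NUM_FPU_REGS 32

	union fpureg_sketch {
		uint64_t val64[2];		/* 128-bit storage per register */
	};

	struct fpu_sketch {
		union fpureg_sketch fpr[NUM_FPU_REGS];
		uint32_t fcr31;
	};

	static void dump_fpu_sketch(const struct fpu_sketch *fpu,
				    uint64_t out[NUM_FPU_REGS + 1])
	{
		int i;

		for (i = 0; i < NUM_FPU_REGS; i++)
			out[i] = fpu->fpr[i].val64[0];	/* low 64 bits only */

		memcpy(&out[NUM_FPU_REGS], &fpu->fcr31, sizeof(fpu->fcr31));
	}
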
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 7da9b76db4d9..71f85f427034 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -114,51 +114,30 @@ int ptrace_setregs(struct task_struct *child, __s64 __user *data)
114int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) 114int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
115{ 115{
116 int i; 116 int i;
117 unsigned int tmp;
118 117
119 if (!access_ok(VERIFY_WRITE, data, 33 * 8)) 118 if (!access_ok(VERIFY_WRITE, data, 33 * 8))
120 return -EIO; 119 return -EIO;
121 120
122 if (tsk_used_math(child)) { 121 if (tsk_used_math(child)) {
123 fpureg_t *fregs = get_fpu_regs(child); 122 union fpureg *fregs = get_fpu_regs(child);
124 for (i = 0; i < 32; i++) 123 for (i = 0; i < 32; i++)
125 __put_user(fregs[i], i + (__u64 __user *) data); 124 __put_user(get_fpr64(&fregs[i], 0),
125 i + (__u64 __user *)data);
126 } else { 126 } else {
127 for (i = 0; i < 32; i++) 127 for (i = 0; i < 32; i++)
128 __put_user((__u64) -1, i + (__u64 __user *) data); 128 __put_user((__u64) -1, i + (__u64 __user *) data);
129 } 129 }
130 130
131 __put_user(child->thread.fpu.fcr31, data + 64); 131 __put_user(child->thread.fpu.fcr31, data + 64);
132 132 __put_user(current_cpu_data.fpu_id, data + 65);
133 preempt_disable();
134 if (cpu_has_fpu) {
135 unsigned int flags;
136
137 if (cpu_has_mipsmt) {
138 unsigned int vpflags = dvpe();
139 flags = read_c0_status();
140 __enable_fpu(FPU_AS_IS);
141 __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
142 write_c0_status(flags);
143 evpe(vpflags);
144 } else {
145 flags = read_c0_status();
146 __enable_fpu(FPU_AS_IS);
147 __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
148 write_c0_status(flags);
149 }
150 } else {
151 tmp = 0;
152 }
153 preempt_enable();
154 __put_user(tmp, data + 65);
155 133
156 return 0; 134 return 0;
157} 135}
158 136
159int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) 137int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
160{ 138{
161 fpureg_t *fregs; 139 union fpureg *fregs;
140 u64 fpr_val;
162 int i; 141 int i;
163 142
164 if (!access_ok(VERIFY_READ, data, 33 * 8)) 143 if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -166,8 +145,10 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
166 145
167 fregs = get_fpu_regs(child); 146 fregs = get_fpu_regs(child);
168 147
169 for (i = 0; i < 32; i++) 148 for (i = 0; i < 32; i++) {
170 __get_user(fregs[i], i + (__u64 __user *) data); 149 __get_user(fpr_val, i + (__u64 __user *)data);
150 set_fpr64(&fregs[i], 0, fpr_val);
151 }
171 152
172 __get_user(child->thread.fpu.fcr31, data + 64); 153 __get_user(child->thread.fpu.fcr31, data + 64);
173 154
@@ -300,10 +281,27 @@ static int fpr_get(struct task_struct *target,
300 unsigned int pos, unsigned int count, 281 unsigned int pos, unsigned int count,
301 void *kbuf, void __user *ubuf) 282 void *kbuf, void __user *ubuf)
302{ 283{
303 return user_regset_copyout(&pos, &count, &kbuf, &ubuf, 284 unsigned i;
304 &target->thread.fpu, 285 int err;
305 0, sizeof(elf_fpregset_t)); 286 u64 fpr_val;
287
306 /* XXX fcr31 */ 288 /* XXX fcr31 */
289
290 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
291 return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
292 &target->thread.fpu,
293 0, sizeof(elf_fpregset_t));
294
295 for (i = 0; i < NUM_FPU_REGS; i++) {
296 fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
297 err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
298 &fpr_val, i * sizeof(elf_fpreg_t),
299 (i + 1) * sizeof(elf_fpreg_t));
300 if (err)
301 return err;
302 }
303
304 return 0;
307} 305}
308 306
309static int fpr_set(struct task_struct *target, 307static int fpr_set(struct task_struct *target,
@@ -311,10 +309,27 @@ static int fpr_set(struct task_struct *target,
311 unsigned int pos, unsigned int count, 309 unsigned int pos, unsigned int count,
312 const void *kbuf, const void __user *ubuf) 310 const void *kbuf, const void __user *ubuf)
313{ 311{
314 return user_regset_copyin(&pos, &count, &kbuf, &ubuf, 312 unsigned i;
315 &target->thread.fpu, 313 int err;
316 0, sizeof(elf_fpregset_t)); 314 u64 fpr_val;
315
317 /* XXX fcr31 */ 316 /* XXX fcr31 */
317
318 if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
319 return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
320 &target->thread.fpu,
321 0, sizeof(elf_fpregset_t));
322
323 for (i = 0; i < NUM_FPU_REGS; i++) {
324 err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
325 &fpr_val, i * sizeof(elf_fpreg_t),
326 (i + 1) * sizeof(elf_fpreg_t));
327 if (err)
328 return err;
329 set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
330 }
331
332 return 0;
318} 333}
319 334
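For context, this regset pair is what backs PTRACE_GETREGSET/PTRACE_SETREGSET with NT_PRFPREG. A hedged user-space sketch of a consumer, assuming the 33-slot 64-bit layout implied by the 33 * 8 access_ok() checks earlier in the file (32 FPRs plus one control-word slot; note the XXX fcr31 comment above):

	#include <elf.h>		/* NT_PRFPREG */
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/uio.h>

	/* Dump a stopped tracee's FP registers via the fpr_get() path. */
	static int dump_fpregs(pid_t pid)
	{
		uint64_t fpregs[33];	/* 32 FPRs + control-word slot */
		struct iovec iov = { .iov_base = fpregs, .iov_len = sizeof(fpregs) };
		int i;

		if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov) == -1)
			return -1;

		for (i = 0; i < 32; i++)
			printf("$f%-2d = %016llx\n", i, (unsigned long long)fpregs[i]);
		return 0;
	}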
320enum mips_regset { 335enum mips_regset {
@@ -408,7 +423,7 @@ long arch_ptrace(struct task_struct *child, long request,
408 /* Read the word at location addr in the USER area. */ 423 /* Read the word at location addr in the USER area. */
409 case PTRACE_PEEKUSR: { 424 case PTRACE_PEEKUSR: {
410 struct pt_regs *regs; 425 struct pt_regs *regs;
411 fpureg_t *fregs; 426 union fpureg *fregs;
412 unsigned long tmp = 0; 427 unsigned long tmp = 0;
413 428
414 regs = task_pt_regs(child); 429 regs = task_pt_regs(child);
@@ -433,14 +448,12 @@ long arch_ptrace(struct task_struct *child, long request,
433 * order bits of the values stored in the even 448 * order bits of the values stored in the even
434 * registers - unless we're using r2k_switch.S. 449 * registers - unless we're using r2k_switch.S.
435 */ 450 */
436 if (addr & 1) 451 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
437 tmp = fregs[(addr & ~1) - 32] >> 32; 452 addr & 1);
438 else
439 tmp = fregs[addr - 32];
440 break; 453 break;
441 } 454 }
442#endif 455#endif
443 tmp = fregs[addr - FPR_BASE]; 456 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
444 break; 457 break;
445 case PC: 458 case PC:
446 tmp = regs->cp0_epc; 459 tmp = regs->cp0_epc;
@@ -465,44 +478,10 @@ long arch_ptrace(struct task_struct *child, long request,
465 case FPC_CSR: 478 case FPC_CSR:
466 tmp = child->thread.fpu.fcr31; 479 tmp = child->thread.fpu.fcr31;
467 break; 480 break;
468 case FPC_EIR: { /* implementation / version register */ 481 case FPC_EIR:
469 unsigned int flags; 482 /* implementation / version register */
470#ifdef CONFIG_MIPS_MT_SMTC 483 tmp = current_cpu_data.fpu_id;
471 unsigned long irqflags;
472 unsigned int mtflags;
473#endif /* CONFIG_MIPS_MT_SMTC */
474
475 preempt_disable();
476 if (!cpu_has_fpu) {
477 preempt_enable();
478 break;
479 }
480
481#ifdef CONFIG_MIPS_MT_SMTC
482 /* Read-modify-write of Status must be atomic */
483 local_irq_save(irqflags);
484 mtflags = dmt();
485#endif /* CONFIG_MIPS_MT_SMTC */
486 if (cpu_has_mipsmt) {
487 unsigned int vpflags = dvpe();
488 flags = read_c0_status();
489 __enable_fpu(FPU_AS_IS);
490 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
491 write_c0_status(flags);
492 evpe(vpflags);
493 } else {
494 flags = read_c0_status();
495 __enable_fpu(FPU_AS_IS);
496 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
497 write_c0_status(flags);
498 }
499#ifdef CONFIG_MIPS_MT_SMTC
500 emt(mtflags);
501 local_irq_restore(irqflags);
502#endif /* CONFIG_MIPS_MT_SMTC */
503 preempt_enable();
504 break; 484 break;
505 }
506 case DSP_BASE ... DSP_BASE + 5: { 485 case DSP_BASE ... DSP_BASE + 5: {
507 dspreg_t *dregs; 486 dspreg_t *dregs;
508 487
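The FPC_EIR case no longer has to enable the FPU (and juggle MT/SMTC state) at ptrace time; it simply reports the FIR value cached in cpu_data during CPU probing. Roughly how that cached fpu_id is plausibly captured once at boot (a sketch; the real helper sits in arch/mips/kernel/cpu-probe.c):

	/* Read the CP1 implementation/revision register once, with the FPU
	 * temporarily enabled, so later readers can use current_cpu_data.fpu_id. */
	static inline unsigned long cpu_get_fpu_id(void)
	{
		unsigned long tmp, fpu_id;

		tmp = read_c0_status();
		__enable_fpu(FPU_AS_IS);
		fpu_id = read_32bit_cp1_register(CP1_REVISION);
		write_c0_status(tmp);
		return fpu_id;
	}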
@@ -548,7 +527,7 @@ long arch_ptrace(struct task_struct *child, long request,
548 regs->regs[addr] = data; 527 regs->regs[addr] = data;
549 break; 528 break;
550 case FPR_BASE ... FPR_BASE + 31: { 529 case FPR_BASE ... FPR_BASE + 31: {
551 fpureg_t *fregs = get_fpu_regs(child); 530 union fpureg *fregs = get_fpu_regs(child);
552 531
553 if (!tsk_used_math(child)) { 532 if (!tsk_used_math(child)) {
554 /* FP not yet used */ 533 /* FP not yet used */
@@ -563,19 +542,12 @@ long arch_ptrace(struct task_struct *child, long request,
563 * order bits of the values stored in the even 542 * order bits of the values stored in the even
564 * registers - unless we're using r2k_switch.S. 543 * registers - unless we're using r2k_switch.S.
565 */ 544 */
566 if (addr & 1) { 545 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
567 fregs[(addr & ~1) - FPR_BASE] &= 546 addr & 1, data);
568 0xffffffff;
569 fregs[(addr & ~1) - FPR_BASE] |=
570 ((u64)data) << 32;
571 } else {
572 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
573 fregs[addr - FPR_BASE] |= data;
574 }
575 break; 547 break;
576 } 548 }
577#endif 549#endif
578 fregs[addr - FPR_BASE] = data; 550 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
579 break; 551 break;
580 } 552 }
581 case PC: 553 case PC:
@@ -662,13 +634,13 @@ long arch_ptrace(struct task_struct *child, long request,
662 * Notification of system call entry/exit 634 * Notification of system call entry/exit
663 * - triggered by current->work.syscall_trace 635 * - triggered by current->work.syscall_trace
664 */ 636 */
665asmlinkage void syscall_trace_enter(struct pt_regs *regs) 637asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
666{ 638{
667 long ret = 0; 639 long ret = 0;
668 user_exit(); 640 user_exit();
669 641
670 /* do the secure computing check first */ 642 if (secure_computing(syscall) == -1)
671 secure_computing_strict(regs->regs[2]); 643 return -1;
672 644
673 if (test_thread_flag(TIF_SYSCALL_TRACE) && 645 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
674 tracehook_report_syscall_entry(regs)) 646 tracehook_report_syscall_entry(regs))
@@ -677,10 +649,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
677 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 649 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
678 trace_sys_enter(regs, regs->regs[2]); 650 trace_sys_enter(regs, regs->regs[2]);
679 651
680 audit_syscall_entry(__syscall_get_arch(), 652 audit_syscall_entry(syscall_get_arch(),
681 regs->regs[2], 653 syscall,
682 regs->regs[4], regs->regs[5], 654 regs->regs[4], regs->regs[5],
683 regs->regs[6], regs->regs[7]); 655 regs->regs[6], regs->regs[7]);
656 return syscall;
684} 657}
685 658
686/* 659/*
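The audit hook now goes through the generic syscall_get_arch() helper instead of a private __syscall_get_arch(). A hedged sketch of what that helper plausibly resolves to on MIPS; the real definition belongs in arch/mips/include/asm/syscall.h, may key off different config symbols, and must also distinguish compat tasks, which this sketch ignores:

	#include <linux/audit.h>
	#include <linux/kconfig.h>

	static inline int syscall_get_arch(void)
	{
	#ifdef __MIPSEB__
		return IS_ENABLED(CONFIG_64BIT) ? AUDIT_ARCH_MIPS64 : AUDIT_ARCH_MIPS;
	#else
		return IS_ENABLED(CONFIG_64BIT) ? AUDIT_ARCH_MIPSEL64 : AUDIT_ARCH_MIPSEL;
	#endif
	}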
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b8aa2dd5b00b..b40c3ca60ee5 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,7 +80,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
80 /* Read the word at location addr in the USER area. */ 80 /* Read the word at location addr in the USER area. */
81 case PTRACE_PEEKUSR: { 81 case PTRACE_PEEKUSR: {
82 struct pt_regs *regs; 82 struct pt_regs *regs;
83 fpureg_t *fregs; 83 union fpureg *fregs;
84 unsigned int tmp; 84 unsigned int tmp;
85 85
86 regs = task_pt_regs(child); 86 regs = task_pt_regs(child);
@@ -103,13 +103,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
103 * order bits of the values stored in the even 103 * order bits of the values stored in the even
104 * registers - unless we're using r2k_switch.S. 104 * registers - unless we're using r2k_switch.S.
105 */ 105 */
106 if (addr & 1) 106 tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
107 tmp = fregs[(addr & ~1) - 32] >> 32; 107 addr & 1);
108 else
109 tmp = fregs[addr - 32];
110 break; 108 break;
111 } 109 }
112 tmp = fregs[addr - FPR_BASE]; 110 tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
113 break; 111 break;
114 case PC: 112 case PC:
115 tmp = regs->cp0_epc; 113 tmp = regs->cp0_epc;
@@ -129,46 +127,10 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
129 case FPC_CSR: 127 case FPC_CSR:
130 tmp = child->thread.fpu.fcr31; 128 tmp = child->thread.fpu.fcr31;
131 break; 129 break;
132 case FPC_EIR: { /* implementation / version register */ 130 case FPC_EIR:
133 unsigned int flags; 131 /* implementation / version register */
134#ifdef CONFIG_MIPS_MT_SMTC 132 tmp = current_cpu_data.fpu_id;
135 unsigned int irqflags;
136 unsigned int mtflags;
137#endif /* CONFIG_MIPS_MT_SMTC */
138
139 preempt_disable();
140 if (!cpu_has_fpu) {
141 preempt_enable();
142 tmp = 0;
143 break;
144 }
145
146#ifdef CONFIG_MIPS_MT_SMTC
147 /* Read-modify-write of Status must be atomic */
148 local_irq_save(irqflags);
149 mtflags = dmt();
150#endif /* CONFIG_MIPS_MT_SMTC */
151
152 if (cpu_has_mipsmt) {
153 unsigned int vpflags = dvpe();
154 flags = read_c0_status();
155 __enable_fpu(FPU_AS_IS);
156 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
157 write_c0_status(flags);
158 evpe(vpflags);
159 } else {
160 flags = read_c0_status();
161 __enable_fpu(FPU_AS_IS);
162 __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
163 write_c0_status(flags);
164 }
165#ifdef CONFIG_MIPS_MT_SMTC
166 emt(mtflags);
167 local_irq_restore(irqflags);
168#endif /* CONFIG_MIPS_MT_SMTC */
169 preempt_enable();
170 break; 133 break;
171 }
172 case DSP_BASE ... DSP_BASE + 5: { 134 case DSP_BASE ... DSP_BASE + 5: {
173 dspreg_t *dregs; 135 dspreg_t *dregs;
174 136
@@ -233,7 +195,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
233 regs->regs[addr] = data; 195 regs->regs[addr] = data;
234 break; 196 break;
235 case FPR_BASE ... FPR_BASE + 31: { 197 case FPR_BASE ... FPR_BASE + 31: {
236 fpureg_t *fregs = get_fpu_regs(child); 198 union fpureg *fregs = get_fpu_regs(child);
237 199
238 if (!tsk_used_math(child)) { 200 if (!tsk_used_math(child)) {
239 /* FP not yet used */ 201 /* FP not yet used */
@@ -247,18 +209,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
247 * order bits of the values stored in the even 209 * order bits of the values stored in the even
248 * registers - unless we're using r2k_switch.S. 210 * registers - unless we're using r2k_switch.S.
249 */ 211 */
250 if (addr & 1) { 212 set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
251 fregs[(addr & ~1) - FPR_BASE] &= 213 addr & 1, data);
252 0xffffffff;
253 fregs[(addr & ~1) - FPR_BASE] |=
254 ((u64)data) << 32;
255 } else {
256 fregs[addr - FPR_BASE] &= ~0xffffffffLL;
257 fregs[addr - FPR_BASE] |= data;
258 }
259 break; 214 break;
260 } 215 }
261 fregs[addr - FPR_BASE] = data; 216 set_fpr64(&fregs[addr - FPR_BASE], 0, data);
262 break; 217 break;
263 } 218 }
264 case PC: 219 case PC:
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 73b0ddf910d4..71814272d148 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,6 +13,7 @@
13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
14 */ 14 */
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/asmmacro.h>
16#include <asm/errno.h> 17#include <asm/errno.h>
17#include <asm/fpregdef.h> 18#include <asm/fpregdef.h>
18#include <asm/mipsregs.h> 19#include <asm/mipsregs.h>
@@ -30,7 +31,7 @@
30 .endm 31 .endm
31 32
32 .set noreorder 33 .set noreorder
33 .set mips3 34 .set arch=r4000
34 35
35LEAF(_save_fp_context) 36LEAF(_save_fp_context)
36 cfc1 t1, fcr31 37 cfc1 t1, fcr31
@@ -245,6 +246,218 @@ LEAF(_restore_fp_context32)
245 END(_restore_fp_context32) 246 END(_restore_fp_context32)
246#endif 247#endif
247 248
249#ifdef CONFIG_CPU_HAS_MSA
250
251 .macro save_sc_msareg wr, off, sc, tmp
252#ifdef CONFIG_64BIT
253 copy_u_d \tmp, \wr, 1
254 EX sd \tmp, (\off+(\wr*8))(\sc)
255#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
256 copy_u_w \tmp, \wr, 2
257 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
258 copy_u_w \tmp, \wr, 3
259 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
260#else /* CONFIG_CPU_BIG_ENDIAN */
261 copy_u_w \tmp, \wr, 2
262 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
263 copy_u_w \tmp, \wr, 3
264 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
265#endif
266 .endm
267
268/*
269 * int _save_msa_context(struct sigcontext *sc)
270 *
271 * Save the upper 64 bits of each vector register along with the MSA_CSR
272 * register into sc. Returns zero on success, else non-zero.
273 */
274LEAF(_save_msa_context)
275 save_sc_msareg 0, SC_MSAREGS, a0, t0
276 save_sc_msareg 1, SC_MSAREGS, a0, t0
277 save_sc_msareg 2, SC_MSAREGS, a0, t0
278 save_sc_msareg 3, SC_MSAREGS, a0, t0
279 save_sc_msareg 4, SC_MSAREGS, a0, t0
280 save_sc_msareg 5, SC_MSAREGS, a0, t0
281 save_sc_msareg 6, SC_MSAREGS, a0, t0
282 save_sc_msareg 7, SC_MSAREGS, a0, t0
283 save_sc_msareg 8, SC_MSAREGS, a0, t0
284 save_sc_msareg 9, SC_MSAREGS, a0, t0
285 save_sc_msareg 10, SC_MSAREGS, a0, t0
286 save_sc_msareg 11, SC_MSAREGS, a0, t0
287 save_sc_msareg 12, SC_MSAREGS, a0, t0
288 save_sc_msareg 13, SC_MSAREGS, a0, t0
289 save_sc_msareg 14, SC_MSAREGS, a0, t0
290 save_sc_msareg 15, SC_MSAREGS, a0, t0
291 save_sc_msareg 16, SC_MSAREGS, a0, t0
292 save_sc_msareg 17, SC_MSAREGS, a0, t0
293 save_sc_msareg 18, SC_MSAREGS, a0, t0
294 save_sc_msareg 19, SC_MSAREGS, a0, t0
295 save_sc_msareg 20, SC_MSAREGS, a0, t0
296 save_sc_msareg 21, SC_MSAREGS, a0, t0
297 save_sc_msareg 22, SC_MSAREGS, a0, t0
298 save_sc_msareg 23, SC_MSAREGS, a0, t0
299 save_sc_msareg 24, SC_MSAREGS, a0, t0
300 save_sc_msareg 25, SC_MSAREGS, a0, t0
301 save_sc_msareg 26, SC_MSAREGS, a0, t0
302 save_sc_msareg 27, SC_MSAREGS, a0, t0
303 save_sc_msareg 28, SC_MSAREGS, a0, t0
304 save_sc_msareg 29, SC_MSAREGS, a0, t0
305 save_sc_msareg 30, SC_MSAREGS, a0, t0
306 save_sc_msareg 31, SC_MSAREGS, a0, t0
307 jr ra
308 li v0, 0
309 END(_save_msa_context)
310
311#ifdef CONFIG_MIPS32_COMPAT
312
313/*
314 * int _save_msa_context32(struct sigcontext32 *sc)
315 *
316 * Save the upper 64 bits of each vector register along with the MSA_CSR
317 * register into sc. Returns zero on success, else non-zero.
318 */
319LEAF(_save_msa_context32)
320 save_sc_msareg 0, SC32_MSAREGS, a0, t0
321 save_sc_msareg 1, SC32_MSAREGS, a0, t0
322 save_sc_msareg 2, SC32_MSAREGS, a0, t0
323 save_sc_msareg 3, SC32_MSAREGS, a0, t0
324 save_sc_msareg 4, SC32_MSAREGS, a0, t0
325 save_sc_msareg 5, SC32_MSAREGS, a0, t0
326 save_sc_msareg 6, SC32_MSAREGS, a0, t0
327 save_sc_msareg 7, SC32_MSAREGS, a0, t0
328 save_sc_msareg 8, SC32_MSAREGS, a0, t0
329 save_sc_msareg 9, SC32_MSAREGS, a0, t0
330 save_sc_msareg 10, SC32_MSAREGS, a0, t0
331 save_sc_msareg 11, SC32_MSAREGS, a0, t0
332 save_sc_msareg 12, SC32_MSAREGS, a0, t0
333 save_sc_msareg 13, SC32_MSAREGS, a0, t0
334 save_sc_msareg 14, SC32_MSAREGS, a0, t0
335 save_sc_msareg 15, SC32_MSAREGS, a0, t0
336 save_sc_msareg 16, SC32_MSAREGS, a0, t0
337 save_sc_msareg 17, SC32_MSAREGS, a0, t0
338 save_sc_msareg 18, SC32_MSAREGS, a0, t0
339 save_sc_msareg 19, SC32_MSAREGS, a0, t0
340 save_sc_msareg 20, SC32_MSAREGS, a0, t0
341 save_sc_msareg 21, SC32_MSAREGS, a0, t0
342 save_sc_msareg 22, SC32_MSAREGS, a0, t0
343 save_sc_msareg 23, SC32_MSAREGS, a0, t0
344 save_sc_msareg 24, SC32_MSAREGS, a0, t0
345 save_sc_msareg 25, SC32_MSAREGS, a0, t0
346 save_sc_msareg 26, SC32_MSAREGS, a0, t0
347 save_sc_msareg 27, SC32_MSAREGS, a0, t0
348 save_sc_msareg 28, SC32_MSAREGS, a0, t0
349 save_sc_msareg 29, SC32_MSAREGS, a0, t0
350 save_sc_msareg 30, SC32_MSAREGS, a0, t0
351 save_sc_msareg 31, SC32_MSAREGS, a0, t0
352 jr ra
353 li v0, 0
354 END(_save_msa_context32)
355
356#endif /* CONFIG_MIPS32_COMPAT */
357
358 .macro restore_sc_msareg wr, off, sc, tmp
359#ifdef CONFIG_64BIT
360 EX ld \tmp, (\off+(\wr*8))(\sc)
361 insert_d \wr, 1, \tmp
362#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
363 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
364 insert_w \wr, 2, \tmp
365 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
366 insert_w \wr, 3, \tmp
367#else /* CONFIG_CPU_BIG_ENDIAN */
368 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
369 insert_w \wr, 2, \tmp
370 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
371 insert_w \wr, 3, \tmp
372#endif
373 .endm
374
375/*
376 * int _restore_msa_context(struct sigcontext *sc)
377 */
378LEAF(_restore_msa_context)
379 restore_sc_msareg 0, SC_MSAREGS, a0, t0
380 restore_sc_msareg 1, SC_MSAREGS, a0, t0
381 restore_sc_msareg 2, SC_MSAREGS, a0, t0
382 restore_sc_msareg 3, SC_MSAREGS, a0, t0
383 restore_sc_msareg 4, SC_MSAREGS, a0, t0
384 restore_sc_msareg 5, SC_MSAREGS, a0, t0
385 restore_sc_msareg 6, SC_MSAREGS, a0, t0
386 restore_sc_msareg 7, SC_MSAREGS, a0, t0
387 restore_sc_msareg 8, SC_MSAREGS, a0, t0
388 restore_sc_msareg 9, SC_MSAREGS, a0, t0
389 restore_sc_msareg 10, SC_MSAREGS, a0, t0
390 restore_sc_msareg 11, SC_MSAREGS, a0, t0
391 restore_sc_msareg 12, SC_MSAREGS, a0, t0
392 restore_sc_msareg 13, SC_MSAREGS, a0, t0
393 restore_sc_msareg 14, SC_MSAREGS, a0, t0
394 restore_sc_msareg 15, SC_MSAREGS, a0, t0
395 restore_sc_msareg 16, SC_MSAREGS, a0, t0
396 restore_sc_msareg 17, SC_MSAREGS, a0, t0
397 restore_sc_msareg 18, SC_MSAREGS, a0, t0
398 restore_sc_msareg 19, SC_MSAREGS, a0, t0
399 restore_sc_msareg 20, SC_MSAREGS, a0, t0
400 restore_sc_msareg 21, SC_MSAREGS, a0, t0
401 restore_sc_msareg 22, SC_MSAREGS, a0, t0
402 restore_sc_msareg 23, SC_MSAREGS, a0, t0
403 restore_sc_msareg 24, SC_MSAREGS, a0, t0
404 restore_sc_msareg 25, SC_MSAREGS, a0, t0
405 restore_sc_msareg 26, SC_MSAREGS, a0, t0
406 restore_sc_msareg 27, SC_MSAREGS, a0, t0
407 restore_sc_msareg 28, SC_MSAREGS, a0, t0
408 restore_sc_msareg 29, SC_MSAREGS, a0, t0
409 restore_sc_msareg 30, SC_MSAREGS, a0, t0
410 restore_sc_msareg 31, SC_MSAREGS, a0, t0
411 jr ra
412 li v0, 0
413 END(_restore_msa_context)
414
415#ifdef CONFIG_MIPS32_COMPAT
416
417/*
418 * int _restore_msa_context32(struct sigcontext32 *sc)
419 */
420LEAF(_restore_msa_context32)
421 restore_sc_msareg 0, SC32_MSAREGS, a0, t0
422 restore_sc_msareg 1, SC32_MSAREGS, a0, t0
423 restore_sc_msareg 2, SC32_MSAREGS, a0, t0
424 restore_sc_msareg 3, SC32_MSAREGS, a0, t0
425 restore_sc_msareg 4, SC32_MSAREGS, a0, t0
426 restore_sc_msareg 5, SC32_MSAREGS, a0, t0
427 restore_sc_msareg 6, SC32_MSAREGS, a0, t0
428 restore_sc_msareg 7, SC32_MSAREGS, a0, t0
429 restore_sc_msareg 8, SC32_MSAREGS, a0, t0
430 restore_sc_msareg 9, SC32_MSAREGS, a0, t0
431 restore_sc_msareg 10, SC32_MSAREGS, a0, t0
432 restore_sc_msareg 11, SC32_MSAREGS, a0, t0
433 restore_sc_msareg 12, SC32_MSAREGS, a0, t0
434 restore_sc_msareg 13, SC32_MSAREGS, a0, t0
435 restore_sc_msareg 14, SC32_MSAREGS, a0, t0
436 restore_sc_msareg 15, SC32_MSAREGS, a0, t0
437 restore_sc_msareg 16, SC32_MSAREGS, a0, t0
438 restore_sc_msareg 17, SC32_MSAREGS, a0, t0
439 restore_sc_msareg 18, SC32_MSAREGS, a0, t0
440 restore_sc_msareg 19, SC32_MSAREGS, a0, t0
441 restore_sc_msareg 20, SC32_MSAREGS, a0, t0
442 restore_sc_msareg 21, SC32_MSAREGS, a0, t0
443 restore_sc_msareg 22, SC32_MSAREGS, a0, t0
444 restore_sc_msareg 23, SC32_MSAREGS, a0, t0
445 restore_sc_msareg 24, SC32_MSAREGS, a0, t0
446 restore_sc_msareg 25, SC32_MSAREGS, a0, t0
447 restore_sc_msareg 26, SC32_MSAREGS, a0, t0
448 restore_sc_msareg 27, SC32_MSAREGS, a0, t0
449 restore_sc_msareg 28, SC32_MSAREGS, a0, t0
450 restore_sc_msareg 29, SC32_MSAREGS, a0, t0
451 restore_sc_msareg 30, SC32_MSAREGS, a0, t0
452 restore_sc_msareg 31, SC32_MSAREGS, a0, t0
453 jr ra
454 li v0, 0
455 END(_restore_msa_context32)
456
457#endif /* CONFIG_MIPS32_COMPAT */
458
459#endif /* CONFIG_CPU_HAS_MSA */
460
248 .set reorder 461 .set reorder
249 462
250 .type fault@function 463 .type fault@function
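The save_sc_msareg/restore_sc_msareg macros above move bits 127:64 of each of the 32 vector registers to or from 8-byte slots starting at SC_MSAREGS in the sigcontext, using copy_u/insert element moves (one doubleword on 64-bit kernels, two endian-ordered words otherwise). The SC_MSAREGS and SC32_MSAREGS byte offsets they rely on are presumably generated by arch/mips/kernel/asm-offsets.c in the usual kbuild way, i.e. something like:

	#include <linux/kbuild.h>

	/* Illustrative sketch: emit the byte offsets of the new MSA register
	 * arrays so the assembler macros in r4k_fpu.S can address them. */
	void output_msa_sc_defines(void)
	{
		OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
		OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
	}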
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index cc78dd9a17c7..abacac7c33ef 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -29,18 +29,8 @@
29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS) 29#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
30 30
31/* 31/*
32 * FPU context is saved iff the process has used it's FPU in the current
33 * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user
34 * space STATUS register should be 0, so that a process *always* starts its
35 * userland with FPU disabled after each context switch.
36 *
37 * FPU will be enabled as soon as the process accesses FPU again, through
38 * do_cpu() trap.
39 */
40
41/*
42 * task_struct *resume(task_struct *prev, task_struct *next, 32 * task_struct *resume(task_struct *prev, task_struct *next,
43 * struct thread_info *next_ti, int usedfpu) 33 * struct thread_info *next_ti, s32 fp_save)
44 */ 34 */
45 .align 5 35 .align 5
46 LEAF(resume) 36 LEAF(resume)
@@ -50,23 +40,37 @@
50 LONG_S ra, THREAD_REG31(a0) 40 LONG_S ra, THREAD_REG31(a0)
51 41
52 /* 42 /*
53 * check if we need to save FPU registers 43 * Check whether we need to save any FP context. FP context is saved
44 * iff the process has used the context with the scalar FPU or the MSA
45 * ASE in the current time slice, as indicated by _TIF_USEDFPU and
46 * _TIF_USEDMSA respectively. switch_to will have set fp_save
47 * accordingly to an FP_SAVE_ enum value.
54 */ 48 */
49 beqz a3, 2f
55 50
56 beqz a3, 1f
57
58 PTR_L t3, TASK_THREAD_INFO(a0)
59 /* 51 /*
60 * clear saved user stack CU1 bit 52 * We do. Clear the saved CU1 bit for prev, such that next time it is
53 * scheduled it will start in userland with the FPU disabled. If the
54 * task uses the FPU then it will be enabled again via the do_cpu trap.
55 * This allows us to lazily restore the FP context.
61 */ 56 */
57 PTR_L t3, TASK_THREAD_INFO(a0)
62 LONG_L t0, ST_OFF(t3) 58 LONG_L t0, ST_OFF(t3)
63 li t1, ~ST0_CU1 59 li t1, ~ST0_CU1
64 and t0, t0, t1 60 and t0, t0, t1
65 LONG_S t0, ST_OFF(t3) 61 LONG_S t0, ST_OFF(t3)
66 62
63 /* Check whether we're saving scalar or vector context. */
64 bgtz a3, 1f
65
66 /* Save 128b MSA vector context. */
67 msa_save_all a0
68 b 2f
69
701: /* Save 32b/64b scalar FP context. */
67 fpu_save_double a0 t0 t1 # c0_status passed in t0 71 fpu_save_double a0 t0 t1 # c0_status passed in t0
68 # clobbers t1 72 # clobbers t1
691: 732:
70 74
71#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP) 75#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
72 PTR_LA t8, __stack_chk_guard 76 PTR_LA t8, __stack_chk_guard
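resume() now receives an s32 fp_save code rather than a usedfpu flag, and the beqz/bgtz pair above implies a three-way encoding: zero means nothing to save, a positive value selects the scalar FP path and a negative value the MSA path. The constants switch_to() passes in are presumably along these lines (names follow the FP_SAVE_ enum mentioned in the comment; treat as a sketch):

	#define FP_SAVE_NONE	0	/* no live FP/MSA context      */
	#define FP_SAVE_SCALAR	1	/* save 32b/64b scalar FP only */
	#define FP_SAVE_VECTOR	-1	/* save full 128b MSA context  */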
@@ -141,6 +145,26 @@ LEAF(_restore_fp)
141 jr ra 145 jr ra
142 END(_restore_fp) 146 END(_restore_fp)
143 147
148#ifdef CONFIG_CPU_HAS_MSA
149
150/*
151 * Save a thread's MSA vector context.
152 */
153LEAF(_save_msa)
154 msa_save_all a0
155 jr ra
156 END(_save_msa)
157
158/*
159 * Restore a thread's MSA vector context.
160 */
161LEAF(_restore_msa)
162 msa_restore_all a0
163 jr ra
164 END(_restore_msa)
165
166#endif
167
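A hedged sketch of how the new _save_msa/_restore_msa leaf routines are plausibly exposed to C callers; the prototypes and the cpu_has_msa guard are assumptions, with the real wrappers expected to sit alongside the existing _save_fp/_restore_fp helpers:

	extern asmlinkage void _save_msa(struct task_struct *t);
	extern asmlinkage void _restore_msa(struct task_struct *t);

	static inline void save_msa(struct task_struct *t)
	{
		if (cpu_has_msa)
			_save_msa(t);
	}

	static inline void restore_msa(struct task_struct *t)
	{
		if (cpu_has_msa)
			_restore_msa(t);
	}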
144/* 168/*
145 * Load the FPU with signalling NANS. This bit pattern we're using has 169 * Load the FPU with signalling NANS. This bit pattern we're using has
146 * the property that no matter whether considered as single or as double 170 * the property that no matter whether considered as single or as double
@@ -270,7 +294,7 @@ LEAF(_init_fpu)
2701: .set pop 2941: .set pop
271#endif /* CONFIG_CPU_MIPS32_R2 */ 295#endif /* CONFIG_CPU_MIPS32_R2 */
272#else 296#else
273 .set mips3 297 .set arch=r4000
274 dmtc1 t1, $f0 298 dmtc1 t1, $f0
275 dmtc1 t1, $f2 299 dmtc1 t1, $f2
276 dmtc1 t1, $f4 300 dmtc1 t1, $f4
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a5b14f48e1af..fdc70b400442 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -6,6 +6,7 @@
6 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> 6 * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) 2001 MIPS Technologies, Inc. 7 * Copyright (C) 2001 MIPS Technologies, Inc.
8 * Copyright (C) 2004 Thiemo Seufer 8 * Copyright (C) 2004 Thiemo Seufer
9 * Copyright (C) 2014 Imagination Technologies Ltd.
9 */ 10 */
10#include <linux/errno.h> 11#include <linux/errno.h>
11#include <asm/asm.h> 12#include <asm/asm.h>
@@ -74,10 +75,10 @@ NESTED(handle_sys, PT_SIZE, sp)
74 .set noreorder 75 .set noreorder
75 .set nomacro 76 .set nomacro
76 77
771: lw t5, 16(t0) # argument #5 from usp 781: user_lw(t5, 16(t0)) # argument #5 from usp
784: lw t6, 20(t0) # argument #6 from usp 794: user_lw(t6, 20(t0)) # argument #6 from usp
793: lw t7, 24(t0) # argument #7 from usp 803: user_lw(t7, 24(t0)) # argument #7 from usp
802: lw t8, 28(t0) # argument #8 from usp 812: user_lw(t8, 28(t0)) # argument #8 from usp
81 82
82 sw t5, 16(sp) # argument #5 to ksp 83 sw t5, 16(sp) # argument #5 to ksp
83 sw t6, 20(sp) # argument #6 to ksp 84 sw t6, 20(sp) # argument #6 to ksp
@@ -118,7 +119,18 @@ syscall_trace_entry:
118 SAVE_STATIC 119 SAVE_STATIC
119 move s0, t2 120 move s0, t2
120 move a0, sp 121 move a0, sp
121 jal syscall_trace_enter 122
123 /*
124 * syscall number is in v0 unless we called syscall(__NR_###)
125 * where the real syscall number is in a0
126 */
127 addiu a1, v0, __NR_O32_Linux
128 bnez v0, 1f /* __NR_syscall at offset 0 */
129 lw a1, PT_R4(sp)
130
1311: jal syscall_trace_enter
132
133 bltz v0, 2f # seccomp failed? Skip syscall
122 134
123 move t0, s0 135 move t0, s0
124 RESTORE_STATIC 136 RESTORE_STATIC
@@ -138,7 +150,7 @@ syscall_trace_entry:
138 sw t1, PT_R0(sp) # save it for syscall restarting 150 sw t1, PT_R0(sp) # save it for syscall restarting
1391: sw v0, PT_R2(sp) # result 1511: sw v0, PT_R2(sp) # result
140 152
141 j syscall_exit 1532: j syscall_exit
142 154
143/* ------------------------------------------------------------------------ */ 155/* ------------------------------------------------------------------------ */
144 156
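The a1 fix-up above exists because o32 userland can reach any system call through the indirect syscall entry, in which case the real number arrives in a0 rather than v0. A small illustration of the two entry forms that must look identical to seccomp, audit and tracers (builds only where __NR_syscall exists, i.e. MIPS o32, where it is the 4000 documented in the comment above):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Direct: syscall number in v0. */
		long direct = syscall(SYS_getpid);
		/* Indirect: 4000 in v0, real number as the first argument (a0). */
		long indirect = syscall(SYS_syscall, SYS_getpid);

		printf("direct=%ld indirect=%ld\n", direct, indirect);
		return 0;
	}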
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index b56e254beb15..dd99c3285aea 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,8 +80,11 @@ syscall_trace_entry:
80 SAVE_STATIC 80 SAVE_STATIC
81 move s0, t2 81 move s0, t2
82 move a0, sp 82 move a0, sp
83 daddiu a1, v0, __NR_64_Linux
83 jal syscall_trace_enter 84 jal syscall_trace_enter
84 85
86 bltz v0, 2f # seccomp failed? Skip syscall
87
85 move t0, s0 88 move t0, s0
86 RESTORE_STATIC 89 RESTORE_STATIC
87 ld a0, PT_R4(sp) # Restore argument registers 90 ld a0, PT_R4(sp) # Restore argument registers
@@ -102,7 +105,7 @@ syscall_trace_entry:
102 sd t1, PT_R0(sp) # save it for syscall restarting 105 sd t1, PT_R0(sp) # save it for syscall restarting
1031: sd v0, PT_R2(sp) # result 1061: sd v0, PT_R2(sp) # result
104 107
105 j syscall_exit 1082: j syscall_exit
106 109
107illegal_syscall: 110illegal_syscall:
108 /* This also isn't a 64-bit syscall, throw an error. */ 111 /* This also isn't a 64-bit syscall, throw an error. */
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f7e5b72cf481..f68d2f4f0090 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,8 +72,11 @@ n32_syscall_trace_entry:
72 SAVE_STATIC 72 SAVE_STATIC
73 move s0, t2 73 move s0, t2
74 move a0, sp 74 move a0, sp
75 daddiu a1, v0, __NR_N32_Linux
75 jal syscall_trace_enter 76 jal syscall_trace_enter
76 77
78 bltz v0, 2f # seccomp failed? Skip syscall
79
77 move t0, s0 80 move t0, s0
78 RESTORE_STATIC 81 RESTORE_STATIC
79 ld a0, PT_R4(sp) # Restore argument registers 82 ld a0, PT_R4(sp) # Restore argument registers
@@ -94,7 +97,7 @@ n32_syscall_trace_entry:
94 sd t1, PT_R0(sp) # save it for syscall restarting 97 sd t1, PT_R0(sp) # save it for syscall restarting
951: sd v0, PT_R2(sp) # result 981: sd v0, PT_R2(sp) # result
96 99
97 j syscall_exit 1002: j syscall_exit
98 101
99not_n32_scall: 102not_n32_scall:
100 /* This is not an n32 compatibility syscall, pass it on to 103 /* This is not an n32 compatibility syscall, pass it on to
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6788727d91af..70f6acecd928 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -112,7 +112,20 @@ trace_a_syscall:
112 112
113 move s0, t2 # Save syscall pointer 113 move s0, t2 # Save syscall pointer
114 move a0, sp 114 move a0, sp
115 jal syscall_trace_enter 115 /*
116 * syscall number is in v0 unless we called syscall(__NR_###)
117 * where the real syscall number is in a0
118 * note: NR_syscall is the first O32 syscall but the macro is
119 * only defined when compiling with -mabi=32 (CONFIG_32BIT)
120 * therefore __NR_O32_Linux is used (4000)
121 */
122 addiu a1, v0, __NR_O32_Linux
123 bnez v0, 1f /* __NR_syscall at offset 0 */
124 lw a1, PT_R4(sp)
125
1261: jal syscall_trace_enter
127
128 bltz v0, 2f # seccomp failed? Skip syscall
116 129
117 move t0, s0 130 move t0, s0
118 RESTORE_STATIC 131 RESTORE_STATIC
@@ -136,7 +149,7 @@ trace_a_syscall:
136 sd t1, PT_R0(sp) # save it for syscall restarting 149 sd t1, PT_R0(sp) # save it for syscall restarting
1371: sd v0, PT_R2(sp) # result 1501: sd v0, PT_R2(sp) # result
138 151
139 j syscall_exit 1522: j syscall_exit
140 153
141/* ------------------------------------------------------------------------ */ 154/* ------------------------------------------------------------------------ */
142 155
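The bltz v0 checks added to each syscall entry path are what make seccomp filter mode usable on MIPS: when secure_computing() rejects a call, syscall_trace_enter() returns -1 and the stub jumps straight to syscall_exit without dispatching the syscall. A minimal user-space filter of the kind this enables, using the standard seccomp-BPF API (a production filter would also check the audit arch field before trusting the syscall number):

	#include <errno.h>
	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <stddef.h>
	#include <stdio.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct sock_filter filter[] = {
			/* Load the syscall number from seccomp_data. */
			BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
				 offsetof(struct seccomp_data, nr)),
			/* getpid -> fail with ENOSYS, everything else -> allow. */
			BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | ENOSYS),
			BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
		};
		struct sock_fprog prog = {
			.len = sizeof(filter) / sizeof(filter[0]),
			.filter = filter,
		};

		if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
		    prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
			return 1;

		errno = 0;
		printf("getpid() = %ld, errno = %d\n",
		       (long)syscall(SYS_getpid), errno);
		return 0;
	}

With the filter installed, the getpid() above is skipped in the kernel via the new 2: paths and returns -1 with errno set to ENOSYS.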
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5199563c4403..33133d3df3e5 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * Copyright (C) 1994 - 2000 Ralf Baechle 7 * Copyright (C) 1994 - 2000 Ralf Baechle
8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 8 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
9 * Copyright (C) 2014, Imagination Technologies Ltd.
9 */ 10 */
10#include <linux/cache.h> 11#include <linux/cache.h>
11#include <linux/context_tracking.h> 12#include <linux/context_tracking.h>
@@ -30,6 +31,7 @@
30#include <linux/bitops.h> 31#include <linux/bitops.h>
31#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
32#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/msa.h>
33#include <asm/sim.h> 35#include <asm/sim.h>
34#include <asm/ucontext.h> 36#include <asm/ucontext.h>
35#include <asm/cpu-features.h> 37#include <asm/cpu-features.h>
@@ -46,8 +48,8 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
46extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 48extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
47extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 49extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
48 50
49extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc); 51extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
50extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc); 52extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
51 53
52struct sigframe { 54struct sigframe {
53 u32 sf_ass[4]; /* argument save space for o32 */ 55 u32 sf_ass[4]; /* argument save space for o32 */
@@ -64,17 +66,95 @@ struct rt_sigframe {
64}; 66};
65 67
66/* 68/*
69 * Thread saved context copy to/from a signal context presumed to be on the
70 * user stack, and therefore accessed with appropriate macros from uaccess.h.
71 */
72static int copy_fp_to_sigcontext(struct sigcontext __user *sc)
73{
74 int i;
75 int err = 0;
76
77 for (i = 0; i < NUM_FPU_REGS; i++) {
78 err |=
79 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
80 &sc->sc_fpregs[i]);
81 }
82 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
83
84 return err;
85}
86
87static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
88{
89 int i;
90 int err = 0;
91 u64 fpr_val;
92
93 for (i = 0; i < NUM_FPU_REGS; i++) {
94 err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
95 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
96 }
97 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
98
99 return err;
100}
101
102/*
103 * These functions will save only the upper 64 bits of the vector registers,
104 * since the lower 64 bits have already been saved as the scalar FP context.
105 */
106static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
107{
108 int i;
109 int err = 0;
110
111 for (i = 0; i < NUM_FPU_REGS; i++) {
112 err |=
113 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
114 &sc->sc_msaregs[i]);
115 }
116 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
117
118 return err;
119}
120
121static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
122{
123 int i;
124 int err = 0;
125 u64 val;
126
127 for (i = 0; i < NUM_FPU_REGS; i++) {
128 err |= __get_user(val, &sc->sc_msaregs[i]);
129 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
130 }
131 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136/*
67 * Helper routines 137 * Helper routines
68 */ 138 */
69static int protected_save_fp_context(struct sigcontext __user *sc) 139static int protected_save_fp_context(struct sigcontext __user *sc,
140 unsigned used_math)
70{ 141{
71 int err; 142 int err;
143 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
144#ifndef CONFIG_EVA
72 while (1) { 145 while (1) {
73 lock_fpu_owner(); 146 lock_fpu_owner();
74 err = own_fpu_inatomic(1); 147 if (is_fpu_owner()) {
75 if (!err) 148 err = save_fp_context(sc);
76 err = save_fp_context(sc); /* this might fail */ 149 if (save_msa && !err)
77 unlock_fpu_owner(); 150 err = _save_msa_context(sc);
151 unlock_fpu_owner();
152 } else {
153 unlock_fpu_owner();
154 err = copy_fp_to_sigcontext(sc);
155 if (save_msa && !err)
156 err = copy_msa_to_sigcontext(sc);
157 }
78 if (likely(!err)) 158 if (likely(!err))
79 break; 159 break;
80 /* touch the sigcontext and try again */ 160 /* touch the sigcontext and try again */
@@ -84,18 +164,44 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 if (err) 164 if (err)
85 break; /* really bad sigcontext */ 165 break; /* really bad sigcontext */
86 } 166 }
167#else
168 /*
169 * EVA does not have FPU EVA instructions so saving fpu context directly
170 * does not work.
171 */
172 disable_msa();
173 lose_fpu(1);
174 err = save_fp_context(sc); /* this might fail */
175 if (save_msa && !err)
176 err = copy_msa_to_sigcontext(sc);
177#endif
87 return err; 178 return err;
88} 179}
89 180
90static int protected_restore_fp_context(struct sigcontext __user *sc) 181static int protected_restore_fp_context(struct sigcontext __user *sc,
182 unsigned used_math)
91{ 183{
92 int err, tmp __maybe_unused; 184 int err, tmp __maybe_unused;
185 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
186#ifndef CONFIG_EVA
93 while (1) { 187 while (1) {
94 lock_fpu_owner(); 188 lock_fpu_owner();
95 err = own_fpu_inatomic(0); 189 if (is_fpu_owner()) {
96 if (!err) 190 err = restore_fp_context(sc);
97 err = restore_fp_context(sc); /* this might fail */ 191 if (restore_msa && !err) {
98 unlock_fpu_owner(); 192 enable_msa();
193 err = _restore_msa_context(sc);
194 } else {
195 /* signal handler may have used MSA */
196 disable_msa();
197 }
198 unlock_fpu_owner();
199 } else {
200 unlock_fpu_owner();
201 err = copy_fp_from_sigcontext(sc);
202 if (!err && (used_math & USEDMATH_MSA))
203 err = copy_msa_from_sigcontext(sc);
204 }
99 if (likely(!err)) 205 if (likely(!err))
100 break; 206 break;
101 /* touch the sigcontext and try again */ 207 /* touch the sigcontext and try again */
@@ -105,6 +211,17 @@ static int protected_restore_fp_context(struct sigcontext __user *sc)
105 if (err) 211 if (err)
106 break; /* really bad sigcontext */ 212 break; /* really bad sigcontext */
107 } 213 }
214#else
215 /*
216 * EVA does not have FPU EVA instructions so restoring fpu context
217 * directly does not work.
218 */
219 enable_msa();
220 lose_fpu(0);
221 err = restore_fp_context(sc); /* this might fail */
222 if (restore_msa && !err)
223 err = copy_msa_from_sigcontext(sc);
224#endif
108 return err; 225 return err;
109} 226}
110 227
@@ -135,7 +252,8 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
135 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 252 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
136 } 253 }
137 254
138 used_math = !!used_math(); 255 used_math = used_math() ? USEDMATH_FP : 0;
256 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
139 err |= __put_user(used_math, &sc->sc_used_math); 257 err |= __put_user(used_math, &sc->sc_used_math);
140 258
141 if (used_math) { 259 if (used_math) {
@@ -143,7 +261,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
143 * Save FPU state to signal context. Signal handler 261 * Save FPU state to signal context. Signal handler
144 * will "inherit" current FPU state. 262 * will "inherit" current FPU state.
145 */ 263 */
146 err |= protected_save_fp_context(sc); 264 err |= protected_save_fp_context(sc, used_math);
147 } 265 }
148 return err; 266 return err;
149} 267}
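sc_used_math grows from a boolean into a small bitmask so the restore side knows whether MSA state accompanies the scalar FP state. The flag values and the thread_msa_context_live() test are presumably along these lines (a sketch; the flag name TIF_MSA_CTX_LIVE and the exact placement of the definitions are assumptions):

	#define USEDMATH_FP	(1 << 0)
	#define USEDMATH_MSA	(1 << 1)

	/* True if this thread has live MSA vector context worth saving. */
	static inline bool thread_msa_context_live(void)
	{
		if (!cpu_has_msa)
			return false;
		return test_thread_flag(TIF_MSA_CTX_LIVE);
	}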
@@ -168,14 +286,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
168} 286}
169 287
170static int 288static int
171check_and_restore_fp_context(struct sigcontext __user *sc) 289check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
172{ 290{
173 int err, sig; 291 int err, sig;
174 292
175 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 293 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
176 if (err > 0) 294 if (err > 0)
177 err = 0; 295 err = 0;
178 err |= protected_restore_fp_context(sc); 296 err |= protected_restore_fp_context(sc, used_math);
179 return err ?: sig; 297 return err ?: sig;
180} 298}
181 299
@@ -215,9 +333,10 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
215 if (used_math) { 333 if (used_math) {
216 /* restore fpu context if we have used it before */ 334 /* restore fpu context if we have used it before */
217 if (!err) 335 if (!err)
218 err = check_and_restore_fp_context(sc); 336 err = check_and_restore_fp_context(sc, used_math);
219 } else { 337 } else {
220 /* signal handler may have used FPU. Give it up. */ 338 /* signal handler may have used FPU or MSA. Disable them. */
339 disable_msa();
221 lose_fpu(0); 340 lose_fpu(0);
222 } 341 }
223 342
@@ -591,23 +710,26 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
591} 710}
592 711
593#ifdef CONFIG_SMP 712#ifdef CONFIG_SMP
713#ifndef CONFIG_EVA
594static int smp_save_fp_context(struct sigcontext __user *sc) 714static int smp_save_fp_context(struct sigcontext __user *sc)
595{ 715{
596 return raw_cpu_has_fpu 716 return raw_cpu_has_fpu
597 ? _save_fp_context(sc) 717 ? _save_fp_context(sc)
598 : fpu_emulator_save_context(sc); 718 : copy_fp_to_sigcontext(sc);
599} 719}
600 720
601static int smp_restore_fp_context(struct sigcontext __user *sc) 721static int smp_restore_fp_context(struct sigcontext __user *sc)
602{ 722{
603 return raw_cpu_has_fpu 723 return raw_cpu_has_fpu
604 ? _restore_fp_context(sc) 724 ? _restore_fp_context(sc)
605 : fpu_emulator_restore_context(sc); 725 : copy_fp_from_sigcontext(sc);
606} 726}
727#endif /* CONFIG_EVA */
607#endif 728#endif
608 729
609static int signal_setup(void) 730static int signal_setup(void)
610{ 731{
732#ifndef CONFIG_EVA
611#ifdef CONFIG_SMP 733#ifdef CONFIG_SMP
612 /* For now just do the cpu_has_fpu check when the functions are invoked */ 734 /* For now just do the cpu_has_fpu check when the functions are invoked */
613 save_fp_context = smp_save_fp_context; 735 save_fp_context = smp_save_fp_context;
@@ -617,9 +739,13 @@ static int signal_setup(void)
617 save_fp_context = _save_fp_context; 739 save_fp_context = _save_fp_context;
618 restore_fp_context = _restore_fp_context; 740 restore_fp_context = _restore_fp_context;
619 } else { 741 } else {
620 save_fp_context = fpu_emulator_save_context; 742 save_fp_context = copy_fp_from_sigcontext;
621 restore_fp_context = fpu_emulator_restore_context; 743 restore_fp_context = copy_fp_to_sigcontext;
622 } 744 }
745#endif /* CONFIG_SMP */
746#else
747 save_fp_context = copy_fp_from_sigcontext;;
748 restore_fp_context = copy_fp_to_sigcontext;
623#endif 749#endif
624 750
625 return 0; 751 return 0;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 3d60f7750fa8..299f956e4db3 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,6 +30,7 @@
30#include <asm/sim.h> 30#include <asm/sim.h>
31#include <asm/ucontext.h> 31#include <asm/ucontext.h>
32#include <asm/fpu.h> 32#include <asm/fpu.h>
33#include <asm/msa.h>
33#include <asm/war.h> 34#include <asm/war.h>
34#include <asm/vdso.h> 35#include <asm/vdso.h>
35#include <asm/dsp.h> 36#include <asm/dsp.h>
@@ -42,8 +43,8 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
42extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 43extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
43extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 44extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
44 45
45extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc); 46extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
46extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc); 47extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
47 48
48/* 49/*
49 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 50 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
@@ -78,17 +79,96 @@ struct rt_sigframe32 {
78}; 79};
79 80
80/* 81/*
82 * Thread saved context copy to/from a signal context presumed to be on the
83 * user stack, and therefore accessed with appropriate macros from uaccess.h.
84 */
85static int copy_fp_to_sigcontext32(struct sigcontext32 __user *sc)
86{
87 int i;
88 int err = 0;
89 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
90
91 for (i = 0; i < NUM_FPU_REGS; i += inc) {
92 err |=
93 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
94 &sc->sc_fpregs[i]);
95 }
96 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
97
98 return err;
99}
100
101static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
102{
103 int i;
104 int err = 0;
105 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
106 u64 fpr_val;
107
108 for (i = 0; i < NUM_FPU_REGS; i += inc) {
109 err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
110 set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
111 }
112 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
113
114 return err;
115}
116
117/*
118 * These functions will save only the upper 64 bits of the vector registers,
119 * since the lower 64 bits have already been saved as the scalar FP context.
120 */
121static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
122{
123 int i;
124 int err = 0;
125
126 for (i = 0; i < NUM_FPU_REGS; i++) {
127 err |=
128 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
129 &sc->sc_msaregs[i]);
130 }
131 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
137{
138 int i;
139 int err = 0;
140 u64 val;
141
142 for (i = 0; i < NUM_FPU_REGS; i++) {
143 err |= __get_user(val, &sc->sc_msaregs[i]);
144 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
145 }
146 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
147
148 return err;
149}
150
151/*
81 * sigcontext handlers 152 * sigcontext handlers
82 */ 153 */
83static int protected_save_fp_context32(struct sigcontext32 __user *sc) 154static int protected_save_fp_context32(struct sigcontext32 __user *sc,
155 unsigned used_math)
84{ 156{
85 int err; 157 int err;
158 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
86 while (1) { 159 while (1) {
87 lock_fpu_owner(); 160 lock_fpu_owner();
88 err = own_fpu_inatomic(1); 161 if (is_fpu_owner()) {
89 if (!err) 162 err = save_fp_context32(sc);
90 err = save_fp_context32(sc); /* this might fail */ 163 if (save_msa && !err)
91 unlock_fpu_owner(); 164 err = _save_msa_context32(sc);
165 unlock_fpu_owner();
166 } else {
167 unlock_fpu_owner();
168 err = copy_fp_to_sigcontext32(sc);
169 if (save_msa && !err)
170 err = copy_msa_to_sigcontext32(sc);
171 }
92 if (likely(!err)) 172 if (likely(!err))
93 break; 173 break;
94 /* touch the sigcontext and try again */ 174 /* touch the sigcontext and try again */
@@ -101,15 +181,29 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
101 return err; 181 return err;
102} 182}
103 183
104static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 184static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
185 unsigned used_math)
105{ 186{
106 int err, tmp __maybe_unused; 187 int err, tmp __maybe_unused;
188 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
107 while (1) { 189 while (1) {
108 lock_fpu_owner(); 190 lock_fpu_owner();
109 err = own_fpu_inatomic(0); 191 if (is_fpu_owner()) {
110 if (!err) 192 err = restore_fp_context32(sc);
111 err = restore_fp_context32(sc); /* this might fail */ 193 if (restore_msa && !err) {
112 unlock_fpu_owner(); 194 enable_msa();
195 err = _restore_msa_context32(sc);
196 } else {
197 /* signal handler may have used MSA */
198 disable_msa();
199 }
200 unlock_fpu_owner();
201 } else {
202 unlock_fpu_owner();
203 err = copy_fp_from_sigcontext32(sc);
204 if (restore_msa && !err)
205 err = copy_msa_from_sigcontext32(sc);
206 }
113 if (likely(!err)) 207 if (likely(!err))
114 break; 208 break;
115 /* touch the sigcontext and try again */ 209 /* touch the sigcontext and try again */
@@ -147,7 +241,8 @@ static int setup_sigcontext32(struct pt_regs *regs,
147 err |= __put_user(mflo3(), &sc->sc_lo3); 241 err |= __put_user(mflo3(), &sc->sc_lo3);
148 } 242 }
149 243
150 used_math = !!used_math(); 244 used_math = used_math() ? USEDMATH_FP : 0;
245 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
151 err |= __put_user(used_math, &sc->sc_used_math); 246 err |= __put_user(used_math, &sc->sc_used_math);
152 247
153 if (used_math) { 248 if (used_math) {
@@ -155,20 +250,21 @@ static int setup_sigcontext32(struct pt_regs *regs,
155 * Save FPU state to signal context. Signal handler 250 * Save FPU state to signal context. Signal handler
156 * will "inherit" current FPU state. 251 * will "inherit" current FPU state.
157 */ 252 */
158 err |= protected_save_fp_context32(sc); 253 err |= protected_save_fp_context32(sc, used_math);
159 } 254 }
160 return err; 255 return err;
161} 256}
162 257
163static int 258static int
164check_and_restore_fp_context32(struct sigcontext32 __user *sc) 259check_and_restore_fp_context32(struct sigcontext32 __user *sc,
260 unsigned used_math)
165{ 261{
166 int err, sig; 262 int err, sig;
167 263
168 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 264 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
169 if (err > 0) 265 if (err > 0)
170 err = 0; 266 err = 0;
171 err |= protected_restore_fp_context32(sc); 267 err |= protected_restore_fp_context32(sc, used_math);
172 return err ?: sig; 268 return err ?: sig;
173} 269}
174 270
@@ -205,9 +301,10 @@ static int restore_sigcontext32(struct pt_regs *regs,
205 if (used_math) { 301 if (used_math) {
206 /* restore fpu context if we have used it before */ 302 /* restore fpu context if we have used it before */
207 if (!err) 303 if (!err)
208 err = check_and_restore_fp_context32(sc); 304 err = check_and_restore_fp_context32(sc, used_math);
209 } else { 305 } else {
210 /* signal handler may have used FPU. Give it up. */ 306 /* signal handler may have used FPU or MSA. Disable them. */
307 disable_msa();
211 lose_fpu(0); 308 lose_fpu(0);
212 } 309 }
213 310
@@ -566,8 +663,8 @@ static int signal32_init(void)
566 save_fp_context32 = _save_fp_context32; 663 save_fp_context32 = _save_fp_context32;
567 restore_fp_context32 = _restore_fp_context32; 664 restore_fp_context32 = _restore_fp_context32;
568 } else { 665 } else {
569 save_fp_context32 = fpu_emulator_save_context32; 666 save_fp_context32 = copy_fp_to_sigcontext32;
570 restore_fp_context32 = fpu_emulator_restore_context32; 667 restore_fp_context32 = copy_fp_from_sigcontext32;
571 } 668 }
572 669
573 return 0; 670 return 0;
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 1b925d8a610c..3ef55fb7ac03 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -39,57 +39,9 @@
39#include <asm/amon.h> 39#include <asm/amon.h>
40#include <asm/gic.h> 40#include <asm/gic.h>
41 41
42static void ipi_call_function(unsigned int cpu)
43{
44 pr_debug("CPU%d: %s cpu %d status %08x\n",
45 smp_processor_id(), __func__, cpu, read_c0_status());
46
47 gic_send_ipi(plat_ipi_call_int_xlate(cpu));
48}
49
50
51static void ipi_resched(unsigned int cpu)
52{
53 pr_debug("CPU%d: %s cpu %d status %08x\n",
54 smp_processor_id(), __func__, cpu, read_c0_status());
55
56 gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
57}
58
59/*
60 * FIXME: This isn't restricted to CMP
61 * The SMVP kernel could use GIC interrupts if available
62 */
63void cmp_send_ipi_single(int cpu, unsigned int action)
64{
65 unsigned long flags;
66
67 local_irq_save(flags);
68
69 switch (action) {
70 case SMP_CALL_FUNCTION:
71 ipi_call_function(cpu);
72 break;
73
74 case SMP_RESCHEDULE_YOURSELF:
75 ipi_resched(cpu);
76 break;
77 }
78
79 local_irq_restore(flags);
80}
81
82static void cmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
83{
84 unsigned int i;
85
86 for_each_cpu(i, mask)
87 cmp_send_ipi_single(i, action);
88}
89
90static void cmp_init_secondary(void) 42static void cmp_init_secondary(void)
91{ 43{
92 struct cpuinfo_mips *c = &current_cpu_data; 44 struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
93 45
94 /* Assume GIC is present */ 46 /* Assume GIC is present */
95 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | 47 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
@@ -97,7 +49,6 @@ static void cmp_init_secondary(void)
97 49
98 /* Enable per-cpu interrupts: platform specific */ 50 /* Enable per-cpu interrupts: platform specific */
99 51
100 c->core = (read_c0_ebase() >> 1) & 0x1ff;
101#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) 52#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
102 if (cpu_has_mipsmt) 53 if (cpu_has_mipsmt)
103 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & 54 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
@@ -210,8 +161,8 @@ void __init cmp_prepare_cpus(unsigned int max_cpus)
210} 161}
211 162
212struct plat_smp_ops cmp_smp_ops = { 163struct plat_smp_ops cmp_smp_ops = {
213 .send_ipi_single = cmp_send_ipi_single, 164 .send_ipi_single = gic_send_ipi_single,
214 .send_ipi_mask = cmp_send_ipi_mask, 165 .send_ipi_mask = gic_send_ipi_mask,
215 .init_secondary = cmp_init_secondary, 166 .init_secondary = cmp_init_secondary,
216 .smp_finish = cmp_smp_finish, 167 .smp_finish = cmp_smp_finish,
217 .cpus_done = cmp_cpus_done, 168 .cpus_done = cmp_cpus_done,
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 000000000000..536eec0d21b6
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,335 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 */
10
11#include <linux/io.h>
12#include <linux/sched.h>
13#include <linux/slab.h>
14#include <linux/smp.h>
15#include <linux/types.h>
16
17#include <asm/cacheflush.h>
18#include <asm/gic.h>
19#include <asm/mips-cm.h>
20#include <asm/mips-cpc.h>
21#include <asm/mips_mt.h>
22#include <asm/mipsregs.h>
23#include <asm/smp-cps.h>
24#include <asm/time.h>
25#include <asm/uasm.h>
26
27static DECLARE_BITMAP(core_power, NR_CPUS);
28
29struct boot_config mips_cps_bootcfg;
30
31static void init_core(void)
32{
33 unsigned int nvpes, t;
34 u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status;
35
36 if (!cpu_has_mipsmt)
37 return;
38
39 /* Enter VPE configuration state */
40 dvpe();
41 set_c0_mvpcontrol(MVPCONTROL_VPC);
42
43 /* Retrieve the count of VPEs in this core */
44 mvpconf0 = read_c0_mvpconf0();
45 nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
46 smp_num_siblings = nvpes;
47
48 for (t = 1; t < nvpes; t++) {
49 /* Use a 1:1 mapping of TC index to VPE index */
50 settc(t);
51
52 /* Bind 1 TC to this VPE */
53 tcbind = read_tc_c0_tcbind();
54 tcbind &= ~TCBIND_CURVPE;
55 tcbind |= t << TCBIND_CURVPE_SHIFT;
56 write_tc_c0_tcbind(tcbind);
57
58 /* Set exclusive TC, non-active, master */
59 vpeconf0 = read_vpe_c0_vpeconf0();
60 vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA);
61 vpeconf0 |= t << VPECONF0_XTC_SHIFT;
62 vpeconf0 |= VPECONF0_MVP;
63 write_vpe_c0_vpeconf0(vpeconf0);
64
65 /* Declare TC non-active, non-allocatable & interrupt exempt */
66 tcstatus = read_tc_c0_tcstatus();
67 tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA);
68 tcstatus |= TCSTATUS_IXMT;
69 write_tc_c0_tcstatus(tcstatus);
70
71 /* Halt the TC */
72 write_tc_c0_tchalt(TCHALT_H);
73
74 /* Allow only 1 TC to execute */
75 vpecontrol = read_vpe_c0_vpecontrol();
76 vpecontrol &= ~VPECONTROL_TE;
77 write_vpe_c0_vpecontrol(vpecontrol);
78
79 /* Copy (most of) Status from VPE 0 */
80 status = read_c0_status();
81 status &= ~(ST0_IM | ST0_IE | ST0_KSU);
82 status |= ST0_CU0;
83 write_vpe_c0_status(status);
84
85 /* Copy Config from VPE 0 */
86 write_vpe_c0_config(read_c0_config());
87 write_vpe_c0_config7(read_c0_config7());
88
89 /* Ensure no software interrupts are pending */
90 write_vpe_c0_cause(0);
91
92 /* Sync Count */
93 write_vpe_c0_count(read_c0_count());
94 }
95
96 /* Leave VPE configuration state */
97 clear_c0_mvpcontrol(MVPCONTROL_VPC);
98}
99
100static void __init cps_smp_setup(void)
101{
102 unsigned int ncores, nvpes, core_vpes;
103 int c, v;
104 u32 core_cfg, *entry_code;
105
106 /* Detect & record VPE topology */
107 ncores = mips_cm_numcores();
108 pr_info("VPE topology ");
109 for (c = nvpes = 0; c < ncores; c++) {
110 if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) {
111 write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF);
112 core_cfg = read_gcr_co_config();
113 core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >>
114 CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
115 } else {
116 core_vpes = 1;
117 }
118
119 pr_cont("%c%u", c ? ',' : '{', core_vpes);
120
121 for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
122 cpu_data[nvpes + v].core = c;
123#ifdef CONFIG_MIPS_MT_SMP
124 cpu_data[nvpes + v].vpe_id = v;
125#endif
126 }
127
128 nvpes += core_vpes;
129 }
130 pr_cont("} total %u\n", nvpes);
131
132 /* Indicate present CPUs (CPU being synonymous with VPE) */
133 for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
134 set_cpu_possible(v, true);
135 set_cpu_present(v, true);
136 __cpu_number_map[v] = v;
137 __cpu_logical_map[v] = v;
138 }
139
140 /* Core 0 is powered up (we're running on it) */
141 bitmap_set(core_power, 0, 1);
142
143 /* Disable MT - we only want to run 1 TC per VPE */
144 if (cpu_has_mipsmt)
145 dmt();
146
147 /* Initialise core 0 */
148 init_core();
149
150 /* Patch the start of mips_cps_core_entry to provide the CM base */
151 entry_code = (u32 *)&mips_cps_core_entry;
152 UASM_i_LA(&entry_code, 3, (long)mips_cm_base);
153
154 /* Make core 0 coherent with everything */
155 write_gcr_cl_coherence(0xff);
156}
157
158static void __init cps_prepare_cpus(unsigned int max_cpus)
159{
160 mips_mt_set_cpuoptions();
161}
162
163static void boot_core(struct boot_config *cfg)
164{
165 u32 access;
166
167 /* Select the appropriate core */
168 write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF);
169
170 /* Set its reset vector */
171 write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
172
173 /* Ensure its coherency is disabled */
174 write_gcr_co_coherence(0);
175
176 /* Ensure the core can access the GCRs */
177 access = read_gcr_access();
178 access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core);
179 write_gcr_access(access);
180
181 /* Copy cfg */
182 mips_cps_bootcfg = *cfg;
183
184 if (mips_cpc_present()) {
185 /* Select the appropriate core */
186 write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF);
187
188 /* Reset the core */
189 write_cpc_co_cmd(CPC_Cx_CMD_RESET);
190 } else {
191 /* Take the core out of reset */
192 write_gcr_co_reset_release(0);
193 }
194
195 /* The core is now powered up */
196 bitmap_set(core_power, cfg->core, 1);
197}
198
199static void boot_vpe(void *info)
200{
201 struct boot_config *cfg = info;
202 u32 tcstatus, vpeconf0;
203
204 /* Enter VPE configuration state */
205 dvpe();
206 set_c0_mvpcontrol(MVPCONTROL_VPC);
207
208 settc(cfg->vpe);
209
210 /* Set the TC restart PC */
211 write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
212
213 /* Activate the TC, allow interrupts */
214 tcstatus = read_tc_c0_tcstatus();
215 tcstatus &= ~TCSTATUS_IXMT;
216 tcstatus |= TCSTATUS_A;
217 write_tc_c0_tcstatus(tcstatus);
218
219 /* Clear the TC halt bit */
220 write_tc_c0_tchalt(0);
221
222 /* Activate the VPE */
223 vpeconf0 = read_vpe_c0_vpeconf0();
224 vpeconf0 |= VPECONF0_VPA;
225 write_vpe_c0_vpeconf0(vpeconf0);
226
227 /* Set the stack & global pointer registers */
228 write_tc_gpr_sp(cfg->sp);
229 write_tc_gpr_gp(cfg->gp);
230
231 /* Leave VPE configuration state */
232 clear_c0_mvpcontrol(MVPCONTROL_VPC);
233
234 /* Enable other VPEs to execute */
235 evpe(EVPE_ENABLE);
236}
237
238static void cps_boot_secondary(int cpu, struct task_struct *idle)
239{
240 struct boot_config cfg;
241 unsigned int remote;
242 int err;
243
244 cfg.core = cpu_data[cpu].core;
245 cfg.vpe = cpu_vpe_id(&cpu_data[cpu]);
246 cfg.pc = (unsigned long)&smp_bootstrap;
247 cfg.sp = __KSTK_TOS(idle);
248 cfg.gp = (unsigned long)task_thread_info(idle);
249
250 if (!test_bit(cfg.core, core_power)) {
251 /* Boot a VPE on a powered down core */
252 boot_core(&cfg);
253 return;
254 }
255
256 if (cfg.core != current_cpu_data.core) {
257 /* Boot a VPE on another powered up core */
258 for (remote = 0; remote < NR_CPUS; remote++) {
259 if (cpu_data[remote].core != cfg.core)
260 continue;
261 if (cpu_online(remote))
262 break;
263 }
264 BUG_ON(remote >= NR_CPUS);
265
266 err = smp_call_function_single(remote, boot_vpe, &cfg, 1);
267 if (err)
268 panic("Failed to call remote CPU\n");
269 return;
270 }
271
272 BUG_ON(!cpu_has_mipsmt);
273
274 /* Boot a VPE on this core */
275 boot_vpe(&cfg);
276}
277
278static void cps_init_secondary(void)
279{
280 /* Disable MT - we only want to run 1 TC per VPE */
281 if (cpu_has_mipsmt)
282 dmt();
283
284 /* TODO: revisit this assumption once hotplug is implemented */
285 if (cpu_vpe_id(&current_cpu_data) == 0)
286 init_core();
287
288 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
289 STATUSF_IP6 | STATUSF_IP7);
290}
291
292static void cps_smp_finish(void)
293{
294 write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
295
296#ifdef CONFIG_MIPS_MT_FPAFF
297 /* If we have an FPU, enroll ourselves in the FPU-full mask */
298 if (cpu_has_fpu)
299 cpu_set(smp_processor_id(), mt_fpu_cpumask);
300#endif /* CONFIG_MIPS_MT_FPAFF */
301
302 local_irq_enable();
303}
304
305static void cps_cpus_done(void)
306{
307}
308
309static struct plat_smp_ops cps_smp_ops = {
310 .smp_setup = cps_smp_setup,
311 .prepare_cpus = cps_prepare_cpus,
312 .boot_secondary = cps_boot_secondary,
313 .init_secondary = cps_init_secondary,
314 .smp_finish = cps_smp_finish,
315 .send_ipi_single = gic_send_ipi_single,
316 .send_ipi_mask = gic_send_ipi_mask,
317 .cpus_done = cps_cpus_done,
318};
319
320int register_cps_smp_ops(void)
321{
322 if (!mips_cm_present()) {
323 pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
324 return -ENODEV;
325 }
326
327 /* check we have a GIC - we need one for IPIs */
328 if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
329 pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
330 return -ENODEV;
331 }
332
333 register_smp_ops(&cps_smp_ops);
334 return 0;
335}
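
register_cps_smp_ops() above only succeeds when a Coherence Manager and a usable GIC are present. As a minimal sketch (not part of this patch), platform setup code of this era could try CPS first and fall back when it returns an error; the helper name below is hypothetical and the declaration is assumed to live in <asm/smp-ops.h>:

#include <linux/init.h>
#include <linux/printk.h>
#include <asm/smp-ops.h>

static void __init example_plat_smp_setup(void)	/* hypothetical helper */
{
	/* Prefer CPS; it returns -ENODEV without a CM or a usable GIC. */
	if (register_cps_smp_ops())
		pr_info("CPS SMP unavailable, using another plat_smp_ops\n");
}
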
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
new file mode 100644
index 000000000000..3bb1f92ab525
--- /dev/null
+++ b/arch/mips/kernel/smp-gic.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright (C) 2013 Imagination Technologies
3 * Author: Paul Burton <paul.burton@imgtec.com>
4 *
5 * Based on smp-cmp.c:
6 * Copyright (C) 2007 MIPS Technologies, Inc.
7 * Author: Chris Dearman (chris@mips.com)
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#include <linux/printk.h>
16
17#include <asm/gic.h>
18#include <asm/smp-ops.h>
19
20void gic_send_ipi_single(int cpu, unsigned int action)
21{
22 unsigned long flags;
23 unsigned int intr;
24
25 pr_debug("CPU%d: %s cpu %d action %u status %08x\n",
26 smp_processor_id(), __func__, cpu, action, read_c0_status());
27
28 local_irq_save(flags);
29
30 switch (action) {
31 case SMP_CALL_FUNCTION:
32 intr = plat_ipi_call_int_xlate(cpu);
33 break;
34
35 case SMP_RESCHEDULE_YOURSELF:
36 intr = plat_ipi_resched_int_xlate(cpu);
37 break;
38
39 default:
40 BUG();
41 }
42
43 gic_send_ipi(intr);
44 local_irq_restore(flags);
45}
46
47void gic_send_ipi_mask(const struct cpumask *mask, unsigned int action)
48{
49 unsigned int i;
50
51 for_each_cpu(i, mask)
52 gic_send_ipi_single(i, action);
53}
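
gic_send_ipi_single() above maps the generic SMP action onto a per-CPU GIC interrupt through the platform xlate hooks, and gic_send_ipi_mask() simply fans that out over a cpumask. A hedged usage sketch (the cpumask construction and the header locations are assumptions, not code from this patch):

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/smp.h>		/* SMP_RESCHEDULE_YOURSELF, assumed location */
#include <asm/smp-ops.h>	/* gic_send_ipi_mask(), assumed location */

static void kick_other_cpus_example(void)	/* hypothetical helper */
{
	struct cpumask targets;

	/* caller is assumed to have preemption disabled */
	cpumask_copy(&targets, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &targets);
	gic_send_ipi_mask(&targets, SMP_RESCHEDULE_YOURSELF);
}
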
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 0fb8cefc9114..f8e13149604d 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -113,27 +113,6 @@ static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
113 write_tc_c0_tchalt(TCHALT_H); 113 write_tc_c0_tchalt(TCHALT_H);
114} 114}
115 115
116#ifdef CONFIG_IRQ_GIC
117static void mp_send_ipi_single(int cpu, unsigned int action)
118{
119 unsigned long flags;
120
121 local_irq_save(flags);
122
123 switch (action) {
124 case SMP_CALL_FUNCTION:
125 gic_send_ipi(plat_ipi_call_int_xlate(cpu));
126 break;
127
128 case SMP_RESCHEDULE_YOURSELF:
129 gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
130 break;
131 }
132
133 local_irq_restore(flags);
134}
135#endif
136
137static void vsmp_send_ipi_single(int cpu, unsigned int action) 116static void vsmp_send_ipi_single(int cpu, unsigned int action)
138{ 117{
139 int i; 118 int i;
@@ -142,7 +121,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
142 121
143#ifdef CONFIG_IRQ_GIC 122#ifdef CONFIG_IRQ_GIC
144 if (gic_present) { 123 if (gic_present) {
145 mp_send_ipi_single(cpu, action); 124 gic_send_ipi_single(cpu, action);
146 return; 125 return;
147 } 126 }
148#endif 127#endif
@@ -313,3 +292,25 @@ struct plat_smp_ops vsmp_smp_ops = {
313 .smp_setup = vsmp_smp_setup, 292 .smp_setup = vsmp_smp_setup,
314 .prepare_cpus = vsmp_prepare_cpus, 293 .prepare_cpus = vsmp_prepare_cpus,
315}; 294};
295
296static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
297 unsigned long action_unused, void *data)
298{
299 struct proc_cpuinfo_notifier_args *pcn = data;
300 struct seq_file *m = pcn->m;
301 unsigned long n = pcn->n;
302
303 if (!cpu_has_mipsmt)
304 return NOTIFY_OK;
305
306 seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
307
308 return NOTIFY_OK;
309}
310
311static int __init proc_cpuinfo_notifier_init(void)
312{
313 return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
314}
315
316subsys_initcall(proc_cpuinfo_notifier_init);
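
The notifier added above is the generic hook for appending MT-specific fields to /proc/cpuinfo. The same registration pattern, sketched for an arbitrary extra field; only the boilerplate mirrors the code above, the field name is made up and proc_cpuinfo_notifier() is assumed to come from <asm/cpu-info.h>:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <asm/cpu-info.h>

static int example_cpuinfo_call(struct notifier_block *nfb,
				unsigned long action_unused, void *data)
{
	struct proc_cpuinfo_notifier_args *pcn = data;

	/* pcn->m is the seq_file, pcn->n the CPU being printed */
	seq_printf(pcn->m, "example field\t\t: %lu\n", pcn->n);
	return NOTIFY_OK;
}

static int __init example_cpuinfo_notifier_init(void)
{
	return proc_cpuinfo_notifier(example_cpuinfo_call, 0);
}
subsys_initcall(example_cpuinfo_notifier_init);
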
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index c10aa84c9fa9..38635a996cbf 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -77,3 +77,26 @@ void init_smtc_stats(void)
77 77
78 proc_create("smtc", 0444, NULL, &smtc_proc_fops); 78 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
79} 79}
80
81static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
82 unsigned long action_unused, void *data)
83{
84 struct proc_cpuinfo_notifier_args *pcn = data;
85 struct seq_file *m = pcn->m;
86 unsigned long n = pcn->n;
87
88 if (!cpu_has_mipsmt)
89 return NOTIFY_OK;
90
91 seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
92 seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
93
94 return NOTIFY_OK;
95}
96
97static int __init proc_cpuinfo_notifier_init(void)
98{
99 return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
100}
101
102subsys_initcall(proc_cpuinfo_notifier_init);
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index b242e2c10ea0..67f2495def1c 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -197,16 +197,17 @@ static void probe_spram(char *type,
197} 197}
198void spram_config(void) 198void spram_config(void)
199{ 199{
200 struct cpuinfo_mips *c = &current_cpu_data;
201 unsigned int config0; 200 unsigned int config0;
202 201
203 switch (c->cputype) { 202 switch (current_cpu_type()) {
204 case CPU_24K: 203 case CPU_24K:
205 case CPU_34K: 204 case CPU_34K:
206 case CPU_74K: 205 case CPU_74K:
207 case CPU_1004K: 206 case CPU_1004K:
207 case CPU_1074K:
208 case CPU_INTERAPTIV: 208 case CPU_INTERAPTIV:
209 case CPU_PROAPTIV: 209 case CPU_PROAPTIV:
210 case CPU_P5600:
210 config0 = read_c0_config(); 211 config0 = read_c0_config();
211 /* FIXME: addresses are Malta specific */ 212 /* FIXME: addresses are Malta specific */
212 if (config0 & (1<<24)) { 213 if (config0 & (1<<24)) {
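
spram_config() now switches on current_cpu_type() instead of reading c->cputype from current_cpu_data. current_cpu_type() lets the compiler see which CPU types are possible for the configured system, so case arms that cannot be reached can be optimised away. The idiom, sketched with the same CPU list as above (the helper itself is illustrative):

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>

static bool cpu_may_have_spram(void)	/* hypothetical helper */
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
		return true;
	default:
		return false;
	}
}
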
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b79d13f95bf0..4a4f9dda5658 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -110,7 +110,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
110 110
111 if (cpu_has_llsc && R10000_LLSC_WAR) { 111 if (cpu_has_llsc && R10000_LLSC_WAR) {
112 __asm__ __volatile__ ( 112 __asm__ __volatile__ (
113 " .set mips3 \n" 113 " .set arch=r4000 \n"
114 " li %[err], 0 \n" 114 " li %[err], 0 \n"
115 "1: ll %[old], (%[addr]) \n" 115 "1: ll %[old], (%[addr]) \n"
116 " move %[tmp], %[new] \n" 116 " move %[tmp], %[new] \n"
@@ -135,7 +135,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
135 : "memory"); 135 : "memory");
136 } else if (cpu_has_llsc) { 136 } else if (cpu_has_llsc) {
137 __asm__ __volatile__ ( 137 __asm__ __volatile__ (
138 " .set mips3 \n" 138 " .set arch=r4000 \n"
139 " li %[err], 0 \n" 139 " li %[err], 0 \n"
140 "1: ll %[old], (%[addr]) \n" 140 "1: ll %[old], (%[addr]) \n"
141 " move %[tmp], %[new] \n" 141 " move %[tmp], %[new] \n"
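
Both hunks above only switch the ISA override guarding the LL/SC sequence from ".set mips3" to ".set arch=r4000". For reference, a stand-alone sketch of the same kind of retry loop under the new directive, in the style of the era's atomic helpers; this is not code from the patch, and the SMP barriers and the R10000_LLSC_WAR variant are deliberately omitted:

#include <linux/compiler.h>

static inline void llsc_increment_sketch(int *p)	/* hypothetical helper */
{
	int temp;

	do {
		__asm__ __volatile__(
		"	.set	arch=r4000		\n"
		"	ll	%0, %1			\n"	/* load linked */
		"	addiu	%0, 1			\n"
		"	sc	%0, %1			\n"	/* store conditional */
		"	.set	mips0			\n"
		: "=&r" (temp), "+m" (*p));
	} while (unlikely(!temp));
}
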
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e0b499694d18..074e857ced28 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -10,6 +10,7 @@
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com 10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki 11 * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
13 * Copyright (C) 2014, Imagination Technologies Ltd.
13 */ 14 */
14#include <linux/bug.h> 15#include <linux/bug.h>
15#include <linux/compiler.h> 16#include <linux/compiler.h>
@@ -47,6 +48,7 @@
47#include <asm/mipsregs.h> 48#include <asm/mipsregs.h>
48#include <asm/mipsmtregs.h> 49#include <asm/mipsmtregs.h>
49#include <asm/module.h> 50#include <asm/module.h>
51#include <asm/msa.h>
50#include <asm/pgtable.h> 52#include <asm/pgtable.h>
51#include <asm/ptrace.h> 53#include <asm/ptrace.h>
52#include <asm/sections.h> 54#include <asm/sections.h>
@@ -77,8 +79,10 @@ extern asmlinkage void handle_ri_rdhwr(void);
77extern asmlinkage void handle_cpu(void); 79extern asmlinkage void handle_cpu(void);
78extern asmlinkage void handle_ov(void); 80extern asmlinkage void handle_ov(void);
79extern asmlinkage void handle_tr(void); 81extern asmlinkage void handle_tr(void);
82extern asmlinkage void handle_msa_fpe(void);
80extern asmlinkage void handle_fpe(void); 83extern asmlinkage void handle_fpe(void);
81extern asmlinkage void handle_ftlb(void); 84extern asmlinkage void handle_ftlb(void);
85extern asmlinkage void handle_msa(void);
82extern asmlinkage void handle_mdmx(void); 86extern asmlinkage void handle_mdmx(void);
83extern asmlinkage void handle_watch(void); 87extern asmlinkage void handle_watch(void);
84extern asmlinkage void handle_mt(void); 88extern asmlinkage void handle_mt(void);
@@ -861,6 +865,11 @@ asmlinkage void do_bp(struct pt_regs *regs)
861 enum ctx_state prev_state; 865 enum ctx_state prev_state;
862 unsigned long epc; 866 unsigned long epc;
863 u16 instr[2]; 867 u16 instr[2];
868 mm_segment_t seg;
869
870 seg = get_fs();
871 if (!user_mode(regs))
872 set_fs(KERNEL_DS);
864 873
865 prev_state = exception_enter(); 874 prev_state = exception_enter();
866 if (get_isa16_mode(regs->cp0_epc)) { 875 if (get_isa16_mode(regs->cp0_epc)) {
@@ -870,17 +879,19 @@ asmlinkage void do_bp(struct pt_regs *regs)
870 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || 879 if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
871 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) 880 (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
872 goto out_sigsegv; 881 goto out_sigsegv;
873 opcode = (instr[0] << 16) | instr[1]; 882 opcode = (instr[0] << 16) | instr[1];
874 } else { 883 } else {
875 /* MIPS16e mode */ 884 /* MIPS16e mode */
876 if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) 885 if (__get_user(instr[0],
886 (u16 __user *)msk_isa16_mode(epc)))
877 goto out_sigsegv; 887 goto out_sigsegv;
878 bcode = (instr[0] >> 6) & 0x3f; 888 bcode = (instr[0] >> 6) & 0x3f;
879 do_trap_or_bp(regs, bcode, "Break"); 889 do_trap_or_bp(regs, bcode, "Break");
880 goto out; 890 goto out;
881 } 891 }
882 } else { 892 } else {
883 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 893 if (__get_user(opcode,
894 (unsigned int __user *) exception_epc(regs)))
884 goto out_sigsegv; 895 goto out_sigsegv;
885 } 896 }
886 897
@@ -918,6 +929,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
918 do_trap_or_bp(regs, bcode, "Break"); 929 do_trap_or_bp(regs, bcode, "Break");
919 930
920out: 931out:
932 set_fs(seg);
921 exception_exit(prev_state); 933 exception_exit(prev_state);
922 return; 934 return;
923 935
@@ -931,8 +943,13 @@ asmlinkage void do_tr(struct pt_regs *regs)
931 u32 opcode, tcode = 0; 943 u32 opcode, tcode = 0;
932 enum ctx_state prev_state; 944 enum ctx_state prev_state;
933 u16 instr[2]; 945 u16 instr[2];
946 mm_segment_t seg;
934 unsigned long epc = msk_isa16_mode(exception_epc(regs)); 947 unsigned long epc = msk_isa16_mode(exception_epc(regs));
935 948
949 seg = get_fs();
950 if (!user_mode(regs))
951 set_fs(get_ds());
952
936 prev_state = exception_enter(); 953 prev_state = exception_enter();
937 if (get_isa16_mode(regs->cp0_epc)) { 954 if (get_isa16_mode(regs->cp0_epc)) {
938 if (__get_user(instr[0], (u16 __user *)(epc + 0)) || 955 if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -953,6 +970,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
953 do_trap_or_bp(regs, tcode, "Trap"); 970 do_trap_or_bp(regs, tcode, "Trap");
954 971
955out: 972out:
973 set_fs(seg);
956 exception_exit(prev_state); 974 exception_exit(prev_state);
957 return; 975 return;
958 976
@@ -1074,6 +1092,76 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1074 return NOTIFY_OK; 1092 return NOTIFY_OK;
1075} 1093}
1076 1094
1095static int enable_restore_fp_context(int msa)
1096{
1097 int err, was_fpu_owner;
1098
1099 if (!used_math()) {
1100 /* First time FP context user. */
1101 err = init_fpu();
1102 if (msa && !err)
1103 enable_msa();
1104 if (!err)
1105 set_used_math();
1106 return err;
1107 }
1108
1109 /*
1110 * This task has formerly used the FP context.
1111 *
1112 * If this thread has no live MSA vector context then we can simply
1113 * restore the scalar FP context. If it has live MSA vector context
1114 * (that is, it has or may have used MSA since last performing a
1115 * function call) then we'll need to restore the vector context. This
1116 * applies even if we're currently only executing a scalar FP
1117 * instruction. This is because if we were to later execute an MSA
1118 * instruction then we'd either have to:
1119 *
1120 * - Restore the vector context & clobber any registers modified by
1121 * scalar FP instructions between now & then.
1122 *
1123 * or
1124 *
1125 * - Not restore the vector context & lose the most significant bits
1126 * of all vector registers.
1127 *
1128 * Neither of those options is acceptable. We cannot restore the least
1129 * significant bits of the registers now & only restore the most
1130 * significant bits later because the most significant bits of any
1131 * vector registers whose aliased FP register is modified now will have
1132 * been zeroed. We'd have no way to know that when restoring the vector
1133 * context & thus may load an outdated value for the most significant
1134 * bits of a vector register.
1135 */
1136 if (!msa && !thread_msa_context_live())
1137 return own_fpu(1);
1138
1139 /*
1140 * This task is using or has previously used MSA. Thus we require
1141 * that Status.FR == 1.
1142 */
1143 was_fpu_owner = is_fpu_owner();
1144 err = own_fpu(0);
1145 if (err)
1146 return err;
1147
1148 enable_msa();
1149 write_msa_csr(current->thread.fpu.msacsr);
1150 set_thread_flag(TIF_USEDMSA);
1151
1152 /*
1153 * If this is the first time that the task is using MSA and it has
 1154 * previously used scalar FP in this time slice then we already have
1155 * FP context which we shouldn't clobber.
1156 */
1157 if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
1158 return 0;
1159
1160 /* We need to restore the vector context. */
1161 restore_msa(current);
1162 return 0;
1163}
1164
1077asmlinkage void do_cpu(struct pt_regs *regs) 1165asmlinkage void do_cpu(struct pt_regs *regs)
1078{ 1166{
1079 enum ctx_state prev_state; 1167 enum ctx_state prev_state;
@@ -1153,12 +1241,7 @@ asmlinkage void do_cpu(struct pt_regs *regs)
1153 /* Fall through. */ 1241 /* Fall through. */
1154 1242
1155 case 1: 1243 case 1:
1156 if (used_math()) /* Using the FPU again. */ 1244 err = enable_restore_fp_context(0);
1157 err = own_fpu(1);
1158 else { /* First time FPU user. */
1159 err = init_fpu();
1160 set_used_math();
1161 }
1162 1245
1163 if (!raw_cpu_has_fpu || err) { 1246 if (!raw_cpu_has_fpu || err) {
1164 int sig; 1247 int sig;
@@ -1183,6 +1266,37 @@ out:
1183 exception_exit(prev_state); 1266 exception_exit(prev_state);
1184} 1267}
1185 1268
1269asmlinkage void do_msa_fpe(struct pt_regs *regs)
1270{
1271 enum ctx_state prev_state;
1272
1273 prev_state = exception_enter();
1274 die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1275 force_sig(SIGFPE, current);
1276 exception_exit(prev_state);
1277}
1278
1279asmlinkage void do_msa(struct pt_regs *regs)
1280{
1281 enum ctx_state prev_state;
1282 int err;
1283
1284 prev_state = exception_enter();
1285
1286 if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1287 force_sig(SIGILL, current);
1288 goto out;
1289 }
1290
1291 die_if_kernel("do_msa invoked from kernel context!", regs);
1292
1293 err = enable_restore_fp_context(1);
1294 if (err)
1295 force_sig(SIGILL, current);
1296out:
1297 exception_exit(prev_state);
1298}
1299
1186asmlinkage void do_mdmx(struct pt_regs *regs) 1300asmlinkage void do_mdmx(struct pt_regs *regs)
1187{ 1301{
1188 enum ctx_state prev_state; 1302 enum ctx_state prev_state;
@@ -1337,8 +1451,10 @@ static inline void parity_protection_init(void)
1337 case CPU_34K: 1451 case CPU_34K:
1338 case CPU_74K: 1452 case CPU_74K:
1339 case CPU_1004K: 1453 case CPU_1004K:
1454 case CPU_1074K:
1340 case CPU_INTERAPTIV: 1455 case CPU_INTERAPTIV:
1341 case CPU_PROAPTIV: 1456 case CPU_PROAPTIV:
1457 case CPU_P5600:
1342 { 1458 {
1343#define ERRCTL_PE 0x80000000 1459#define ERRCTL_PE 0x80000000
1344#define ERRCTL_L2P 0x00800000 1460#define ERRCTL_L2P 0x00800000
@@ -2017,6 +2133,7 @@ void __init trap_init(void)
2017 set_except_vector(11, handle_cpu); 2133 set_except_vector(11, handle_cpu);
2018 set_except_vector(12, handle_ov); 2134 set_except_vector(12, handle_ov);
2019 set_except_vector(13, handle_tr); 2135 set_except_vector(13, handle_tr);
2136 set_except_vector(14, handle_msa_fpe);
2020 2137
2021 if (current_cpu_type() == CPU_R6000 || 2138 if (current_cpu_type() == CPU_R6000 ||
2022 current_cpu_type() == CPU_R6000A) { 2139 current_cpu_type() == CPU_R6000A) {
@@ -2040,6 +2157,7 @@ void __init trap_init(void)
2040 set_except_vector(15, handle_fpe); 2157 set_except_vector(15, handle_fpe);
2041 2158
2042 set_except_vector(16, handle_ftlb); 2159 set_except_vector(16, handle_ftlb);
2160 set_except_vector(21, handle_msa);
2043 set_except_vector(22, handle_mdmx); 2161 set_except_vector(22, handle_mdmx);
2044 2162
2045 if (cpu_has_mcheck) 2163 if (cpu_has_mcheck)
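
The comment block inside enable_restore_fp_context() above carries the key reasoning about scalar FP versus MSA vector state. As a readability aid only, the decision it implements can be summarised like this (a paraphrase of the function, not additional kernel code):

/*
 * state of the task                           action taken
 * ------------------------------------------  --------------------------------
 * never used FP (!used_math())                init_fpu(); enable_msa() if asked
 * used FP, no live MSA context, scalar only   own_fpu(1)  (restore scalar FP)
 * anything involving live or requested MSA    own_fpu(0), enable_msa(), then
 *                                             restore_msa() unless the scalar
 *                                             FP context written earlier in
 *                                             this timeslice must be preserved
 */
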
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index c369a5d35527..2b3517214d6d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle 8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2014 Imagination Technologies Ltd.
10 * 11 *
11 * This file contains exception handler for address error exception with the 12 * This file contains exception handler for address error exception with the
12 * special capability to execute faulting instructions in software. The 13 * special capability to execute faulting instructions in software. The
@@ -110,8 +111,8 @@ extern void show_registers(struct pt_regs *regs);
110#ifdef __BIG_ENDIAN 111#ifdef __BIG_ENDIAN
111#define LoadHW(addr, value, res) \ 112#define LoadHW(addr, value, res) \
112 __asm__ __volatile__ (".set\tnoat\n" \ 113 __asm__ __volatile__ (".set\tnoat\n" \
113 "1:\tlb\t%0, 0(%2)\n" \ 114 "1:\t"user_lb("%0", "0(%2)")"\n" \
114 "2:\tlbu\t$1, 1(%2)\n\t" \ 115 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
115 "sll\t%0, 0x8\n\t" \ 116 "sll\t%0, 0x8\n\t" \
116 "or\t%0, $1\n\t" \ 117 "or\t%0, $1\n\t" \
117 "li\t%1, 0\n" \ 118 "li\t%1, 0\n" \
@@ -130,8 +131,8 @@ extern void show_registers(struct pt_regs *regs);
130 131
131#define LoadW(addr, value, res) \ 132#define LoadW(addr, value, res) \
132 __asm__ __volatile__ ( \ 133 __asm__ __volatile__ ( \
133 "1:\tlwl\t%0, (%2)\n" \ 134 "1:\t"user_lwl("%0", "(%2)")"\n" \
134 "2:\tlwr\t%0, 3(%2)\n\t" \ 135 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
135 "li\t%1, 0\n" \ 136 "li\t%1, 0\n" \
136 "3:\n\t" \ 137 "3:\n\t" \
137 ".insn\n\t" \ 138 ".insn\n\t" \
@@ -149,8 +150,8 @@ extern void show_registers(struct pt_regs *regs);
149#define LoadHWU(addr, value, res) \ 150#define LoadHWU(addr, value, res) \
150 __asm__ __volatile__ ( \ 151 __asm__ __volatile__ ( \
151 ".set\tnoat\n" \ 152 ".set\tnoat\n" \
152 "1:\tlbu\t%0, 0(%2)\n" \ 153 "1:\t"user_lbu("%0", "0(%2)")"\n" \
153 "2:\tlbu\t$1, 1(%2)\n\t" \ 154 "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
154 "sll\t%0, 0x8\n\t" \ 155 "sll\t%0, 0x8\n\t" \
155 "or\t%0, $1\n\t" \ 156 "or\t%0, $1\n\t" \
156 "li\t%1, 0\n" \ 157 "li\t%1, 0\n" \
@@ -170,8 +171,8 @@ extern void show_registers(struct pt_regs *regs);
170 171
171#define LoadWU(addr, value, res) \ 172#define LoadWU(addr, value, res) \
172 __asm__ __volatile__ ( \ 173 __asm__ __volatile__ ( \
173 "1:\tlwl\t%0, (%2)\n" \ 174 "1:\t"user_lwl("%0", "(%2)")"\n" \
174 "2:\tlwr\t%0, 3(%2)\n\t" \ 175 "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
175 "dsll\t%0, %0, 32\n\t" \ 176 "dsll\t%0, %0, 32\n\t" \
176 "dsrl\t%0, %0, 32\n\t" \ 177 "dsrl\t%0, %0, 32\n\t" \
177 "li\t%1, 0\n" \ 178 "li\t%1, 0\n" \
@@ -209,9 +210,9 @@ extern void show_registers(struct pt_regs *regs);
209#define StoreHW(addr, value, res) \ 210#define StoreHW(addr, value, res) \
210 __asm__ __volatile__ ( \ 211 __asm__ __volatile__ ( \
211 ".set\tnoat\n" \ 212 ".set\tnoat\n" \
212 "1:\tsb\t%1, 1(%2)\n\t" \ 213 "1:\t"user_sb("%1", "1(%2)")"\n" \
213 "srl\t$1, %1, 0x8\n" \ 214 "srl\t$1, %1, 0x8\n" \
214 "2:\tsb\t$1, 0(%2)\n\t" \ 215 "2:\t"user_sb("$1", "0(%2)")"\n" \
215 ".set\tat\n\t" \ 216 ".set\tat\n\t" \
216 "li\t%0, 0\n" \ 217 "li\t%0, 0\n" \
217 "3:\n\t" \ 218 "3:\n\t" \
@@ -229,8 +230,8 @@ extern void show_registers(struct pt_regs *regs);
229 230
230#define StoreW(addr, value, res) \ 231#define StoreW(addr, value, res) \
231 __asm__ __volatile__ ( \ 232 __asm__ __volatile__ ( \
232 "1:\tswl\t%1,(%2)\n" \ 233 "1:\t"user_swl("%1", "(%2)")"\n" \
233 "2:\tswr\t%1, 3(%2)\n\t" \ 234 "2:\t"user_swr("%1", "3(%2)")"\n\t" \
234 "li\t%0, 0\n" \ 235 "li\t%0, 0\n" \
235 "3:\n\t" \ 236 "3:\n\t" \
236 ".insn\n\t" \ 237 ".insn\n\t" \
@@ -267,8 +268,8 @@ extern void show_registers(struct pt_regs *regs);
267#ifdef __LITTLE_ENDIAN 268#ifdef __LITTLE_ENDIAN
268#define LoadHW(addr, value, res) \ 269#define LoadHW(addr, value, res) \
269 __asm__ __volatile__ (".set\tnoat\n" \ 270 __asm__ __volatile__ (".set\tnoat\n" \
270 "1:\tlb\t%0, 1(%2)\n" \ 271 "1:\t"user_lb("%0", "1(%2)")"\n" \
271 "2:\tlbu\t$1, 0(%2)\n\t" \ 272 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
272 "sll\t%0, 0x8\n\t" \ 273 "sll\t%0, 0x8\n\t" \
273 "or\t%0, $1\n\t" \ 274 "or\t%0, $1\n\t" \
274 "li\t%1, 0\n" \ 275 "li\t%1, 0\n" \
@@ -287,8 +288,8 @@ extern void show_registers(struct pt_regs *regs);
287 288
288#define LoadW(addr, value, res) \ 289#define LoadW(addr, value, res) \
289 __asm__ __volatile__ ( \ 290 __asm__ __volatile__ ( \
290 "1:\tlwl\t%0, 3(%2)\n" \ 291 "1:\t"user_lwl("%0", "3(%2)")"\n" \
291 "2:\tlwr\t%0, (%2)\n\t" \ 292 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
292 "li\t%1, 0\n" \ 293 "li\t%1, 0\n" \
293 "3:\n\t" \ 294 "3:\n\t" \
294 ".insn\n\t" \ 295 ".insn\n\t" \
@@ -306,8 +307,8 @@ extern void show_registers(struct pt_regs *regs);
306#define LoadHWU(addr, value, res) \ 307#define LoadHWU(addr, value, res) \
307 __asm__ __volatile__ ( \ 308 __asm__ __volatile__ ( \
308 ".set\tnoat\n" \ 309 ".set\tnoat\n" \
309 "1:\tlbu\t%0, 1(%2)\n" \ 310 "1:\t"user_lbu("%0", "1(%2)")"\n" \
310 "2:\tlbu\t$1, 0(%2)\n\t" \ 311 "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
311 "sll\t%0, 0x8\n\t" \ 312 "sll\t%0, 0x8\n\t" \
312 "or\t%0, $1\n\t" \ 313 "or\t%0, $1\n\t" \
313 "li\t%1, 0\n" \ 314 "li\t%1, 0\n" \
@@ -327,8 +328,8 @@ extern void show_registers(struct pt_regs *regs);
327 328
328#define LoadWU(addr, value, res) \ 329#define LoadWU(addr, value, res) \
329 __asm__ __volatile__ ( \ 330 __asm__ __volatile__ ( \
330 "1:\tlwl\t%0, 3(%2)\n" \ 331 "1:\t"user_lwl("%0", "3(%2)")"\n" \
331 "2:\tlwr\t%0, (%2)\n\t" \ 332 "2:\t"user_lwr("%0", "(%2)")"\n\t" \
332 "dsll\t%0, %0, 32\n\t" \ 333 "dsll\t%0, %0, 32\n\t" \
333 "dsrl\t%0, %0, 32\n\t" \ 334 "dsrl\t%0, %0, 32\n\t" \
334 "li\t%1, 0\n" \ 335 "li\t%1, 0\n" \
@@ -366,9 +367,9 @@ extern void show_registers(struct pt_regs *regs);
366#define StoreHW(addr, value, res) \ 367#define StoreHW(addr, value, res) \
367 __asm__ __volatile__ ( \ 368 __asm__ __volatile__ ( \
368 ".set\tnoat\n" \ 369 ".set\tnoat\n" \
369 "1:\tsb\t%1, 0(%2)\n\t" \ 370 "1:\t"user_sb("%1", "0(%2)")"\n" \
370 "srl\t$1,%1, 0x8\n" \ 371 "srl\t$1,%1, 0x8\n" \
371 "2:\tsb\t$1, 1(%2)\n\t" \ 372 "2:\t"user_sb("$1", "1(%2)")"\n" \
372 ".set\tat\n\t" \ 373 ".set\tat\n\t" \
373 "li\t%0, 0\n" \ 374 "li\t%0, 0\n" \
374 "3:\n\t" \ 375 "3:\n\t" \
@@ -386,8 +387,8 @@ extern void show_registers(struct pt_regs *regs);
386 387
387#define StoreW(addr, value, res) \ 388#define StoreW(addr, value, res) \
388 __asm__ __volatile__ ( \ 389 __asm__ __volatile__ ( \
389 "1:\tswl\t%1, 3(%2)\n" \ 390 "1:\t"user_swl("%1", "3(%2)")"\n" \
390 "2:\tswr\t%1, (%2)\n\t" \ 391 "2:\t"user_swr("%1", "(%2)")"\n\t" \
391 "li\t%0, 0\n" \ 392 "li\t%0, 0\n" \
392 "3:\n\t" \ 393 "3:\n\t" \
393 ".insn\n\t" \ 394 ".insn\n\t" \
@@ -430,7 +431,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
430 unsigned long origpc; 431 unsigned long origpc;
431 unsigned long orig31; 432 unsigned long orig31;
432 void __user *fault_addr = NULL; 433 void __user *fault_addr = NULL;
433 434#ifdef CONFIG_EVA
435 mm_segment_t seg;
436#endif
434 origpc = (unsigned long)pc; 437 origpc = (unsigned long)pc;
435 orig31 = regs->regs[31]; 438 orig31 = regs->regs[31];
436 439
@@ -475,6 +478,88 @@ static void emulate_load_store_insn(struct pt_regs *regs,
475 * The remaining opcodes are the ones that are really of 478 * The remaining opcodes are the ones that are really of
476 * interest. 479 * interest.
477 */ 480 */
481#ifdef CONFIG_EVA
482 case spec3_op:
483 /*
484 * we can land here only from kernel accessing user memory,
485 * so we need to "switch" the address limit to user space, so
486 * address check can work properly.
487 */
488 seg = get_fs();
489 set_fs(USER_DS);
490 switch (insn.spec3_format.func) {
491 case lhe_op:
492 if (!access_ok(VERIFY_READ, addr, 2)) {
493 set_fs(seg);
494 goto sigbus;
495 }
496 LoadHW(addr, value, res);
497 if (res) {
498 set_fs(seg);
499 goto fault;
500 }
501 compute_return_epc(regs);
502 regs->regs[insn.spec3_format.rt] = value;
503 break;
504 case lwe_op:
505 if (!access_ok(VERIFY_READ, addr, 4)) {
506 set_fs(seg);
507 goto sigbus;
508 }
509 LoadW(addr, value, res);
510 if (res) {
511 set_fs(seg);
512 goto fault;
513 }
514 compute_return_epc(regs);
515 regs->regs[insn.spec3_format.rt] = value;
516 break;
517 case lhue_op:
518 if (!access_ok(VERIFY_READ, addr, 2)) {
519 set_fs(seg);
520 goto sigbus;
521 }
522 LoadHWU(addr, value, res);
523 if (res) {
524 set_fs(seg);
525 goto fault;
526 }
527 compute_return_epc(regs);
528 regs->regs[insn.spec3_format.rt] = value;
529 break;
530 case she_op:
531 if (!access_ok(VERIFY_WRITE, addr, 2)) {
532 set_fs(seg);
533 goto sigbus;
534 }
535 compute_return_epc(regs);
536 value = regs->regs[insn.spec3_format.rt];
537 StoreHW(addr, value, res);
538 if (res) {
539 set_fs(seg);
540 goto fault;
541 }
542 break;
543 case swe_op:
544 if (!access_ok(VERIFY_WRITE, addr, 4)) {
545 set_fs(seg);
546 goto sigbus;
547 }
548 compute_return_epc(regs);
549 value = regs->regs[insn.spec3_format.rt];
550 StoreW(addr, value, res);
551 if (res) {
552 set_fs(seg);
553 goto fault;
554 }
555 break;
556 default:
557 set_fs(seg);
558 goto sigill;
559 }
560 set_fs(seg);
561 break;
562#endif
478 case lh_op: 563 case lh_op:
479 if (!access_ok(VERIFY_READ, addr, 2)) 564 if (!access_ok(VERIFY_READ, addr, 2))
480 goto sigbus; 565 goto sigbus;
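
The spec3_op branch above can only be reached when the kernel itself touches user memory, which is why it widens the address limit before the access_ok() checks and restores it on every exit path. The bracketing pattern in isolation, as a sketch (the helper name is hypothetical; only the get_fs()/set_fs()/access_ok() usage mirrors the patch):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int eva_user_read_u16_sketch(const u16 __user *uaddr, u16 *val)
{
	mm_segment_t seg = get_fs();
	int ret = -EFAULT;

	set_fs(USER_DS);
	if (access_ok(VERIFY_READ, uaddr, sizeof(*val)))
		ret = __get_user(*val, uaddr);
	set_fs(seg);		/* always restore the saved limit */
	return ret;
}
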
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 4b6274b47f33..e3fec99941a7 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -436,13 +436,6 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
436 sel = inst & 0x7; 436 sel = inst & 0x7;
437 co_bit = (inst >> 25) & 1; 437 co_bit = (inst >> 25) & 1;
438 438
439 /* Verify that the register is valid */
440 if (rd > MIPS_CP0_DESAVE) {
441 printk("Invalid rd: %d\n", rd);
442 er = EMULATE_FAIL;
443 goto done;
444 }
445
446 if (co_bit) { 439 if (co_bit) {
447 op = (inst) & 0xff; 440 op = (inst) & 0xff;
448 441
@@ -1542,8 +1535,15 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1542 } 1535 }
1543 1536
1544 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { 1537 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1538 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1545 int rd = (inst & RD) >> 11; 1539 int rd = (inst & RD) >> 11;
1546 int rt = (inst & RT) >> 16; 1540 int rt = (inst & RT) >> 16;
1541 /* If usermode, check RDHWR rd is allowed by guest HWREna */
1542 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
1543 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
1544 rd, opc);
1545 goto emulate_ri;
1546 }
1547 switch (rd) { 1547 switch (rd) {
1548 case 0: /* CPU number */ 1548 case 0: /* CPU number */
1549 arch->gprs[rt] = 0; 1549 arch->gprs[rt] = 0;
@@ -1567,31 +1567,27 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1567 } 1567 }
1568 break; 1568 break;
1569 case 29: 1569 case 29:
1570#if 1
1571 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); 1570 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
1572#else
1573 /* UserLocal not implemented */
1574 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1575#endif
1576 break; 1571 break;
1577 1572
1578 default: 1573 default:
1579 printk("RDHWR not supported\n"); 1574 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
1580 er = EMULATE_FAIL; 1575 goto emulate_ri;
1581 break;
1582 } 1576 }
1583 } else { 1577 } else {
1584 printk("Emulate RI not supported @ %p: %#x\n", opc, inst); 1578 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
1585 er = EMULATE_FAIL; 1579 goto emulate_ri;
1586 } 1580 }
1587 1581
1582 return EMULATE_DONE;
1583
1584emulate_ri:
1588 /* 1585 /*
1589 * Rollback PC only if emulation was unsuccessful 1586 * Rollback PC (if in branch delay slot then the PC already points to
1587 * branch target), and pass the RI exception to the guest OS.
1590 */ 1588 */
1591 if (er == EMULATE_FAIL) { 1589 vcpu->arch.pc = curr_pc;
1592 vcpu->arch.pc = curr_pc; 1590 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1593 }
1594 return er;
1595} 1591}
1596 1592
1597enum emulation_result 1593enum emulation_result
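
The RDHWR change above makes guest-usermode access depend on the corresponding bit in the guest's HWREna, mirroring what real hardware checks before raising a Reserved Instruction exception. The added gate, isolated as a predicate (a sketch; the helper name is not from the patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* true if a guest in the given mode may read hardware register rd */
static inline bool guest_rdhwr_allowed(u32 guest_hwrena, int rd, bool usermode)
{
	return !usermode || (guest_hwrena & BIT(rd));
}
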
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index 638c5db122c9..2bcd8391bc93 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -175,7 +175,7 @@ static void pvc_proc_cleanup(void)
175 remove_proc_entry("scroll", pvc_display_dir); 175 remove_proc_entry("scroll", pvc_display_dir);
176 remove_proc_entry(DISPLAY_DIR_NAME, NULL); 176 remove_proc_entry(DISPLAY_DIR_NAME, NULL);
177 177
178 del_timer(&timer); 178 del_timer_sync(&timer);
179} 179}
180 180
181static int __init pvc_proc_init(void) 181static int __init pvc_proc_init(void)
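
Switching to del_timer_sync() above matters because pvc_proc_cleanup() runs at teardown: unlike del_timer(), it waits for an expiry handler that may still be running on another CPU before returning. The generic pattern, sketched with an illustrative timer:

#include <linux/timer.h>

static struct timer_list demo_timer;	/* illustrative only */

static void demo_cleanup(void)
{
	/*
	 * Wait for any in-flight handler to finish before freeing
	 * anything that handler might still reference.
	 */
	del_timer_sync(&demo_timer);
}
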
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a6adffbb4e5f..2e4825e48388 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -8,6 +8,7 @@
8 * Copyright (C) 1998, 1999 Ralf Baechle 8 * Copyright (C) 1998, 1999 Ralf Baechle
9 * Copyright (C) 1999 Silicon Graphics, Inc. 9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Copyright (C) 2007 Maciej W. Rozycki 10 * Copyright (C) 2007 Maciej W. Rozycki
11 * Copyright (C) 2014 Imagination Technologies Ltd.
11 */ 12 */
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <asm/asm.h> 14#include <asm/asm.h>
@@ -296,7 +297,7 @@ LEAF(csum_partial)
296 * checksum and copy routines based on memcpy.S 297 * checksum and copy routines based on memcpy.S
297 * 298 *
298 * csum_partial_copy_nocheck(src, dst, len, sum) 299 * csum_partial_copy_nocheck(src, dst, len, sum)
299 * __csum_partial_copy_user(src, dst, len, sum, errp) 300 * __csum_partial_copy_kernel(src, dst, len, sum, errp)
300 * 301 *
301 * See "Spec" in memcpy.S for details. Unlike __copy_user, all 302 * See "Spec" in memcpy.S for details. Unlike __copy_user, all
302 * function in this file use the standard calling convention. 303 * function in this file use the standard calling convention.
@@ -327,20 +328,58 @@ LEAF(csum_partial)
327 * These handlers do not need to overwrite any data. 328 * These handlers do not need to overwrite any data.
328 */ 329 */
329 330
330#define EXC(inst_reg,addr,handler) \ 331/* Instruction type */
3319: inst_reg, addr; \ 332#define LD_INSN 1
332 .section __ex_table,"a"; \ 333#define ST_INSN 2
333 PTR 9b, handler; \ 334#define LEGACY_MODE 1
334 .previous 335#define EVA_MODE 2
336#define USEROP 1
337#define KERNELOP 2
338
339/*
340 * Wrapper to add an entry in the exception table
341 * in case the insn causes a memory exception.
342 * Arguments:
343 * insn : Load/store instruction
344 * type : Instruction type
345 * reg : Register
346 * addr : Address
347 * handler : Exception handler
348 */
349#define EXC(insn, type, reg, addr, handler) \
350 .if \mode == LEGACY_MODE; \
3519: insn reg, addr; \
352 .section __ex_table,"a"; \
353 PTR 9b, handler; \
354 .previous; \
355 /* This is enabled in EVA mode */ \
356 .else; \
357 /* If loading from user or storing to user */ \
358 .if ((\from == USEROP) && (type == LD_INSN)) || \
359 ((\to == USEROP) && (type == ST_INSN)); \
3609: __BUILD_EVA_INSN(insn##e, reg, addr); \
361 .section __ex_table,"a"; \
362 PTR 9b, handler; \
363 .previous; \
364 .else; \
365 /* EVA without exception */ \
366 insn reg, addr; \
367 .endif; \
368 .endif
369
370#undef LOAD
335 371
336#ifdef USE_DOUBLE 372#ifdef USE_DOUBLE
337 373
338#define LOAD ld 374#define LOADK ld /* No exception */
339#define LOADL ldl 375#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
340#define LOADR ldr 376#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
341#define STOREL sdl 377#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
342#define STORER sdr 378#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
343#define STORE sd 379#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
380#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
381#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
382#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
344#define ADD daddu 383#define ADD daddu
345#define SUB dsubu 384#define SUB dsubu
346#define SRL dsrl 385#define SRL dsrl
@@ -352,12 +391,15 @@ LEAF(csum_partial)
352 391
353#else 392#else
354 393
355#define LOAD lw 394#define LOADK lw /* No exception */
356#define LOADL lwl 395#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
357#define LOADR lwr 396#define LOADBU(reg, addr, handler) EXC(lbu, LD_INSN, reg, addr, handler)
358#define STOREL swl 397#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
359#define STORER swr 398#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
360#define STORE sw 399#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
400#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
401#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
402#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
361#define ADD addu 403#define ADD addu
362#define SUB subu 404#define SUB subu
363#define SRL srl 405#define SRL srl
@@ -396,14 +438,20 @@ LEAF(csum_partial)
396 .set at=v1 438 .set at=v1
397#endif 439#endif
398 440
399LEAF(__csum_partial_copy_user) 441 .macro __BUILD_CSUM_PARTIAL_COPY_USER mode, from, to, __nocheck
442
400 PTR_ADDU AT, src, len /* See (1) above. */ 443 PTR_ADDU AT, src, len /* See (1) above. */
444 /* initialize __nocheck if this the first time we execute this
445 * macro
446 */
401#ifdef CONFIG_64BIT 447#ifdef CONFIG_64BIT
402 move errptr, a4 448 move errptr, a4
403#else 449#else
404 lw errptr, 16(sp) 450 lw errptr, 16(sp)
405#endif 451#endif
406FEXPORT(csum_partial_copy_nocheck) 452 .if \__nocheck == 1
453 FEXPORT(csum_partial_copy_nocheck)
454 .endif
407 move sum, zero 455 move sum, zero
408 move odd, zero 456 move odd, zero
409 /* 457 /*
@@ -419,48 +467,48 @@ FEXPORT(csum_partial_copy_nocheck)
419 */ 467 */
420 sltu t2, len, NBYTES 468 sltu t2, len, NBYTES
421 and t1, dst, ADDRMASK 469 and t1, dst, ADDRMASK
422 bnez t2, .Lcopy_bytes_checklen 470 bnez t2, .Lcopy_bytes_checklen\@
423 and t0, src, ADDRMASK 471 and t0, src, ADDRMASK
424 andi odd, dst, 0x1 /* odd buffer? */ 472 andi odd, dst, 0x1 /* odd buffer? */
425 bnez t1, .Ldst_unaligned 473 bnez t1, .Ldst_unaligned\@
426 nop 474 nop
427 bnez t0, .Lsrc_unaligned_dst_aligned 475 bnez t0, .Lsrc_unaligned_dst_aligned\@
428 /* 476 /*
429 * use delay slot for fall-through 477 * use delay slot for fall-through
430 * src and dst are aligned; need to compute rem 478 * src and dst are aligned; need to compute rem
431 */ 479 */
432.Lboth_aligned: 480.Lboth_aligned\@:
433 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 481 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
434 beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES 482 beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
435 nop 483 nop
436 SUB len, 8*NBYTES # subtract here for bgez loop 484 SUB len, 8*NBYTES # subtract here for bgez loop
437 .align 4 485 .align 4
4381: 4861:
439EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 487 LOAD(t0, UNIT(0)(src), .Ll_exc\@)
440EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 488 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
441EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 489 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
442EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 490 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
443EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy) 491 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
444EXC( LOAD t5, UNIT(5)(src), .Ll_exc_copy) 492 LOAD(t5, UNIT(5)(src), .Ll_exc_copy\@)
445EXC( LOAD t6, UNIT(6)(src), .Ll_exc_copy) 493 LOAD(t6, UNIT(6)(src), .Ll_exc_copy\@)
446EXC( LOAD t7, UNIT(7)(src), .Ll_exc_copy) 494 LOAD(t7, UNIT(7)(src), .Ll_exc_copy\@)
447 SUB len, len, 8*NBYTES 495 SUB len, len, 8*NBYTES
448 ADD src, src, 8*NBYTES 496 ADD src, src, 8*NBYTES
449EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 497 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
450 ADDC(sum, t0) 498 ADDC(sum, t0)
451EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 499 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
452 ADDC(sum, t1) 500 ADDC(sum, t1)
453EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 501 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
454 ADDC(sum, t2) 502 ADDC(sum, t2)
455EXC( STORE t3, UNIT(3)(dst), .Ls_exc) 503 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
456 ADDC(sum, t3) 504 ADDC(sum, t3)
457EXC( STORE t4, UNIT(4)(dst), .Ls_exc) 505 STORE(t4, UNIT(4)(dst), .Ls_exc\@)
458 ADDC(sum, t4) 506 ADDC(sum, t4)
459EXC( STORE t5, UNIT(5)(dst), .Ls_exc) 507 STORE(t5, UNIT(5)(dst), .Ls_exc\@)
460 ADDC(sum, t5) 508 ADDC(sum, t5)
461EXC( STORE t6, UNIT(6)(dst), .Ls_exc) 509 STORE(t6, UNIT(6)(dst), .Ls_exc\@)
462 ADDC(sum, t6) 510 ADDC(sum, t6)
463EXC( STORE t7, UNIT(7)(dst), .Ls_exc) 511 STORE(t7, UNIT(7)(dst), .Ls_exc\@)
464 ADDC(sum, t7) 512 ADDC(sum, t7)
465 .set reorder /* DADDI_WAR */ 513 .set reorder /* DADDI_WAR */
466 ADD dst, dst, 8*NBYTES 514 ADD dst, dst, 8*NBYTES
@@ -471,44 +519,44 @@ EXC( STORE t7, UNIT(7)(dst), .Ls_exc)
471 /* 519 /*
472 * len == the number of bytes left to copy < 8*NBYTES 520 * len == the number of bytes left to copy < 8*NBYTES
473 */ 521 */
474.Lcleanup_both_aligned: 522.Lcleanup_both_aligned\@:
475#define rem t7 523#define rem t7
476 beqz len, .Ldone 524 beqz len, .Ldone\@
477 sltu t0, len, 4*NBYTES 525 sltu t0, len, 4*NBYTES
478 bnez t0, .Lless_than_4units 526 bnez t0, .Lless_than_4units\@
479 and rem, len, (NBYTES-1) # rem = len % NBYTES 527 and rem, len, (NBYTES-1) # rem = len % NBYTES
480 /* 528 /*
481 * len >= 4*NBYTES 529 * len >= 4*NBYTES
482 */ 530 */
483EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 531 LOAD(t0, UNIT(0)(src), .Ll_exc\@)
484EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 532 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
485EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 533 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
486EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 534 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
487 SUB len, len, 4*NBYTES 535 SUB len, len, 4*NBYTES
488 ADD src, src, 4*NBYTES 536 ADD src, src, 4*NBYTES
489EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 537 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
490 ADDC(sum, t0) 538 ADDC(sum, t0)
491EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 539 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
492 ADDC(sum, t1) 540 ADDC(sum, t1)
493EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 541 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
494 ADDC(sum, t2) 542 ADDC(sum, t2)
495EXC( STORE t3, UNIT(3)(dst), .Ls_exc) 543 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
496 ADDC(sum, t3) 544 ADDC(sum, t3)
497 .set reorder /* DADDI_WAR */ 545 .set reorder /* DADDI_WAR */
498 ADD dst, dst, 4*NBYTES 546 ADD dst, dst, 4*NBYTES
499 beqz len, .Ldone 547 beqz len, .Ldone\@
500 .set noreorder 548 .set noreorder
501.Lless_than_4units: 549.Lless_than_4units\@:
502 /* 550 /*
503 * rem = len % NBYTES 551 * rem = len % NBYTES
504 */ 552 */
505 beq rem, len, .Lcopy_bytes 553 beq rem, len, .Lcopy_bytes\@
506 nop 554 nop
5071: 5551:
508EXC( LOAD t0, 0(src), .Ll_exc) 556 LOAD(t0, 0(src), .Ll_exc\@)
509 ADD src, src, NBYTES 557 ADD src, src, NBYTES
510 SUB len, len, NBYTES 558 SUB len, len, NBYTES
511EXC( STORE t0, 0(dst), .Ls_exc) 559 STORE(t0, 0(dst), .Ls_exc\@)
512 ADDC(sum, t0) 560 ADDC(sum, t0)
513 .set reorder /* DADDI_WAR */ 561 .set reorder /* DADDI_WAR */
514 ADD dst, dst, NBYTES 562 ADD dst, dst, NBYTES
@@ -527,20 +575,20 @@ EXC( STORE t0, 0(dst), .Ls_exc)
527 * more instruction-level parallelism. 575 * more instruction-level parallelism.
528 */ 576 */
529#define bits t2 577#define bits t2
530 beqz len, .Ldone 578 beqz len, .Ldone\@
531 ADD t1, dst, len # t1 is just past last byte of dst 579 ADD t1, dst, len # t1 is just past last byte of dst
532 li bits, 8*NBYTES 580 li bits, 8*NBYTES
533 SLL rem, len, 3 # rem = number of bits to keep 581 SLL rem, len, 3 # rem = number of bits to keep
534EXC( LOAD t0, 0(src), .Ll_exc) 582 LOAD(t0, 0(src), .Ll_exc\@)
535 SUB bits, bits, rem # bits = number of bits to discard 583 SUB bits, bits, rem # bits = number of bits to discard
536 SHIFT_DISCARD t0, t0, bits 584 SHIFT_DISCARD t0, t0, bits
537EXC( STREST t0, -1(t1), .Ls_exc) 585 STREST(t0, -1(t1), .Ls_exc\@)
538 SHIFT_DISCARD_REVERT t0, t0, bits 586 SHIFT_DISCARD_REVERT t0, t0, bits
539 .set reorder 587 .set reorder
540 ADDC(sum, t0) 588 ADDC(sum, t0)
541 b .Ldone 589 b .Ldone\@
542 .set noreorder 590 .set noreorder
543.Ldst_unaligned: 591.Ldst_unaligned\@:
544 /* 592 /*
545 * dst is unaligned 593 * dst is unaligned
546 * t0 = src & ADDRMASK 594 * t0 = src & ADDRMASK
@@ -551,25 +599,25 @@ EXC( STREST t0, -1(t1), .Ls_exc)
551 * Set match = (src and dst have same alignment) 599 * Set match = (src and dst have same alignment)
552 */ 600 */
553#define match rem 601#define match rem
554EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) 602 LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
555 ADD t2, zero, NBYTES 603 ADD t2, zero, NBYTES
556EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) 604 LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
557 SUB t2, t2, t1 # t2 = number of bytes copied 605 SUB t2, t2, t1 # t2 = number of bytes copied
558 xor match, t0, t1 606 xor match, t0, t1
559EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) 607 STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
560 SLL t4, t1, 3 # t4 = number of bits to discard 608 SLL t4, t1, 3 # t4 = number of bits to discard
561 SHIFT_DISCARD t3, t3, t4 609 SHIFT_DISCARD t3, t3, t4
562 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */ 610 /* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
563 ADDC(sum, t3) 611 ADDC(sum, t3)
564 beq len, t2, .Ldone 612 beq len, t2, .Ldone\@
565 SUB len, len, t2 613 SUB len, len, t2
566 ADD dst, dst, t2 614 ADD dst, dst, t2
567 beqz match, .Lboth_aligned 615 beqz match, .Lboth_aligned\@
568 ADD src, src, t2 616 ADD src, src, t2
569 617
570.Lsrc_unaligned_dst_aligned: 618.Lsrc_unaligned_dst_aligned\@:
571 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 619 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
572 beqz t0, .Lcleanup_src_unaligned 620 beqz t0, .Lcleanup_src_unaligned\@
573 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 621 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
5741: 6221:
575/* 623/*
@@ -578,53 +626,53 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
578 * It's OK to load FIRST(N+1) before REST(N) because the two addresses 626 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
579 * are to the same unit (unless src is aligned, but it's not). 627 * are to the same unit (unless src is aligned, but it's not).
580 */ 628 */
581EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 629 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
582EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) 630 LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
583 SUB len, len, 4*NBYTES 631 SUB len, len, 4*NBYTES
584EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 632 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
585EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) 633 LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
586EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) 634 LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
587EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) 635 LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
588EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) 636 LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
589EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) 637 LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
590 ADD src, src, 4*NBYTES 638 ADD src, src, 4*NBYTES
591#ifdef CONFIG_CPU_SB1 639#ifdef CONFIG_CPU_SB1
592 nop # improves slotting 640 nop # improves slotting
593#endif 641#endif
594EXC( STORE t0, UNIT(0)(dst), .Ls_exc) 642 STORE(t0, UNIT(0)(dst), .Ls_exc\@)
595 ADDC(sum, t0) 643 ADDC(sum, t0)
596EXC( STORE t1, UNIT(1)(dst), .Ls_exc) 644 STORE(t1, UNIT(1)(dst), .Ls_exc\@)
597 ADDC(sum, t1) 645 ADDC(sum, t1)
598EXC( STORE t2, UNIT(2)(dst), .Ls_exc) 646 STORE(t2, UNIT(2)(dst), .Ls_exc\@)
599 ADDC(sum, t2) 647 ADDC(sum, t2)
600EXC( STORE t3, UNIT(3)(dst), .Ls_exc) 648 STORE(t3, UNIT(3)(dst), .Ls_exc\@)
601 ADDC(sum, t3) 649 ADDC(sum, t3)
602 .set reorder /* DADDI_WAR */ 650 .set reorder /* DADDI_WAR */
603 ADD dst, dst, 4*NBYTES 651 ADD dst, dst, 4*NBYTES
604 bne len, rem, 1b 652 bne len, rem, 1b
605 .set noreorder 653 .set noreorder
606 654
607.Lcleanup_src_unaligned: 655.Lcleanup_src_unaligned\@:
608 beqz len, .Ldone 656 beqz len, .Ldone\@
609 and rem, len, NBYTES-1 # rem = len % NBYTES 657 and rem, len, NBYTES-1 # rem = len % NBYTES
610 beq rem, len, .Lcopy_bytes 658 beq rem, len, .Lcopy_bytes\@
611 nop 659 nop
6121: 6601:
613EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 661 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
614EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 662 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
615 ADD src, src, NBYTES 663 ADD src, src, NBYTES
616 SUB len, len, NBYTES 664 SUB len, len, NBYTES
617EXC( STORE t0, 0(dst), .Ls_exc) 665 STORE(t0, 0(dst), .Ls_exc\@)
618 ADDC(sum, t0) 666 ADDC(sum, t0)
619 .set reorder /* DADDI_WAR */ 667 .set reorder /* DADDI_WAR */
620 ADD dst, dst, NBYTES 668 ADD dst, dst, NBYTES
621 bne len, rem, 1b 669 bne len, rem, 1b
622 .set noreorder 670 .set noreorder
623 671
624.Lcopy_bytes_checklen: 672.Lcopy_bytes_checklen\@:
625 beqz len, .Ldone 673 beqz len, .Ldone\@
626 nop 674 nop
627.Lcopy_bytes: 675.Lcopy_bytes\@:
628 /* 0 < len < NBYTES */ 676 /* 0 < len < NBYTES */
629#ifdef CONFIG_CPU_LITTLE_ENDIAN 677#ifdef CONFIG_CPU_LITTLE_ENDIAN
630#define SHIFT_START 0 678#define SHIFT_START 0
@@ -637,12 +685,12 @@ EXC( STORE t0, 0(dst), .Ls_exc)
637 li t3, SHIFT_START # shift 685 li t3, SHIFT_START # shift
638/* use .Ll_exc_copy here to return correct sum on fault */ 686/* use .Ll_exc_copy here to return correct sum on fault */
639#define COPY_BYTE(N) \ 687#define COPY_BYTE(N) \
640EXC( lbu t0, N(src), .Ll_exc_copy); \ 688 LOADBU(t0, N(src), .Ll_exc_copy\@); \
641 SUB len, len, 1; \ 689 SUB len, len, 1; \
642EXC( sb t0, N(dst), .Ls_exc); \ 690 STOREB(t0, N(dst), .Ls_exc\@); \
643 SLLV t0, t0, t3; \ 691 SLLV t0, t0, t3; \
644 addu t3, SHIFT_INC; \ 692 addu t3, SHIFT_INC; \
645 beqz len, .Lcopy_bytes_done; \ 693 beqz len, .Lcopy_bytes_done\@; \
646 or t2, t0 694 or t2, t0
647 695
648 COPY_BYTE(0) 696 COPY_BYTE(0)
@@ -653,14 +701,14 @@ EXC( sb t0, N(dst), .Ls_exc); \
653 COPY_BYTE(4) 701 COPY_BYTE(4)
654 COPY_BYTE(5) 702 COPY_BYTE(5)
655#endif 703#endif
656EXC( lbu t0, NBYTES-2(src), .Ll_exc_copy) 704 LOADBU(t0, NBYTES-2(src), .Ll_exc_copy\@)
657 SUB len, len, 1 705 SUB len, len, 1
658EXC( sb t0, NBYTES-2(dst), .Ls_exc) 706 STOREB(t0, NBYTES-2(dst), .Ls_exc\@)
659 SLLV t0, t0, t3 707 SLLV t0, t0, t3
660 or t2, t0 708 or t2, t0
661.Lcopy_bytes_done: 709.Lcopy_bytes_done\@:
662 ADDC(sum, t2) 710 ADDC(sum, t2)
663.Ldone: 711.Ldone\@:
664 /* fold checksum */ 712 /* fold checksum */
665#ifdef USE_DOUBLE 713#ifdef USE_DOUBLE
666 dsll32 v1, sum, 0 714 dsll32 v1, sum, 0
@@ -689,7 +737,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
689 jr ra 737 jr ra
690 .set noreorder 738 .set noreorder
691 739
692.Ll_exc_copy: 740.Ll_exc_copy\@:
693 /* 741 /*
694 * Copy bytes from src until faulting load address (or until a 742 * Copy bytes from src until faulting load address (or until a
695 * lb faults) 743 * lb faults)
@@ -700,11 +748,11 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
700 * 748 *
701 * Assumes src < THREAD_BUADDR($28) 749 * Assumes src < THREAD_BUADDR($28)
702 */ 750 */
703 LOAD t0, TI_TASK($28) 751 LOADK t0, TI_TASK($28)
704 li t2, SHIFT_START 752 li t2, SHIFT_START
705 LOAD t0, THREAD_BUADDR(t0) 753 LOADK t0, THREAD_BUADDR(t0)
7061: 7541:
707EXC( lbu t1, 0(src), .Ll_exc) 755 LOADBU(t1, 0(src), .Ll_exc\@)
708 ADD src, src, 1 756 ADD src, src, 1
709 sb t1, 0(dst) # can't fault -- we're copy_from_user 757 sb t1, 0(dst) # can't fault -- we're copy_from_user
710 SLLV t1, t1, t2 758 SLLV t1, t1, t2
@@ -714,10 +762,10 @@ EXC( lbu t1, 0(src), .Ll_exc)
714 ADD dst, dst, 1 762 ADD dst, dst, 1
715 bne src, t0, 1b 763 bne src, t0, 1b
716 .set noreorder 764 .set noreorder
717.Ll_exc: 765.Ll_exc\@:
718 LOAD t0, TI_TASK($28) 766 LOADK t0, TI_TASK($28)
719 nop 767 nop
720 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address 768 LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
721 nop 769 nop
722 SUB len, AT, t0 # len number of uncopied bytes 770 SUB len, AT, t0 # len number of uncopied bytes
723 /* 771 /*
@@ -733,7 +781,7 @@ EXC( lbu t1, 0(src), .Ll_exc)
733 */ 781 */
734 .set reorder /* DADDI_WAR */ 782 .set reorder /* DADDI_WAR */
735 SUB src, len, 1 783 SUB src, len, 1
736 beqz len, .Ldone 784 beqz len, .Ldone\@
737 .set noreorder 785 .set noreorder
7381: sb zero, 0(dst) 7861: sb zero, 0(dst)
739 ADD dst, dst, 1 787 ADD dst, dst, 1
@@ -748,13 +796,31 @@ EXC( lbu t1, 0(src), .Ll_exc)
748 SUB src, src, v1 796 SUB src, src, v1
749#endif 797#endif
750 li v1, -EFAULT 798 li v1, -EFAULT
751 b .Ldone 799 b .Ldone\@
752 sw v1, (errptr) 800 sw v1, (errptr)
753 801
754.Ls_exc: 802.Ls_exc\@:
755 li v0, -1 /* invalid checksum */ 803 li v0, -1 /* invalid checksum */
756 li v1, -EFAULT 804 li v1, -EFAULT
757 jr ra 805 jr ra
758 sw v1, (errptr) 806 sw v1, (errptr)
759 .set pop 807 .set pop
760 END(__csum_partial_copy_user) 808 .endm
809
810LEAF(__csum_partial_copy_kernel)
811#ifndef CONFIG_EVA
812FEXPORT(__csum_partial_copy_to_user)
813FEXPORT(__csum_partial_copy_from_user)
814#endif
815__BUILD_CSUM_PARTIAL_COPY_USER LEGACY_MODE USEROP USEROP 1
816END(__csum_partial_copy_kernel)
817
818#ifdef CONFIG_EVA
819LEAF(__csum_partial_copy_to_user)
820__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE KERNELOP USEROP 0
821END(__csum_partial_copy_to_user)
822
823LEAF(__csum_partial_copy_from_user)
824__BUILD_CSUM_PARTIAL_COPY_USER EVA_MODE USEROP KERNELOP 0
825END(__csum_partial_copy_from_user)
826#endif
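
With the macro rewrite above, the file emits __csum_partial_copy_kernel and, on EVA kernels, genuinely distinct to-user/from-user entry points; on legacy kernels the three symbols alias one body. A hedged sketch of a C caller (the wrapper name is illustrative, and the declaration is written out here although the real one belongs in asm/checksum.h):

#include <linux/types.h>
#include <linux/uaccess.h>

extern __wsum __csum_partial_copy_from_user(const void __user *src, void *dst,
					    int len, __wsum sum, int *err_ptr);

static inline __wsum
csum_and_copy_from_user_sketch(const void __user *src, void *dst,
			       int len, __wsum sum, int *err_ptr)
{
	/* On EVA this must be the user variant, because user and kernel
	 * virtual addresses overlap; on legacy kernels it is the same
	 * code as __csum_partial_copy_kernel. */
	return __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
}
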
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c5c40dad0bbf..c17ef80cf65a 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -10,6 +10,7 @@
10 * Copyright (C) 2002 Broadcom, Inc. 10 * Copyright (C) 2002 Broadcom, Inc.
11 * memcpy/copy_user author: Mark Vandevoorde 11 * memcpy/copy_user author: Mark Vandevoorde
12 * Copyright (C) 2007 Maciej W. Rozycki 12 * Copyright (C) 2007 Maciej W. Rozycki
13 * Copyright (C) 2014 Imagination Technologies Ltd.
13 * 14 *
14 * Mnemonic names for arguments to memcpy/__copy_user 15 * Mnemonic names for arguments to memcpy/__copy_user
15 */ 16 */
@@ -85,11 +86,51 @@
85 * they're not protected. 86 * they're not protected.
86 */ 87 */
87 88
88#define EXC(inst_reg,addr,handler) \ 89/* Instruction type */
899: inst_reg, addr; \ 90#define LD_INSN 1
90 .section __ex_table,"a"; \ 91#define ST_INSN 2
91 PTR 9b, handler; \ 92/* Prefetch type */
92 .previous 93#define SRC_PREFETCH 1
94#define DST_PREFETCH 2
95#define LEGACY_MODE 1
96#define EVA_MODE 2
97#define USEROP 1
98#define KERNELOP 2
99
100/*
101 * Wrapper to add an entry in the exception table
102 * in case the insn causes a memory exception.
103 * Arguments:
104 * insn : Load/store instruction
105 * type : Instruction type
106 * reg : Register
107 * addr : Address
108 * handler : Exception handler
109 */
110
111#define EXC(insn, type, reg, addr, handler) \
112 .if \mode == LEGACY_MODE; \
1139: insn reg, addr; \
114 .section __ex_table,"a"; \
115 PTR 9b, handler; \
116 .previous; \
117 /* This is assembled in EVA mode */ \
118 .else; \
119 /* If loading from user or storing to user */ \
120 .if ((\from == USEROP) && (type == LD_INSN)) || \
121 ((\to == USEROP) && (type == ST_INSN)); \
1229: __BUILD_EVA_INSN(insn##e, reg, addr); \
123 .section __ex_table,"a"; \
124 PTR 9b, handler; \
125 .previous; \
126 .else; \
127 /* \
128 * Still in EVA, but no need for \
129 * exception handler or EVA insn \
130 */ \
131 insn reg, addr; \
132 .endif; \
133 .endif
93 134
94/* 135/*
95 * Only on the 64-bit kernel we can made use of 64-bit registers. 136 * Only on the 64-bit kernel we can made use of 64-bit registers.
@@ -100,12 +141,13 @@
100 141
101#ifdef USE_DOUBLE 142#ifdef USE_DOUBLE
102 143
103#define LOAD ld 144#define LOADK ld /* No exception */
104#define LOADL ldl 145#define LOAD(reg, addr, handler) EXC(ld, LD_INSN, reg, addr, handler)
105#define LOADR ldr 146#define LOADL(reg, addr, handler) EXC(ldl, LD_INSN, reg, addr, handler)
106#define STOREL sdl 147#define LOADR(reg, addr, handler) EXC(ldr, LD_INSN, reg, addr, handler)
107#define STORER sdr 148#define STOREL(reg, addr, handler) EXC(sdl, ST_INSN, reg, addr, handler)
108#define STORE sd 149#define STORER(reg, addr, handler) EXC(sdr, ST_INSN, reg, addr, handler)
150#define STORE(reg, addr, handler) EXC(sd, ST_INSN, reg, addr, handler)
109#define ADD daddu 151#define ADD daddu
110#define SUB dsubu 152#define SUB dsubu
111#define SRL dsrl 153#define SRL dsrl
@@ -136,12 +178,13 @@
136 178
137#else 179#else
138 180
139#define LOAD lw 181#define LOADK lw /* No exception */
140#define LOADL lwl 182#define LOAD(reg, addr, handler) EXC(lw, LD_INSN, reg, addr, handler)
141#define LOADR lwr 183#define LOADL(reg, addr, handler) EXC(lwl, LD_INSN, reg, addr, handler)
142#define STOREL swl 184#define LOADR(reg, addr, handler) EXC(lwr, LD_INSN, reg, addr, handler)
143#define STORER swr 185#define STOREL(reg, addr, handler) EXC(swl, ST_INSN, reg, addr, handler)
144#define STORE sw 186#define STORER(reg, addr, handler) EXC(swr, ST_INSN, reg, addr, handler)
187#define STORE(reg, addr, handler) EXC(sw, ST_INSN, reg, addr, handler)
145#define ADD addu 188#define ADD addu
146#define SUB subu 189#define SUB subu
147#define SRL srl 190#define SRL srl
@@ -154,6 +197,33 @@
154 197
155#endif /* USE_DOUBLE */ 198#endif /* USE_DOUBLE */
156 199
200#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
201#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
202
203#define _PREF(hint, addr, type) \
204 .if \mode == LEGACY_MODE; \
205 PREF(hint, addr); \
206 .else; \
207 .if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
208 ((\to == USEROP) && (type == DST_PREFETCH)); \
209 /* \
210 * PREFE has only 9 bits for the offset \
211 * compared to PREF which has 16, so it may \
212 * need to use the $at register but this \
213 * register should remain intact because it's \
214 * used later on. Therefore use $v1. \
215 */ \
216 .set at=v1; \
217 PREFE(hint, addr); \
218 .set noat; \
219 .else; \
220 PREF(hint, addr); \
221 .endif; \
222 .endif
223
224#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
225#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
226
157#ifdef CONFIG_CPU_LITTLE_ENDIAN 227#ifdef CONFIG_CPU_LITTLE_ENDIAN
158#define LDFIRST LOADR 228#define LDFIRST LOADR
159#define LDREST LOADL 229#define LDREST LOADL
@@ -182,27 +252,23 @@
182 .set at=v1 252 .set at=v1
183#endif 253#endif
184 254
185/*
186 * t6 is used as a flag to note inatomic mode.
187 */
188LEAF(__copy_user_inatomic)
189 b __copy_user_common
190 li t6, 1
191 END(__copy_user_inatomic)
192
193/*
194 * A combined memcpy/__copy_user
195 * __copy_user sets len to 0 for success; else to an upper bound of
196 * the number of uncopied bytes.
197 * memcpy sets v0 to dst.
198 */
199 .align 5 255 .align 5
200LEAF(memcpy) /* a0=dst a1=src a2=len */ 256
201 move v0, dst /* return value */ 257 /*
202.L__memcpy: 258 * Macro to build the __copy_user common code
203FEXPORT(__copy_user) 259 * Arguments:
204 li t6, 0 /* not inatomic */ 260 * mode : LEGACY_MODE or EVA_MODE
205__copy_user_common: 261 * from : Source operand. USEROP or KERNELOP
262 * to : Destination operand. USEROP or KERNELOP
263 */
264 .macro __BUILD_COPY_USER mode, from, to
265
 266 /* initialize __memcpy if this is the first time we execute this macro */
267 .ifnotdef __memcpy
268 .set __memcpy, 1
269 .hidden __memcpy /* make sure it does not leak */
270 .endif
271
206 /* 272 /*
207 * Note: dst & src may be unaligned, len may be 0 273 * Note: dst & src may be unaligned, len may be 0
208 * Temps 274 * Temps
@@ -217,94 +283,94 @@ __copy_user_common:
217 * 283 *
218 * If len < NBYTES use byte operations. 284 * If len < NBYTES use byte operations.
219 */ 285 */
220 PREF( 0, 0(src) ) 286 PREFS( 0, 0(src) )
221 PREF( 1, 0(dst) ) 287 PREFD( 1, 0(dst) )
222 sltu t2, len, NBYTES 288 sltu t2, len, NBYTES
223 and t1, dst, ADDRMASK 289 and t1, dst, ADDRMASK
224 PREF( 0, 1*32(src) ) 290 PREFS( 0, 1*32(src) )
225 PREF( 1, 1*32(dst) ) 291 PREFD( 1, 1*32(dst) )
226 bnez t2, .Lcopy_bytes_checklen 292 bnez t2, .Lcopy_bytes_checklen\@
227 and t0, src, ADDRMASK 293 and t0, src, ADDRMASK
228 PREF( 0, 2*32(src) ) 294 PREFS( 0, 2*32(src) )
229 PREF( 1, 2*32(dst) ) 295 PREFD( 1, 2*32(dst) )
230 bnez t1, .Ldst_unaligned 296 bnez t1, .Ldst_unaligned\@
231 nop 297 nop
232 bnez t0, .Lsrc_unaligned_dst_aligned 298 bnez t0, .Lsrc_unaligned_dst_aligned\@
233 /* 299 /*
234 * use delay slot for fall-through 300 * use delay slot for fall-through
235 * src and dst are aligned; need to compute rem 301 * src and dst are aligned; need to compute rem
236 */ 302 */
237.Lboth_aligned: 303.Lboth_aligned\@:
238 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter 304 SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
239 beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES 305 beqz t0, .Lcleanup_both_aligned\@ # len < 8*NBYTES
240 and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES) 306 and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
241 PREF( 0, 3*32(src) ) 307 PREFS( 0, 3*32(src) )
242 PREF( 1, 3*32(dst) ) 308 PREFD( 1, 3*32(dst) )
243 .align 4 309 .align 4
2441: 3101:
245 R10KCBARRIER(0(ra)) 311 R10KCBARRIER(0(ra))
246EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 312 LOAD(t0, UNIT(0)(src), .Ll_exc\@)
247EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 313 LOAD(t1, UNIT(1)(src), .Ll_exc_copy\@)
248EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 314 LOAD(t2, UNIT(2)(src), .Ll_exc_copy\@)
249EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 315 LOAD(t3, UNIT(3)(src), .Ll_exc_copy\@)
250 SUB len, len, 8*NBYTES 316 SUB len, len, 8*NBYTES
251EXC( LOAD t4, UNIT(4)(src), .Ll_exc_copy) 317 LOAD(t4, UNIT(4)(src), .Ll_exc_copy\@)
252EXC( LOAD t7, UNIT(5)(src), .Ll_exc_copy) 318 LOAD(t7, UNIT(5)(src), .Ll_exc_copy\@)
253EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p8u) 319 STORE(t0, UNIT(0)(dst), .Ls_exc_p8u\@)
254EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p7u) 320 STORE(t1, UNIT(1)(dst), .Ls_exc_p7u\@)
255EXC( LOAD t0, UNIT(6)(src), .Ll_exc_copy) 321 LOAD(t0, UNIT(6)(src), .Ll_exc_copy\@)
256EXC( LOAD t1, UNIT(7)(src), .Ll_exc_copy) 322 LOAD(t1, UNIT(7)(src), .Ll_exc_copy\@)
257 ADD src, src, 8*NBYTES 323 ADD src, src, 8*NBYTES
258 ADD dst, dst, 8*NBYTES 324 ADD dst, dst, 8*NBYTES
259EXC( STORE t2, UNIT(-6)(dst), .Ls_exc_p6u) 325 STORE(t2, UNIT(-6)(dst), .Ls_exc_p6u\@)
260EXC( STORE t3, UNIT(-5)(dst), .Ls_exc_p5u) 326 STORE(t3, UNIT(-5)(dst), .Ls_exc_p5u\@)
261EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u) 327 STORE(t4, UNIT(-4)(dst), .Ls_exc_p4u\@)
262EXC( STORE t7, UNIT(-3)(dst), .Ls_exc_p3u) 328 STORE(t7, UNIT(-3)(dst), .Ls_exc_p3u\@)
263EXC( STORE t0, UNIT(-2)(dst), .Ls_exc_p2u) 329 STORE(t0, UNIT(-2)(dst), .Ls_exc_p2u\@)
264EXC( STORE t1, UNIT(-1)(dst), .Ls_exc_p1u) 330 STORE(t1, UNIT(-1)(dst), .Ls_exc_p1u\@)
265 PREF( 0, 8*32(src) ) 331 PREFS( 0, 8*32(src) )
266 PREF( 1, 8*32(dst) ) 332 PREFD( 1, 8*32(dst) )
267 bne len, rem, 1b 333 bne len, rem, 1b
268 nop 334 nop
269 335
270 /* 336 /*
271 * len == rem == the number of bytes left to copy < 8*NBYTES 337 * len == rem == the number of bytes left to copy < 8*NBYTES
272 */ 338 */
273.Lcleanup_both_aligned: 339.Lcleanup_both_aligned\@:
274 beqz len, .Ldone 340 beqz len, .Ldone\@
275 sltu t0, len, 4*NBYTES 341 sltu t0, len, 4*NBYTES
276 bnez t0, .Lless_than_4units 342 bnez t0, .Lless_than_4units\@
277 and rem, len, (NBYTES-1) # rem = len % NBYTES 343 and rem, len, (NBYTES-1) # rem = len % NBYTES
278 /* 344 /*
279 * len >= 4*NBYTES 345 * len >= 4*NBYTES
280 */ 346 */
281EXC( LOAD t0, UNIT(0)(src), .Ll_exc) 347 LOAD( t0, UNIT(0)(src), .Ll_exc\@)
282EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy) 348 LOAD( t1, UNIT(1)(src), .Ll_exc_copy\@)
283EXC( LOAD t2, UNIT(2)(src), .Ll_exc_copy) 349 LOAD( t2, UNIT(2)(src), .Ll_exc_copy\@)
284EXC( LOAD t3, UNIT(3)(src), .Ll_exc_copy) 350 LOAD( t3, UNIT(3)(src), .Ll_exc_copy\@)
285 SUB len, len, 4*NBYTES 351 SUB len, len, 4*NBYTES
286 ADD src, src, 4*NBYTES 352 ADD src, src, 4*NBYTES
287 R10KCBARRIER(0(ra)) 353 R10KCBARRIER(0(ra))
288EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u) 354 STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
289EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) 355 STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
290EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) 356 STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
291EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) 357 STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
292 .set reorder /* DADDI_WAR */ 358 .set reorder /* DADDI_WAR */
293 ADD dst, dst, 4*NBYTES 359 ADD dst, dst, 4*NBYTES
294 beqz len, .Ldone 360 beqz len, .Ldone\@
295 .set noreorder 361 .set noreorder
296.Lless_than_4units: 362.Lless_than_4units\@:
297 /* 363 /*
298 * rem = len % NBYTES 364 * rem = len % NBYTES
299 */ 365 */
300 beq rem, len, .Lcopy_bytes 366 beq rem, len, .Lcopy_bytes\@
301 nop 367 nop
3021: 3681:
303 R10KCBARRIER(0(ra)) 369 R10KCBARRIER(0(ra))
304EXC( LOAD t0, 0(src), .Ll_exc) 370 LOAD(t0, 0(src), .Ll_exc\@)
305 ADD src, src, NBYTES 371 ADD src, src, NBYTES
306 SUB len, len, NBYTES 372 SUB len, len, NBYTES
307EXC( STORE t0, 0(dst), .Ls_exc_p1u) 373 STORE(t0, 0(dst), .Ls_exc_p1u\@)
308 .set reorder /* DADDI_WAR */ 374 .set reorder /* DADDI_WAR */
309 ADD dst, dst, NBYTES 375 ADD dst, dst, NBYTES
310 bne rem, len, 1b 376 bne rem, len, 1b
@@ -322,17 +388,17 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
322 * more instruction-level parallelism. 388 * more instruction-level parallelism.
323 */ 389 */
324#define bits t2 390#define bits t2
325 beqz len, .Ldone 391 beqz len, .Ldone\@
326 ADD t1, dst, len # t1 is just past last byte of dst 392 ADD t1, dst, len # t1 is just past last byte of dst
327 li bits, 8*NBYTES 393 li bits, 8*NBYTES
328 SLL rem, len, 3 # rem = number of bits to keep 394 SLL rem, len, 3 # rem = number of bits to keep
329EXC( LOAD t0, 0(src), .Ll_exc) 395 LOAD(t0, 0(src), .Ll_exc\@)
330 SUB bits, bits, rem # bits = number of bits to discard 396 SUB bits, bits, rem # bits = number of bits to discard
331 SHIFT_DISCARD t0, t0, bits 397 SHIFT_DISCARD t0, t0, bits
332EXC( STREST t0, -1(t1), .Ls_exc) 398 STREST(t0, -1(t1), .Ls_exc\@)
333 jr ra 399 jr ra
334 move len, zero 400 move len, zero
335.Ldst_unaligned: 401.Ldst_unaligned\@:
336 /* 402 /*
337 * dst is unaligned 403 * dst is unaligned
338 * t0 = src & ADDRMASK 404 * t0 = src & ADDRMASK
@@ -343,25 +409,25 @@ EXC( STREST t0, -1(t1), .Ls_exc)
343 * Set match = (src and dst have same alignment) 409 * Set match = (src and dst have same alignment)
344 */ 410 */
345#define match rem 411#define match rem
346EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc) 412 LDFIRST(t3, FIRST(0)(src), .Ll_exc\@)
347 ADD t2, zero, NBYTES 413 ADD t2, zero, NBYTES
348EXC( LDREST t3, REST(0)(src), .Ll_exc_copy) 414 LDREST(t3, REST(0)(src), .Ll_exc_copy\@)
349 SUB t2, t2, t1 # t2 = number of bytes copied 415 SUB t2, t2, t1 # t2 = number of bytes copied
350 xor match, t0, t1 416 xor match, t0, t1
351 R10KCBARRIER(0(ra)) 417 R10KCBARRIER(0(ra))
352EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc) 418 STFIRST(t3, FIRST(0)(dst), .Ls_exc\@)
353 beq len, t2, .Ldone 419 beq len, t2, .Ldone\@
354 SUB len, len, t2 420 SUB len, len, t2
355 ADD dst, dst, t2 421 ADD dst, dst, t2
356 beqz match, .Lboth_aligned 422 beqz match, .Lboth_aligned\@
357 ADD src, src, t2 423 ADD src, src, t2
358 424
359.Lsrc_unaligned_dst_aligned: 425.Lsrc_unaligned_dst_aligned\@:
360 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter 426 SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
361 PREF( 0, 3*32(src) ) 427 PREFS( 0, 3*32(src) )
362 beqz t0, .Lcleanup_src_unaligned 428 beqz t0, .Lcleanup_src_unaligned\@
363 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES 429 and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
364 PREF( 1, 3*32(dst) ) 430 PREFD( 1, 3*32(dst) )
3651: 4311:
366/* 432/*
367 * Avoid consecutive LD*'s to the same register since some mips 433 * Avoid consecutive LD*'s to the same register since some mips
@@ -370,58 +436,58 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
370 * are to the same unit (unless src is aligned, but it's not). 436 * are to the same unit (unless src is aligned, but it's not).
371 */ 437 */
372 R10KCBARRIER(0(ra)) 438 R10KCBARRIER(0(ra))
373EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 439 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
374EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy) 440 LDFIRST(t1, FIRST(1)(src), .Ll_exc_copy\@)
375 SUB len, len, 4*NBYTES 441 SUB len, len, 4*NBYTES
376EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 442 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
377EXC( LDREST t1, REST(1)(src), .Ll_exc_copy) 443 LDREST(t1, REST(1)(src), .Ll_exc_copy\@)
378EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy) 444 LDFIRST(t2, FIRST(2)(src), .Ll_exc_copy\@)
379EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy) 445 LDFIRST(t3, FIRST(3)(src), .Ll_exc_copy\@)
380EXC( LDREST t2, REST(2)(src), .Ll_exc_copy) 446 LDREST(t2, REST(2)(src), .Ll_exc_copy\@)
381EXC( LDREST t3, REST(3)(src), .Ll_exc_copy) 447 LDREST(t3, REST(3)(src), .Ll_exc_copy\@)
382 PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed) 448 PREFS( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
383 ADD src, src, 4*NBYTES 449 ADD src, src, 4*NBYTES
384#ifdef CONFIG_CPU_SB1 450#ifdef CONFIG_CPU_SB1
385 nop # improves slotting 451 nop # improves slotting
386#endif 452#endif
387EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u) 453 STORE(t0, UNIT(0)(dst), .Ls_exc_p4u\@)
388EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u) 454 STORE(t1, UNIT(1)(dst), .Ls_exc_p3u\@)
389EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u) 455 STORE(t2, UNIT(2)(dst), .Ls_exc_p2u\@)
390EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u) 456 STORE(t3, UNIT(3)(dst), .Ls_exc_p1u\@)
391 PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) 457 PREFD( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
392 .set reorder /* DADDI_WAR */ 458 .set reorder /* DADDI_WAR */
393 ADD dst, dst, 4*NBYTES 459 ADD dst, dst, 4*NBYTES
394 bne len, rem, 1b 460 bne len, rem, 1b
395 .set noreorder 461 .set noreorder
396 462
397.Lcleanup_src_unaligned: 463.Lcleanup_src_unaligned\@:
398 beqz len, .Ldone 464 beqz len, .Ldone\@
399 and rem, len, NBYTES-1 # rem = len % NBYTES 465 and rem, len, NBYTES-1 # rem = len % NBYTES
400 beq rem, len, .Lcopy_bytes 466 beq rem, len, .Lcopy_bytes\@
401 nop 467 nop
4021: 4681:
403 R10KCBARRIER(0(ra)) 469 R10KCBARRIER(0(ra))
404EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc) 470 LDFIRST(t0, FIRST(0)(src), .Ll_exc\@)
405EXC( LDREST t0, REST(0)(src), .Ll_exc_copy) 471 LDREST(t0, REST(0)(src), .Ll_exc_copy\@)
406 ADD src, src, NBYTES 472 ADD src, src, NBYTES
407 SUB len, len, NBYTES 473 SUB len, len, NBYTES
408EXC( STORE t0, 0(dst), .Ls_exc_p1u) 474 STORE(t0, 0(dst), .Ls_exc_p1u\@)
409 .set reorder /* DADDI_WAR */ 475 .set reorder /* DADDI_WAR */
410 ADD dst, dst, NBYTES 476 ADD dst, dst, NBYTES
411 bne len, rem, 1b 477 bne len, rem, 1b
412 .set noreorder 478 .set noreorder
413 479
414.Lcopy_bytes_checklen: 480.Lcopy_bytes_checklen\@:
415 beqz len, .Ldone 481 beqz len, .Ldone\@
416 nop 482 nop
417.Lcopy_bytes: 483.Lcopy_bytes\@:
418 /* 0 < len < NBYTES */ 484 /* 0 < len < NBYTES */
419 R10KCBARRIER(0(ra)) 485 R10KCBARRIER(0(ra))
420#define COPY_BYTE(N) \ 486#define COPY_BYTE(N) \
421EXC( lb t0, N(src), .Ll_exc); \ 487 LOADB(t0, N(src), .Ll_exc\@); \
422 SUB len, len, 1; \ 488 SUB len, len, 1; \
423 beqz len, .Ldone; \ 489 beqz len, .Ldone\@; \
424EXC( sb t0, N(dst), .Ls_exc_p1) 490 STOREB(t0, N(dst), .Ls_exc_p1\@)
425 491
426 COPY_BYTE(0) 492 COPY_BYTE(0)
427 COPY_BYTE(1) 493 COPY_BYTE(1)
@@ -431,16 +497,19 @@ EXC( sb t0, N(dst), .Ls_exc_p1)
431 COPY_BYTE(4) 497 COPY_BYTE(4)
432 COPY_BYTE(5) 498 COPY_BYTE(5)
433#endif 499#endif
434EXC( lb t0, NBYTES-2(src), .Ll_exc) 500 LOADB(t0, NBYTES-2(src), .Ll_exc\@)
435 SUB len, len, 1 501 SUB len, len, 1
436 jr ra 502 jr ra
437EXC( sb t0, NBYTES-2(dst), .Ls_exc_p1) 503 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
438.Ldone: 504.Ldone\@:
439 jr ra 505 jr ra
440 nop 506 .if __memcpy == 1
441 END(memcpy) 507 END(memcpy)
508 .set __memcpy, 0
509 .hidden __memcpy
510 .endif
442 511
443.Ll_exc_copy: 512.Ll_exc_copy\@:
444 /* 513 /*
445 * Copy bytes from src until faulting load address (or until a 514 * Copy bytes from src until faulting load address (or until a
446 * lb faults) 515 * lb faults)
@@ -451,24 +520,24 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc_p1)
451 * 520 *
452 * Assumes src < THREAD_BUADDR($28) 521 * Assumes src < THREAD_BUADDR($28)
453 */ 522 */
454 LOAD t0, TI_TASK($28) 523 LOADK t0, TI_TASK($28)
455 nop 524 nop
456 LOAD t0, THREAD_BUADDR(t0) 525 LOADK t0, THREAD_BUADDR(t0)
4571: 5261:
458EXC( lb t1, 0(src), .Ll_exc) 527 LOADB(t1, 0(src), .Ll_exc\@)
459 ADD src, src, 1 528 ADD src, src, 1
460 sb t1, 0(dst) # can't fault -- we're copy_from_user 529 sb t1, 0(dst) # can't fault -- we're copy_from_user
461 .set reorder /* DADDI_WAR */ 530 .set reorder /* DADDI_WAR */
462 ADD dst, dst, 1 531 ADD dst, dst, 1
463 bne src, t0, 1b 532 bne src, t0, 1b
464 .set noreorder 533 .set noreorder
465.Ll_exc: 534.Ll_exc\@:
466 LOAD t0, TI_TASK($28) 535 LOADK t0, TI_TASK($28)
467 nop 536 nop
468 LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address 537 LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
469 nop 538 nop
470 SUB len, AT, t0 # len number of uncopied bytes 539 SUB len, AT, t0 # len number of uncopied bytes
471 bnez t6, .Ldone /* Skip the zeroing part if inatomic */ 540 bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */
472 /* 541 /*
473 * Here's where we rely on src and dst being incremented in tandem, 542 * Here's where we rely on src and dst being incremented in tandem,
474 * See (3) above. 543 * See (3) above.
@@ -482,7 +551,7 @@ EXC( lb t1, 0(src), .Ll_exc)
482 */ 551 */
483 .set reorder /* DADDI_WAR */ 552 .set reorder /* DADDI_WAR */
484 SUB src, len, 1 553 SUB src, len, 1
485 beqz len, .Ldone 554 beqz len, .Ldone\@
486 .set noreorder 555 .set noreorder
4871: sb zero, 0(dst) 5561: sb zero, 0(dst)
488 ADD dst, dst, 1 557 ADD dst, dst, 1
@@ -503,7 +572,7 @@ EXC( lb t1, 0(src), .Ll_exc)
503 572
504#define SEXC(n) \ 573#define SEXC(n) \
505 .set reorder; /* DADDI_WAR */ \ 574 .set reorder; /* DADDI_WAR */ \
506.Ls_exc_p ## n ## u: \ 575.Ls_exc_p ## n ## u\@: \
507 ADD len, len, n*NBYTES; \ 576 ADD len, len, n*NBYTES; \
508 jr ra; \ 577 jr ra; \
509 .set noreorder 578 .set noreorder
@@ -517,14 +586,15 @@ SEXC(3)
517SEXC(2) 586SEXC(2)
518SEXC(1) 587SEXC(1)
519 588
520.Ls_exc_p1: 589.Ls_exc_p1\@:
521 .set reorder /* DADDI_WAR */ 590 .set reorder /* DADDI_WAR */
522 ADD len, len, 1 591 ADD len, len, 1
523 jr ra 592 jr ra
524 .set noreorder 593 .set noreorder
525.Ls_exc: 594.Ls_exc\@:
526 jr ra 595 jr ra
527 nop 596 nop
597 .endm
528 598
529 .align 5 599 .align 5
530LEAF(memmove) 600LEAF(memmove)
@@ -575,3 +645,71 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
575 jr ra 645 jr ra
576 move a2, zero 646 move a2, zero
577 END(__rmemcpy) 647 END(__rmemcpy)
648
649/*
650 * t6 is used as a flag to note inatomic mode.
651 */
652LEAF(__copy_user_inatomic)
653 b __copy_user_common
654 li t6, 1
655 END(__copy_user_inatomic)
656
657/*
658 * A combined memcpy/__copy_user
659 * __copy_user sets len to 0 for success; else to an upper bound of
660 * the number of uncopied bytes.
661 * memcpy sets v0 to dst.
662 */
663 .align 5
664LEAF(memcpy) /* a0=dst a1=src a2=len */
665 move v0, dst /* return value */
666.L__memcpy:
667FEXPORT(__copy_user)
668 li t6, 0 /* not inatomic */
669__copy_user_common:
670 /* Legacy Mode, user <-> user */
671 __BUILD_COPY_USER LEGACY_MODE USEROP USEROP
672
673#ifdef CONFIG_EVA
674
675/*
676 * For EVA we need distinct symbols for reading and writing to user space.
677 * This is because we need to use specific EVA instructions to perform the
678 * virtual <-> physical translation when a virtual address is actually in user
679 * space
680 */
681
682LEAF(__copy_user_inatomic_eva)
683 b __copy_from_user_common
684 li t6, 1
685 END(__copy_user_inatomic_eva)
686
687/*
688 * __copy_from_user (EVA)
689 */
690
691LEAF(__copy_from_user_eva)
692 li t6, 0 /* not inatomic */
693__copy_from_user_common:
694 __BUILD_COPY_USER EVA_MODE USEROP KERNELOP
695END(__copy_from_user_eva)
696
697
698
699/*
700 * __copy_to_user (EVA)
701 */
702
703LEAF(__copy_to_user_eva)
704__BUILD_COPY_USER EVA_MODE KERNELOP USEROP
705END(__copy_to_user_eva)
706
707/*
708 * __copy_in_user (EVA)
709 */
710
711LEAF(__copy_in_user_eva)
712__BUILD_COPY_USER EVA_MODE USEROP USEROP
713END(__copy_in_user_eva)
714
715#endif
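
Editorial aside, not part of the patch: the comment above notes that __copy_user leaves len at 0 on success and otherwise at an upper bound of the uncopied byte count. That is the value the generic copy_to_user()/copy_from_user() wrappers hand back to C callers, so driver code checks for a non-zero return rather than an error code. A minimal sketch under that convention (my_read() and my_buf are invented names):

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

static char my_buf[64];

static ssize_t my_read(struct file *filp, char __user *ubuf,
		       size_t len, loff_t *ppos)
{
	size_t n = min(len, sizeof(my_buf));

	/* Non-zero return: that many bytes could not be copied. */
	if (copy_to_user(ubuf, my_buf, n))
		return -EFAULT;

	*ppos += n;
	return n;
}

Under CONFIG_EVA the same wrappers are expected to dispatch to the __copy_{to,from,in}_user_eva symbols defined above, so callers are unaffected by the mode split.
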
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 0580194e7402..7b0e5462ca51 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -34,13 +34,27 @@
34#define FILLPTRG t0 34#define FILLPTRG t0
35#endif 35#endif
36 36
37#define LEGACY_MODE 1
38#define EVA_MODE 2
39
40/*
41 * No need to protect it with EVA #ifdefery. The generated block of code
42 * will never be assembled if EVA is not enabled.
43 */
44#define __EVAFY(insn, reg, addr) __BUILD_EVA_INSN(insn##e, reg, addr)
45#define ___BUILD_EVA_INSN(insn, reg, addr) __EVAFY(insn, reg, addr)
46
37#define EX(insn,reg,addr,handler) \ 47#define EX(insn,reg,addr,handler) \
389: insn reg, addr; \ 48 .if \mode == LEGACY_MODE; \
499: insn reg, addr; \
50 .else; \
519: ___BUILD_EVA_INSN(insn, reg, addr); \
52 .endif; \
39 .section __ex_table,"a"; \ 53 .section __ex_table,"a"; \
40 PTR 9b, handler; \ 54 PTR 9b, handler; \
41 .previous 55 .previous
42 56
43 .macro f_fill64 dst, offset, val, fixup 57 .macro f_fill64 dst, offset, val, fixup, mode
44 EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) 58 EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
45 EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) 59 EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
46 EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) 60 EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
@@ -63,34 +77,24 @@
63#endif 77#endif
64 .endm 78 .endm
65 79
66/*
67 * memset(void *s, int c, size_t n)
68 *
69 * a0: start of area to clear
70 * a1: char to fill with
71 * a2: size of area to clear
72 */
73 .set noreorder 80 .set noreorder
74 .align 5 81 .align 5
75LEAF(memset)
76 beqz a1, 1f
77 move v0, a0 /* result */
78 82
79 andi a1, 0xff /* spread fillword */ 83 /*
80 LONG_SLL t1, a1, 8 84 * Macro to generate the __bzero{,_user} symbol
81 or a1, t1 85 * Arguments:
82 LONG_SLL t1, a1, 16 86 * mode: LEGACY_MODE or EVA_MODE
83#if LONGSIZE == 8 87 */
84 or a1, t1 88 .macro __BUILD_BZERO mode
85 LONG_SLL t1, a1, 32 89 /* Initialize __memset if this is the first time we call this macro */
86#endif 90 .ifnotdef __memset
87 or a1, t1 91 .set __memset, 1
881: 92 .hidden __memset /* Make sure it does not leak */
93 .endif
89 94
90FEXPORT(__bzero)
91 sltiu t0, a2, STORSIZE /* very small region? */ 95 sltiu t0, a2, STORSIZE /* very small region? */
92 bnez t0, .Lsmall_memset 96 bnez t0, .Lsmall_memset\@
93 andi t0, a0, STORMASK /* aligned? */ 97 andi t0, a0, STORMASK /* aligned? */
94 98
95#ifdef CONFIG_CPU_MICROMIPS 99#ifdef CONFIG_CPU_MICROMIPS
96 move t8, a1 /* used by 'swp' instruction */ 100 move t8, a1 /* used by 'swp' instruction */
@@ -98,39 +102,39 @@ FEXPORT(__bzero)
98#endif 102#endif
99#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 103#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
100 beqz t0, 1f 104 beqz t0, 1f
101 PTR_SUBU t0, STORSIZE /* alignment in bytes */ 105 PTR_SUBU t0, STORSIZE /* alignment in bytes */
102#else 106#else
103 .set noat 107 .set noat
104 li AT, STORSIZE 108 li AT, STORSIZE
105 beqz t0, 1f 109 beqz t0, 1f
106 PTR_SUBU t0, AT /* alignment in bytes */ 110 PTR_SUBU t0, AT /* alignment in bytes */
107 .set at 111 .set at
108#endif 112#endif
109 113
110 R10KCBARRIER(0(ra)) 114 R10KCBARRIER(0(ra))
111#ifdef __MIPSEB__ 115#ifdef __MIPSEB__
112 EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ 116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
113#endif 117#endif
114#ifdef __MIPSEL__ 118#ifdef __MIPSEL__
115 EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */ 119 EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
116#endif 120#endif
117 PTR_SUBU a0, t0 /* long align ptr */ 121 PTR_SUBU a0, t0 /* long align ptr */
118 PTR_ADDU a2, t0 /* correct size */ 122 PTR_ADDU a2, t0 /* correct size */
119 123
1201: ori t1, a2, 0x3f /* # of full blocks */ 1241: ori t1, a2, 0x3f /* # of full blocks */
121 xori t1, 0x3f 125 xori t1, 0x3f
122 beqz t1, .Lmemset_partial /* no block to fill */ 126 beqz t1, .Lmemset_partial\@ /* no block to fill */
123 andi t0, a2, 0x40-STORSIZE 127 andi t0, a2, 0x40-STORSIZE
124 128
125 PTR_ADDU t1, a0 /* end address */ 129 PTR_ADDU t1, a0 /* end address */
126 .set reorder 130 .set reorder
1271: PTR_ADDIU a0, 64 1311: PTR_ADDIU a0, 64
128 R10KCBARRIER(0(ra)) 132 R10KCBARRIER(0(ra))
129 f_fill64 a0, -64, FILL64RG, .Lfwd_fixup 133 f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
130 bne t1, a0, 1b 134 bne t1, a0, 1b
131 .set noreorder 135 .set noreorder
132 136
133.Lmemset_partial: 137.Lmemset_partial\@:
134 R10KCBARRIER(0(ra)) 138 R10KCBARRIER(0(ra))
135 PTR_LA t1, 2f /* where to start */ 139 PTR_LA t1, 2f /* where to start */
136#ifdef CONFIG_CPU_MICROMIPS 140#ifdef CONFIG_CPU_MICROMIPS
@@ -145,60 +149,100 @@ FEXPORT(__bzero)
145 .set at 149 .set at
146#endif 150#endif
147 jr t1 151 jr t1
148 PTR_ADDU a0, t0 /* dest ptr */ 152 PTR_ADDU a0, t0 /* dest ptr */
149 153
150 .set push 154 .set push
151 .set noreorder 155 .set noreorder
152 .set nomacro 156 .set nomacro
153 f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */ 157 /* ... but first do longs ... */
158 f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
1542: .set pop 1592: .set pop
155 andi a2, STORMASK /* At most one long to go */ 160 andi a2, STORMASK /* At most one long to go */
156 161
157 beqz a2, 1f 162 beqz a2, 1f
158 PTR_ADDU a0, a2 /* What's left */ 163 PTR_ADDU a0, a2 /* What's left */
159 R10KCBARRIER(0(ra)) 164 R10KCBARRIER(0(ra))
160#ifdef __MIPSEB__ 165#ifdef __MIPSEB__
161 EX(LONG_S_R, a1, -1(a0), .Llast_fixup) 166 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
162#endif 167#endif
163#ifdef __MIPSEL__ 168#ifdef __MIPSEL__
164 EX(LONG_S_L, a1, -1(a0), .Llast_fixup) 169 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
165#endif 170#endif
1661: jr ra 1711: jr ra
167 move a2, zero 172 move a2, zero
168 173
169.Lsmall_memset: 174.Lsmall_memset\@:
170 beqz a2, 2f 175 beqz a2, 2f
171 PTR_ADDU t1, a0, a2 176 PTR_ADDU t1, a0, a2
172 177
1731: PTR_ADDIU a0, 1 /* fill bytewise */ 1781: PTR_ADDIU a0, 1 /* fill bytewise */
174 R10KCBARRIER(0(ra)) 179 R10KCBARRIER(0(ra))
175 bne t1, a0, 1b 180 bne t1, a0, 1b
176 sb a1, -1(a0) 181 sb a1, -1(a0)
177 182
1782: jr ra /* done */ 1832: jr ra /* done */
179 move a2, zero 184 move a2, zero
185 .if __memset == 1
180 END(memset) 186 END(memset)
187 .set __memset, 0
188 .hidden __memset
189 .endif
181 190
182.Lfirst_fixup: 191.Lfirst_fixup\@:
183 jr ra 192 jr ra
184 nop 193 nop
185 194
186.Lfwd_fixup: 195.Lfwd_fixup\@:
187 PTR_L t0, TI_TASK($28) 196 PTR_L t0, TI_TASK($28)
188 andi a2, 0x3f 197 andi a2, 0x3f
189 LONG_L t0, THREAD_BUADDR(t0) 198 LONG_L t0, THREAD_BUADDR(t0)
190 LONG_ADDU a2, t1 199 LONG_ADDU a2, t1
191 jr ra 200 jr ra
192 LONG_SUBU a2, t0 201 LONG_SUBU a2, t0
193 202
194.Lpartial_fixup: 203.Lpartial_fixup\@:
195 PTR_L t0, TI_TASK($28) 204 PTR_L t0, TI_TASK($28)
196 andi a2, STORMASK 205 andi a2, STORMASK
197 LONG_L t0, THREAD_BUADDR(t0) 206 LONG_L t0, THREAD_BUADDR(t0)
198 LONG_ADDU a2, t1 207 LONG_ADDU a2, t1
199 jr ra 208 jr ra
200 LONG_SUBU a2, t0 209 LONG_SUBU a2, t0
201 210
202.Llast_fixup: 211.Llast_fixup\@:
203 jr ra 212 jr ra
204 andi v1, a2, STORMASK 213 andi v1, a2, STORMASK
214
215 .endm
216
217/*
218 * memset(void *s, int c, size_t n)
219 *
220 * a0: start of area to clear
221 * a1: char to fill with
222 * a2: size of area to clear
223 */
224
225LEAF(memset)
226 beqz a1, 1f
227 move v0, a0 /* result */
228
229 andi a1, 0xff /* spread fillword */
230 LONG_SLL t1, a1, 8
231 or a1, t1
232 LONG_SLL t1, a1, 16
233#if LONGSIZE == 8
234 or a1, t1
235 LONG_SLL t1, a1, 32
236#endif
237 or a1, t1
2381:
239#ifndef CONFIG_EVA
240FEXPORT(__bzero)
241#endif
242 __BUILD_BZERO LEGACY_MODE
243
244#ifdef CONFIG_EVA
245LEAF(__bzero)
246 __BUILD_BZERO EVA_MODE
247END(__bzero)
248#endif
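
Another aside, not from the patch itself: the __bzero built by this macro is what the MIPS __clear_user()/clear_user() helpers branch to, and it keeps the same convention as the copy routines above, returning the number of bytes it could not zero. A small hedged sketch (zero_user_args() is an invented name):

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Zero a user-supplied buffer; 0 on success, -EFAULT on a bad pointer. */
static int zero_user_args(void __user *ubuf, size_t len)
{
	if (clear_user(ubuf, len))	/* non-zero: bytes left unzeroed */
		return -EFAULT;
	return 0;
}
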
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index e362dcdc69d1..bef65c98df59 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -22,19 +22,43 @@
22 * 22 *
23 * Return 0 for error 23 * Return 0 for error
24 */ 24 */
25LEAF(__strlen_user_asm) 25 .macro __BUILD_STRLEN_ASM func
26LEAF(__strlen_\func\()_asm)
26 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 27 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
27 and v0, a0 28 and v0, a0
28 bnez v0, .Lfault 29 bnez v0, .Lfault\@
29 30
30FEXPORT(__strlen_user_nocheck_asm) 31FEXPORT(__strlen_\func\()_nocheck_asm)
31 move v0, a0 32 move v0, a0
321: EX(lbu, v1, (v0), .Lfault) 33.ifeqs "\func", "kernel"
341: EX(lbu, v1, (v0), .Lfault\@)
35.else
361: EX(lbue, v1, (v0), .Lfault\@)
37.endif
33 PTR_ADDIU v0, 1 38 PTR_ADDIU v0, 1
34 bnez v1, 1b 39 bnez v1, 1b
35 PTR_SUBU v0, a0 40 PTR_SUBU v0, a0
36 jr ra 41 jr ra
37 END(__strlen_user_asm) 42 END(__strlen_\func\()_asm)
38 43
39.Lfault: move v0, zero 44.Lfault\@: move v0, zero
40 jr ra 45 jr ra
46 .endm
47
48#ifndef CONFIG_EVA
49 /* Set aliases */
50 .global __strlen_user_asm
51 .global __strlen_user_nocheck_asm
52 .set __strlen_user_asm, __strlen_kernel_asm
53 .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm
54#endif
55
56__BUILD_STRLEN_ASM kernel
57
58#ifdef CONFIG_EVA
59
60 .set push
61 .set eva
62__BUILD_STRLEN_ASM user
63 .set pop
64#endif
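
For context (not part of the patch): the LONG_L of TI_ADDR_LIMIT followed by the and/bnez at the top of the macro is the assembly counterpart of the access_ok() range check performed by the C wrappers, and the *_nocheck_asm entry points skip it, just as the __-prefixed C accessors do. A hedged sketch of that split, using the three-argument access_ok() of this kernel generation (read_user_byte() is an invented name):

#include <linux/uaccess.h>
#include <linux/errno.h>

static int read_user_byte(const unsigned char __user *p, unsigned char *out)
{
	if (!access_ok(VERIFY_READ, p, 1))	/* range check, as in the asm above */
		return -EFAULT;
	/* __get_user() skips the range check but still handles faults. */
	return __get_user(*out, p);
}
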
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 92870b6b53ea..d3301cd1e9a5 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -28,16 +28,21 @@
28 * it happens at most some bytes of the exceptions handlers will be copied. 28 * it happens at most some bytes of the exceptions handlers will be copied.
29 */ 29 */
30 30
31LEAF(__strncpy_from_user_asm) 31 .macro __BUILD_STRNCPY_ASM func
32LEAF(__strncpy_from_\func\()_asm)
32 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 33 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
33 and v0, a1 34 and v0, a1
34 bnez v0, .Lfault 35 bnez v0, .Lfault\@
35 36
36FEXPORT(__strncpy_from_user_nocheck_asm) 37FEXPORT(__strncpy_from_\func\()_nocheck_asm)
37 .set noreorder 38 .set noreorder
38 move t0, zero 39 move t0, zero
39 move v1, a1 40 move v1, a1
401: EX(lbu, v0, (v1), .Lfault) 41.ifeqs "\func","kernel"
421: EX(lbu, v0, (v1), .Lfault\@)
43.else
441: EX(lbue, v0, (v1), .Lfault\@)
45.endif
41 PTR_ADDIU v1, 1 46 PTR_ADDIU v1, 1
42 R10KCBARRIER(0(ra)) 47 R10KCBARRIER(0(ra))
43 beqz v0, 2f 48 beqz v0, 2f
@@ -47,15 +52,34 @@ FEXPORT(__strncpy_from_user_nocheck_asm)
47 PTR_ADDIU a0, 1 52 PTR_ADDIU a0, 1
482: PTR_ADDU v0, a1, t0 532: PTR_ADDU v0, a1, t0
49 xor v0, a1 54 xor v0, a1
50 bltz v0, .Lfault 55 bltz v0, .Lfault\@
51 nop 56 nop
52 jr ra # return n 57 jr ra # return n
53 move v0, t0 58 move v0, t0
54 END(__strncpy_from_user_asm) 59 END(__strncpy_from_\func\()_asm)
55 60
56.Lfault: jr ra 61.Lfault\@: jr ra
57 li v0, -EFAULT 62 li v0, -EFAULT
58 63
59 .section __ex_table,"a" 64 .section __ex_table,"a"
60 PTR 1b, .Lfault 65 PTR 1b, .Lfault\@
61 .previous 66 .previous
67
68 .endm
69
70#ifndef CONFIG_EVA
71 /* Set aliases */
72 .global __strncpy_from_user_asm
73 .global __strncpy_from_user_nocheck_asm
74 .set __strncpy_from_user_asm, __strncpy_from_kernel_asm
75 .set __strncpy_from_user_nocheck_asm, __strncpy_from_kernel_nocheck_asm
76#endif
77
78__BUILD_STRNCPY_ASM kernel
79
80#ifdef CONFIG_EVA
81 .set push
82 .set eva
83__BUILD_STRNCPY_ASM user
84 .set pop
85#endif
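
As a usage note (not from the patch): these symbols back strncpy_from_user(), whose return convention is visible in the code above: a negative value (-EFAULT here) on a faulting access, otherwise the number of bytes copied, which equals the limit when no terminating NUL was found. A short sketch (fetch_user_name() and NAME_BUF_LEN are invented):

#include <linux/uaccess.h>
#include <linux/errno.h>

#define NAME_BUF_LEN 32

/* dst must provide at least NAME_BUF_LEN bytes. */
static int fetch_user_name(char *dst, const char __user *src)
{
	long n = strncpy_from_user(dst, src, NAME_BUF_LEN);

	if (n < 0)			/* fault while reading user memory */
		return n;
	if (n == NAME_BUF_LEN)		/* no NUL within the limit, dst not terminated */
		return -ENAMETOOLONG;
	return 0;			/* dst now holds a NUL-terminated copy */
}
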
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index fcacea5e61f1..f3af6995e2a6 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -25,22 +25,46 @@
25 * bytes. There's nothing secret there. On 64-bit accessing beyond 25 * bytes. There's nothing secret there. On 64-bit accessing beyond
26 * the maximum is a tad hairier ... 26 * the maximum is a tad hairier ...
27 */ 27 */
28LEAF(__strnlen_user_asm) 28 .macro __BUILD_STRNLEN_ASM func
29LEAF(__strnlen_\func\()_asm)
29 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 30 LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
30 and v0, a0 31 and v0, a0
31 bnez v0, .Lfault 32 bnez v0, .Lfault\@
32 33
33FEXPORT(__strnlen_user_nocheck_asm) 34FEXPORT(__strnlen_\func\()_nocheck_asm)
34 move v0, a0 35 move v0, a0
35 PTR_ADDU a1, a0 # stop pointer 36 PTR_ADDU a1, a0 # stop pointer
361: beq v0, a1, 1f # limit reached? 371: beq v0, a1, 1f # limit reached?
37 EX(lb, t0, (v0), .Lfault) 38.ifeqs "\func", "kernel"
39 EX(lb, t0, (v0), .Lfault\@)
40.else
41 EX(lbe, t0, (v0), .Lfault\@)
42.endif
38 PTR_ADDIU v0, 1 43 PTR_ADDIU v0, 1
39 bnez t0, 1b 44 bnez t0, 1b
401: PTR_SUBU v0, a0 451: PTR_SUBU v0, a0
41 jr ra 46 jr ra
42 END(__strnlen_user_asm) 47 END(__strnlen_\func\()_asm)
43 48
44.Lfault: 49.Lfault\@:
45 move v0, zero 50 move v0, zero
46 jr ra 51 jr ra
52 .endm
53
54#ifndef CONFIG_EVA
55 /* Set aliases */
56 .global __strnlen_user_asm
57 .global __strnlen_user_nocheck_asm
58 .set __strnlen_user_asm, __strnlen_kernel_asm
59 .set __strnlen_user_nocheck_asm, __strnlen_kernel_nocheck_asm
60#endif
61
62__BUILD_STRNLEN_ASM kernel
63
64#ifdef CONFIG_EVA
65
66 .set push
67 .set eva
68__BUILD_STRNLEN_ASM user
69 .set pop
70#endif
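
One more aside, not part of the patch: the macro above implements strnlen_user(), which counts up to the stop pointer and returns the length including the terminating NUL, or 0 if the access faults. A hedged sketch (user_string_len() is an invented helper):

#include <linux/uaccess.h>
#include <linux/errno.h>

static long user_string_len(const char __user *s, long limit)
{
	long n = strnlen_user(s, limit);	/* length including the NUL, capped at limit */

	if (!n)					/* 0 signals a faulting access */
		return -EFAULT;
	return n - 1;				/* bytes before the NUL */
}
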
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 263beb9322a8..7397be226a06 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -59,6 +59,36 @@ config LEMOTE_MACH2F
59 59
60 These family machines include fuloong2f mini PC, yeeloong2f notebook, 60 These family machines include fuloong2f mini PC, yeeloong2f notebook,
61 LingLoong allinone PC and so forth. 61 LingLoong allinone PC and so forth.
62
63config LEMOTE_MACH3A
64 bool "Lemote Loongson 3A family machines"
65 select ARCH_SPARSEMEM_ENABLE
66 select GENERIC_ISA_DMA_SUPPORT_BROKEN
67 select GENERIC_HARDIRQS_NO__DO_IRQ
68 select BOOT_ELF32
69 select BOARD_SCACHE
70 select CSRC_R4K
71 select CEVT_R4K
72 select CPU_HAS_WB
73 select HW_HAS_PCI
74 select ISA
75 select HT_PCI
76 select I8259
77 select IRQ_CPU
78 select NR_CPUS_DEFAULT_4
79 select SYS_HAS_CPU_LOONGSON3
80 select SYS_HAS_EARLY_PRINTK
81 select SYS_SUPPORTS_SMP
82 select SYS_SUPPORTS_HOTPLUG_CPU
83 select SYS_SUPPORTS_64BIT_KERNEL
84 select SYS_SUPPORTS_HIGHMEM
85 select SYS_SUPPORTS_LITTLE_ENDIAN
86 select LOONGSON_MC146818
87 select ZONE_DMA32
88 select LEFI_FIRMWARE_INTERFACE
89 help
90 Lemote Loongson 3A family machines utilize the 3A revision of
91 Loongson processor and RS780/SBX00 chipset.
62endchoice 92endchoice
63 93
64config CS5536 94config CS5536
@@ -86,8 +116,25 @@ config LOONGSON_UART_BASE
86 default y 116 default y
87 depends on EARLY_PRINTK || SERIAL_8250 117 depends on EARLY_PRINTK || SERIAL_8250
88 118
119config IOMMU_HELPER
120 bool
121
122config NEED_SG_DMA_LENGTH
123 bool
124
125config SWIOTLB
126 bool "Soft IOMMU Support for All-Memory DMA"
127 default y
128 depends on CPU_LOONGSON3
129 select IOMMU_HELPER
130 select NEED_SG_DMA_LENGTH
131 select NEED_DMA_MAP_STATE
132
89config LOONGSON_MC146818 133config LOONGSON_MC146818
90 bool 134 bool
91 default n 135 default n
92 136
137config LEFI_FIRMWARE_INTERFACE
138 bool
139
93endif # MACH_LOONGSON 140endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/Makefile b/arch/mips/loongson/Makefile
index 0dc0055754cd..7429994e7604 100644
--- a/arch/mips/loongson/Makefile
+++ b/arch/mips/loongson/Makefile
@@ -15,3 +15,9 @@ obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
15# 15#
16 16
17obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/ 17obj-$(CONFIG_LEMOTE_MACH2F) += lemote-2f/
18
19#
20# All Loongson-3 family machines
21#
22
23obj-$(CONFIG_CPU_LOONGSON3) += loongson-3/
diff --git a/arch/mips/loongson/Platform b/arch/mips/loongson/Platform
index 29692e5433b1..6205372b6c2d 100644
--- a/arch/mips/loongson/Platform
+++ b/arch/mips/loongson/Platform
@@ -30,3 +30,4 @@ platform-$(CONFIG_MACH_LOONGSON) += loongson/
30cflags-$(CONFIG_MACH_LOONGSON) += -I$(srctree)/arch/mips/include/asm/mach-loongson -mno-branch-likely 30cflags-$(CONFIG_MACH_LOONGSON) += -I$(srctree)/arch/mips/include/asm/mach-loongson -mno-branch-likely
31load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000 31load-$(CONFIG_LEMOTE_FULOONG2E) += 0xffffffff80100000
32load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000 32load-$(CONFIG_LEMOTE_MACH2F) += 0xffffffff80200000
33load-$(CONFIG_CPU_LOONGSON3) += 0xffffffff80200000
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 9e4484ccbb03..0bb9cc9dc621 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -26,3 +26,8 @@ obj-$(CONFIG_CS5536) += cs5536/
26# 26#
27 27
28obj-$(CONFIG_LOONGSON_SUSPEND) += pm.o 28obj-$(CONFIG_LOONGSON_SUSPEND) += pm.o
29
30#
31# Big Memory (SWIOTLB) Support
32#
33obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
diff --git a/arch/mips/loongson/common/dma-swiotlb.c b/arch/mips/loongson/common/dma-swiotlb.c
new file mode 100644
index 000000000000..c2be01f91575
--- /dev/null
+++ b/arch/mips/loongson/common/dma-swiotlb.c
@@ -0,0 +1,136 @@
1#include <linux/mm.h>
2#include <linux/init.h>
3#include <linux/dma-mapping.h>
4#include <linux/scatterlist.h>
5#include <linux/swiotlb.h>
6#include <linux/bootmem.h>
7
8#include <asm/bootinfo.h>
9#include <boot_param.h>
10#include <dma-coherence.h>
11
12static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
13 dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
14{
15 void *ret;
16
17 if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
18 return ret;
19
20 /* ignore region specifiers */
21 gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
22
23#ifdef CONFIG_ISA
24 if (dev == NULL)
25 gfp |= __GFP_DMA;
26 else
27#endif
28#ifdef CONFIG_ZONE_DMA
29 if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
30 gfp |= __GFP_DMA;
31 else
32#endif
33#ifdef CONFIG_ZONE_DMA32
34 if (dev->coherent_dma_mask < DMA_BIT_MASK(40))
35 gfp |= __GFP_DMA32;
36 else
37#endif
38 ;
39 gfp |= __GFP_NORETRY;
40
41 ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
42 mb();
43 return ret;
44}
45
46static void loongson_dma_free_coherent(struct device *dev, size_t size,
47 void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
48{
49 int order = get_order(size);
50
51 if (dma_release_from_coherent(dev, order, vaddr))
52 return;
53
54 swiotlb_free_coherent(dev, size, vaddr, dma_handle);
55}
56
57static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
58 unsigned long offset, size_t size,
59 enum dma_data_direction dir,
60 struct dma_attrs *attrs)
61{
62 dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
63 dir, attrs);
64 mb();
65 return daddr;
66}
67
68static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
69 int nents, enum dma_data_direction dir,
70 struct dma_attrs *attrs)
71{
72 int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
73 mb();
74
75 return r;
76}
77
78static void loongson_dma_sync_single_for_device(struct device *dev,
79 dma_addr_t dma_handle, size_t size,
80 enum dma_data_direction dir)
81{
82 swiotlb_sync_single_for_device(dev, dma_handle, size, dir);
83 mb();
84}
85
86static void loongson_dma_sync_sg_for_device(struct device *dev,
87 struct scatterlist *sg, int nents,
88 enum dma_data_direction dir)
89{
90 swiotlb_sync_sg_for_device(dev, sg, nents, dir);
91 mb();
92}
93
94static int loongson_dma_set_mask(struct device *dev, u64 mask)
95{
96 if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
97 *dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
98 return -EIO;
99 }
100
101 *dev->dma_mask = mask;
102
103 return 0;
104}
105
106dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
107{
108 return paddr;
109}
110
111phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
112{
113 return daddr;
114}
115
116static struct dma_map_ops loongson_dma_map_ops = {
117 .alloc = loongson_dma_alloc_coherent,
118 .free = loongson_dma_free_coherent,
119 .map_page = loongson_dma_map_page,
120 .unmap_page = swiotlb_unmap_page,
121 .map_sg = loongson_dma_map_sg,
122 .unmap_sg = swiotlb_unmap_sg_attrs,
123 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
124 .sync_single_for_device = loongson_dma_sync_single_for_device,
125 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
126 .sync_sg_for_device = loongson_dma_sync_sg_for_device,
127 .mapping_error = swiotlb_dma_mapping_error,
128 .dma_supported = swiotlb_dma_supported,
129 .set_dma_mask = loongson_dma_set_mask
130};
131
132void __init plat_swiotlb_setup(void)
133{
134 swiotlb_init(1);
135 mips_dma_map_ops = &loongson_dma_map_ops;
136}
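
A usage-level note, not part of the new file: these dma_map_ops plug into the generic DMA API, so drivers keep calling dma_map_single() and friends; on these boards the call lands in loongson_dma_map_page(), i.e. swiotlb_map_page() followed by the mb() that orders the bounce-buffer write against the device kick. A minimal hedged sketch (map_tx_buffer() is an invented name):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a driver buffer for a device-bound transfer. */
static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* ... program the device, then when the transfer completes: */
	/* dma_unmap_single(dev, *handle, len, DMA_TO_DEVICE); */
	return 0;
}
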
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index 0a18fcf2d372..0c543eae49bf 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -18,29 +18,30 @@
18 * option) any later version. 18 * option) any later version.
19 */ 19 */
20#include <linux/module.h> 20#include <linux/module.h>
21
22#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
23
24#include <loongson.h> 22#include <loongson.h>
23#include <boot_param.h>
25 24
26unsigned long cpu_clock_freq; 25u32 cpu_clock_freq;
27EXPORT_SYMBOL(cpu_clock_freq); 26EXPORT_SYMBOL(cpu_clock_freq);
28unsigned long memsize, highmemsize; 27struct efi_memory_map_loongson *loongson_memmap;
28struct loongson_system_configuration loongson_sysconf;
29 29
30#define parse_even_earlier(res, option, p) \ 30#define parse_even_earlier(res, option, p) \
31do { \ 31do { \
32 unsigned int tmp __maybe_unused; \ 32 unsigned int tmp __maybe_unused; \
33 \ 33 \
34 if (strncmp(option, (char *)p, strlen(option)) == 0) \ 34 if (strncmp(option, (char *)p, strlen(option)) == 0) \
35 tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ 35 tmp = kstrtou32((char *)p + strlen(option"="), 10, &res); \
36} while (0) 36} while (0)
37 37
38void __init prom_init_env(void) 38void __init prom_init_env(void)
39{ 39{
40 /* pmon passes arguments in 32bit pointers */ 40 /* pmon passes arguments in 32bit pointers */
41 int *_prom_envp;
42 unsigned long bus_clock;
43 unsigned int processor_id; 41 unsigned int processor_id;
42
43#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
44 int *_prom_envp;
44 long l; 45 long l;
45 46
46 /* firmware arguments are initialized in head.S */ 47 /* firmware arguments are initialized in head.S */
@@ -48,7 +49,6 @@ void __init prom_init_env(void)
48 49
49 l = (long)*_prom_envp; 50 l = (long)*_prom_envp;
50 while (l != 0) { 51 while (l != 0) {
51 parse_even_earlier(bus_clock, "busclock", l);
52 parse_even_earlier(cpu_clock_freq, "cpuclock", l); 52 parse_even_earlier(cpu_clock_freq, "cpuclock", l);
53 parse_even_earlier(memsize, "memsize", l); 53 parse_even_earlier(memsize, "memsize", l);
54 parse_even_earlier(highmemsize, "highmemsize", l); 54 parse_even_earlier(highmemsize, "highmemsize", l);
@@ -57,8 +57,48 @@ void __init prom_init_env(void)
57 } 57 }
58 if (memsize == 0) 58 if (memsize == 0)
59 memsize = 256; 59 memsize = 256;
60 if (bus_clock == 0) 60 pr_info("memsize=%u, highmemsize=%u\n", memsize, highmemsize);
61 bus_clock = 66000000; 61#else
62 struct boot_params *boot_p;
63 struct loongson_params *loongson_p;
64 struct efi_cpuinfo_loongson *ecpu;
65 struct irq_source_routing_table *eirq_source;
66
67 /* firmware arguments are initialized in head.S */
68 boot_p = (struct boot_params *)fw_arg2;
69 loongson_p = &(boot_p->efi.smbios.lp);
70
71 ecpu = (struct efi_cpuinfo_loongson *)
72 ((u64)loongson_p + loongson_p->cpu_offset);
73 eirq_source = (struct irq_source_routing_table *)
74 ((u64)loongson_p + loongson_p->irq_offset);
75 loongson_memmap = (struct efi_memory_map_loongson *)
76 ((u64)loongson_p + loongson_p->memory_offset);
77
78 cpu_clock_freq = ecpu->cpu_clock_freq;
79 loongson_sysconf.cputype = ecpu->cputype;
80 loongson_sysconf.nr_cpus = ecpu->nr_cpus;
81 if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
82 loongson_sysconf.nr_cpus = NR_CPUS;
83
84 loongson_sysconf.pci_mem_start_addr = eirq_source->pci_mem_start_addr;
85 loongson_sysconf.pci_mem_end_addr = eirq_source->pci_mem_end_addr;
86 loongson_sysconf.pci_io_base = eirq_source->pci_io_start_addr;
87 loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits;
88 if (loongson_sysconf.dma_mask_bits < 32 ||
89 loongson_sysconf.dma_mask_bits > 64)
90 loongson_sysconf.dma_mask_bits = 32;
91
92 loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm;
93 loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown;
94 loongson_sysconf.suspend_addr = boot_p->reset_system.DoSuspend;
95
96 loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
97 loongson_sysconf.vgabios_addr = boot_p->efi.smbios.vga_bios;
98 pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
99 loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
100 loongson_sysconf.vgabios_addr);
101#endif
62 if (cpu_clock_freq == 0) { 102 if (cpu_clock_freq == 0) {
63 processor_id = (&current_cpu_data)->processor_id; 103 processor_id = (&current_cpu_data)->processor_id;
64 switch (processor_id & PRID_REV_MASK) { 104 switch (processor_id & PRID_REV_MASK) {
@@ -68,12 +108,13 @@ void __init prom_init_env(void)
68 case PRID_REV_LOONGSON2F: 108 case PRID_REV_LOONGSON2F:
69 cpu_clock_freq = 797000000; 109 cpu_clock_freq = 797000000;
70 break; 110 break;
111 case PRID_REV_LOONGSON3A:
112 cpu_clock_freq = 900000000;
113 break;
71 default: 114 default:
72 cpu_clock_freq = 100000000; 115 cpu_clock_freq = 100000000;
73 break; 116 break;
74 } 117 }
75 } 118 }
76 119 pr_info("CpuClock = %u\n", cpu_clock_freq);
77 pr_info("busclock=%ld, cpuclock=%ld, memsize=%ld, highmemsize=%ld\n",
78 bus_clock, cpu_clock_freq, memsize, highmemsize);
79} 120}
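
Side note on the helper swap above (not part of the patch): unlike strict_strtol(), kstrtou32() returns 0 or a negative errno and writes the parsed value through its result pointer, which is why parse_even_earlier() keeps the return code in a __maybe_unused temporary. A tiny hedged sketch (parse_cpuclock() is an invented name):

#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_cpuclock(const char *arg, u32 *freq)
{
	int err = kstrtou32(arg, 10, freq);	/* 0 on success, -errno otherwise */

	if (err)
		return err;
	if (*freq == 0)
		return -EINVAL;			/* let the PRID-based default kick in */
	return 0;
}
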
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index ae7af1fd5d59..f37fe5413b73 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12#include <asm/smp-ops.h>
12 13
13#include <loongson.h> 14#include <loongson.h>
14 15
@@ -17,10 +18,6 @@ unsigned long __maybe_unused _loongson_addrwincfg_base;
17 18
18void __init prom_init(void) 19void __init prom_init(void)
19{ 20{
20 /* init base address of io space */
21 set_io_port_base((unsigned long)
22 ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
23
24#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG 21#ifdef CONFIG_CPU_SUPPORTS_ADDRWINCFG
25 _loongson_addrwincfg_base = (unsigned long) 22 _loongson_addrwincfg_base = (unsigned long)
26 ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE); 23 ioremap(LOONGSON_ADDRWINCFG_BASE, LOONGSON_ADDRWINCFG_SIZE);
@@ -28,10 +25,16 @@ void __init prom_init(void)
28 25
29 prom_init_cmdline(); 26 prom_init_cmdline();
30 prom_init_env(); 27 prom_init_env();
28
29 /* init base address of io space */
30 set_io_port_base((unsigned long)
31 ioremap(LOONGSON_PCIIO_BASE, LOONGSON_PCIIO_SIZE));
32
31 prom_init_memory(); 33 prom_init_memory();
32 34
33 /*init the uart base address */ 35 /*init the uart base address */
34 prom_init_uart_base(); 36 prom_init_uart_base();
37 register_smp_ops(&loongson3_smp_ops);
35} 38}
36 39
37void __init prom_free_prom_memory(void) 40void __init prom_free_prom_memory(void)
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 4becd4f9ef2e..1a4797984b8d 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -27,6 +27,10 @@ static const char *system_types[] = {
27 [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f", 27 [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
28 [MACH_LEMOTE_NAS] "lemote-nas-2f", 28 [MACH_LEMOTE_NAS] "lemote-nas-2f",
29 [MACH_LEMOTE_LL2F] "lemote-lynloong-2f", 29 [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
30 [MACH_LEMOTE_A1004] "lemote-3a-notebook-a1004",
31 [MACH_LEMOTE_A1101] "lemote-3a-itx-a1101",
32 [MACH_LEMOTE_A1201] "lemote-2gq-notebook-a1201",
33 [MACH_LEMOTE_A1205] "lemote-2gq-aio-a1205",
30 [MACH_LOONGSON_END] NULL, 34 [MACH_LOONGSON_END] NULL,
31}; 35};
32 36
diff --git a/arch/mips/loongson/common/mem.c b/arch/mips/loongson/common/mem.c
index 8626a42f5b94..b01d52473da8 100644
--- a/arch/mips/loongson/common/mem.c
+++ b/arch/mips/loongson/common/mem.c
@@ -11,9 +11,14 @@
11#include <asm/bootinfo.h> 11#include <asm/bootinfo.h>
12 12
13#include <loongson.h> 13#include <loongson.h>
14#include <boot_param.h>
14#include <mem.h> 15#include <mem.h>
15#include <pci.h> 16#include <pci.h>
16 17
18#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
19
20u32 memsize, highmemsize;
21
17void __init prom_init_memory(void) 22void __init prom_init_memory(void)
18{ 23{
19 add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM); 24 add_memory_region(0x0, (memsize << 20), BOOT_MEM_RAM);
@@ -49,6 +54,43 @@ void __init prom_init_memory(void)
49#endif /* !CONFIG_64BIT */ 54#endif /* !CONFIG_64BIT */
50} 55}
51 56
57#else /* CONFIG_LEFI_FIRMWARE_INTERFACE */
58
59void __init prom_init_memory(void)
60{
61 int i;
62 u32 node_id;
63 u32 mem_type;
64
65 /* parse memory information */
66 for (i = 0; i < loongson_memmap->nr_map; i++) {
67 node_id = loongson_memmap->map[i].node_id;
68 mem_type = loongson_memmap->map[i].mem_type;
69
70 if (node_id == 0) {
71 switch (mem_type) {
72 case SYSTEM_RAM_LOW:
73 add_memory_region(loongson_memmap->map[i].mem_start,
74 (u64)loongson_memmap->map[i].mem_size << 20,
75 BOOT_MEM_RAM);
76 break;
77 case SYSTEM_RAM_HIGH:
78 add_memory_region(loongson_memmap->map[i].mem_start,
79 (u64)loongson_memmap->map[i].mem_size << 20,
80 BOOT_MEM_RAM);
81 break;
82 case MEM_RESERVED:
83 add_memory_region(loongson_memmap->map[i].mem_start,
84 (u64)loongson_memmap->map[i].mem_size << 20,
85 BOOT_MEM_RESERVED);
86 break;
87 }
88 }
89 }
90}
91
92#endif /* CONFIG_LEFI_FIRMWARE_INTERFACE */
93
52/* override of arch/mips/mm/cache.c: __uncached_access */ 94/* override of arch/mips/mm/cache.c: __uncached_access */
53int __uncached_access(struct file *file, unsigned long addr) 95int __uncached_access(struct file *file, unsigned long addr)
54{ 96{
diff --git a/arch/mips/loongson/common/pci.c b/arch/mips/loongson/common/pci.c
index fa7784459721..003ab4e618b3 100644
--- a/arch/mips/loongson/common/pci.c
+++ b/arch/mips/loongson/common/pci.c
@@ -11,6 +11,7 @@
11 11
12#include <pci.h> 12#include <pci.h>
13#include <loongson.h> 13#include <loongson.h>
14#include <boot_param.h>
14 15
15static struct resource loongson_pci_mem_resource = { 16static struct resource loongson_pci_mem_resource = {
16 .name = "pci memory space", 17 .name = "pci memory space",
@@ -82,7 +83,10 @@ static int __init pcibios_init(void)
82 setup_pcimap(); 83 setup_pcimap();
83 84
84 loongson_pci_controller.io_map_base = mips_io_port_base; 85 loongson_pci_controller.io_map_base = mips_io_port_base;
85 86#ifdef CONFIG_LEFI_FIRMWARE_INTERFACE
87 loongson_pci_mem_resource.start = loongson_sysconf.pci_mem_start_addr;
88 loongson_pci_mem_resource.end = loongson_sysconf.pci_mem_end_addr;
89#endif
86 register_pci_controller(&loongson_pci_controller); 90 register_pci_controller(&loongson_pci_controller);
87 91
88 return 0; 92 return 0;
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 65bfbb5d06f4..a60715e11306 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -16,6 +16,7 @@
16#include <asm/reboot.h> 16#include <asm/reboot.h>
17 17
18#include <loongson.h> 18#include <loongson.h>
19#include <boot_param.h>
19 20
20static inline void loongson_reboot(void) 21static inline void loongson_reboot(void)
21{ 22{
@@ -37,17 +38,37 @@ static inline void loongson_reboot(void)
37 38
38static void loongson_restart(char *command) 39static void loongson_restart(char *command)
39{ 40{
41#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
40 /* do preparation for reboot */ 42 /* do preparation for reboot */
41 mach_prepare_reboot(); 43 mach_prepare_reboot();
42 44
43 /* reboot via jumping to boot base address */ 45 /* reboot via jumping to boot base address */
44 loongson_reboot(); 46 loongson_reboot();
47#else
48 void (*fw_restart)(void) = (void *)loongson_sysconf.restart_addr;
49
50 fw_restart();
51 while (1) {
52 if (cpu_wait)
53 cpu_wait();
54 }
55#endif
45} 56}
46 57
47static void loongson_poweroff(void) 58static void loongson_poweroff(void)
48{ 59{
60#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
49 mach_prepare_shutdown(); 61 mach_prepare_shutdown();
50 unreachable(); 62 unreachable();
63#else
64 void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
65
66 fw_poweroff();
67 while (1) {
68 if (cpu_wait)
69 cpu_wait();
70 }
71#endif
51} 72}
52 73
53static void loongson_halt(void) 74static void loongson_halt(void)
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
index 5f2b78ae97cc..bd2b7095b6dc 100644
--- a/arch/mips/loongson/common/serial.c
+++ b/arch/mips/loongson/common/serial.c
@@ -19,19 +19,19 @@
19#include <loongson.h> 19#include <loongson.h>
20#include <machine.h> 20#include <machine.h>
21 21
22#define PORT(int) \ 22#define PORT(int, clk) \
23{ \ 23{ \
24 .irq = int, \ 24 .irq = int, \
25 .uartclk = 1843200, \ 25 .uartclk = clk, \
26 .iotype = UPIO_PORT, \ 26 .iotype = UPIO_PORT, \
27 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \ 27 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
28 .regshift = 0, \ 28 .regshift = 0, \
29} 29}
30 30
31#define PORT_M(int) \ 31#define PORT_M(int, clk) \
32{ \ 32{ \
33 .irq = MIPS_CPU_IRQ_BASE + (int), \ 33 .irq = MIPS_CPU_IRQ_BASE + (int), \
34 .uartclk = 3686400, \ 34 .uartclk = clk, \
35 .iotype = UPIO_MEM, \ 35 .iotype = UPIO_MEM, \
36 .membase = (void __iomem *)NULL, \ 36 .membase = (void __iomem *)NULL, \
37 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \ 37 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST, \
@@ -40,13 +40,17 @@
40 40
41static struct plat_serial8250_port uart8250_data[][2] = { 41static struct plat_serial8250_port uart8250_data[][2] = {
42 [MACH_LOONGSON_UNKNOWN] {}, 42 [MACH_LOONGSON_UNKNOWN] {},
43 [MACH_LEMOTE_FL2E] {PORT(4), {} }, 43 [MACH_LEMOTE_FL2E] {PORT(4, 1843200), {} },
44 [MACH_LEMOTE_FL2F] {PORT(3), {} }, 44 [MACH_LEMOTE_FL2F] {PORT(3, 1843200), {} },
45 [MACH_LEMOTE_ML2F7] {PORT_M(3), {} }, 45 [MACH_LEMOTE_ML2F7] {PORT_M(3, 3686400), {} },
46 [MACH_LEMOTE_YL2F89] {PORT_M(3), {} }, 46 [MACH_LEMOTE_YL2F89] {PORT_M(3, 3686400), {} },
47 [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} }, 47 [MACH_DEXXON_GDIUM2F10] {PORT_M(3, 3686400), {} },
48 [MACH_LEMOTE_NAS] {PORT_M(3), {} }, 48 [MACH_LEMOTE_NAS] {PORT_M(3, 3686400), {} },
49 [MACH_LEMOTE_LL2F] {PORT(3), {} }, 49 [MACH_LEMOTE_LL2F] {PORT(3, 1843200), {} },
50 [MACH_LEMOTE_A1004] {PORT_M(2, 33177600), {} },
51 [MACH_LEMOTE_A1101] {PORT_M(2, 25000000), {} },
52 [MACH_LEMOTE_A1201] {PORT_M(2, 25000000), {} },
53 [MACH_LEMOTE_A1205] {PORT_M(2, 25000000), {} },
50 [MACH_LOONGSON_END] {}, 54 [MACH_LOONGSON_END] {},
51}; 55};
52 56
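
Brief rationale, not part of the patch: the table now carries the true input clock per board because the 8250 core derives its baud divisor from uartclk, roughly divisor = uartclk / (16 * baud). A one-line illustration (uart_divisor() is an invented helper):

#include <linux/kernel.h>

/* Classic 16x-oversampling 8250 divisor: 1843200 / (16 * 115200) == 1. */
static unsigned int uart_divisor(unsigned int uartclk, unsigned int baud)
{
	return uartclk / (16 * baud);
}
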
diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c
index 8223f8acfd59..bb4ac922e47a 100644
--- a/arch/mips/loongson/common/setup.c
+++ b/arch/mips/loongson/common/setup.c
@@ -18,9 +18,6 @@
18#include <linux/screen_info.h> 18#include <linux/screen_info.h>
19#endif 19#endif
20 20
21void (*__wbflush)(void);
22EXPORT_SYMBOL(__wbflush);
23
24static void wbflush_loongson(void) 21static void wbflush_loongson(void)
25{ 22{
26 asm(".set\tpush\n\t" 23 asm(".set\tpush\n\t"
@@ -32,10 +29,11 @@ static void wbflush_loongson(void)
32 ".set mips0\n\t"); 29 ".set mips0\n\t");
33} 30}
34 31
32void (*__wbflush)(void) = wbflush_loongson;
33EXPORT_SYMBOL(__wbflush);
34
35void __init plat_mem_setup(void) 35void __init plat_mem_setup(void)
36{ 36{
37 __wbflush = wbflush_loongson;
38
39#ifdef CONFIG_VT 37#ifdef CONFIG_VT
40#if defined(CONFIG_VGA_CONSOLE) 38#if defined(CONFIG_VGA_CONSOLE)
41 conswitchp = &vga_con; 39 conswitchp = &vga_con;
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
index e192ad021edc..1e1eeea73fde 100644
--- a/arch/mips/loongson/common/uart_base.c
+++ b/arch/mips/loongson/common/uart_base.c
@@ -35,9 +35,16 @@ void prom_init_loongson_uart_base(void)
35 case MACH_DEXXON_GDIUM2F10: 35 case MACH_DEXXON_GDIUM2F10:
36 case MACH_LEMOTE_NAS: 36 case MACH_LEMOTE_NAS:
37 default: 37 default:
38 /* The CPU provided serial port */ 38 /* The CPU provided serial port (LPC) */
39 loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8; 39 loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8;
40 break; 40 break;
41 case MACH_LEMOTE_A1004:
42 case MACH_LEMOTE_A1101:
43 case MACH_LEMOTE_A1201:
44 case MACH_LEMOTE_A1205:
45 /* The CPU provided serial port (CPU) */
46 loongson_uart_base = LOONGSON_REG_BASE + 0x1e0;
47 break;
41 } 48 }
42 49
43 _loongson_uart_base = 50 _loongson_uart_base =
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index aed32b88576c..e1f427f4f5f3 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -28,16 +28,16 @@ enum {
28}; 28};
29 29
30struct cpufreq_frequency_table loongson2_clockmod_table[] = { 30struct cpufreq_frequency_table loongson2_clockmod_table[] = {
31 {DC_RESV, CPUFREQ_ENTRY_INVALID}, 31 {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
32 {DC_ZERO, CPUFREQ_ENTRY_INVALID}, 32 {0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
33 {DC_25PT, 0}, 33 {0, DC_25PT, 0},
34 {DC_37PT, 0}, 34 {0, DC_37PT, 0},
35 {DC_50PT, 0}, 35 {0, DC_50PT, 0},
36 {DC_62PT, 0}, 36 {0, DC_62PT, 0},
37 {DC_75PT, 0}, 37 {0, DC_75PT, 0},
38 {DC_87PT, 0}, 38 {0, DC_87PT, 0},
39 {DC_DISABLE, 0}, 39 {0, DC_DISABLE, 0},
40 {DC_RESV, CPUFREQ_TABLE_END}, 40 {0, DC_RESV, CPUFREQ_TABLE_END},
41}; 41};
42EXPORT_SYMBOL_GPL(loongson2_clockmod_table); 42EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
43 43
diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile
new file mode 100644
index 000000000000..70152b252ddc
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/Makefile
@@ -0,0 +1,6 @@
1#
2# Makefile for Loongson-3 family machines
3#
4obj-y += irq.o
5
6obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
new file mode 100644
index 000000000000..f240828181ff
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/irq.c
@@ -0,0 +1,126 @@
1#include <loongson.h>
2#include <irq.h>
3#include <linux/interrupt.h>
4#include <linux/module.h>
5
6#include <asm/irq_cpu.h>
7#include <asm/i8259.h>
8#include <asm/mipsregs.h>
9
10unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
11
12static void ht_irqdispatch(void)
13{
14 unsigned int i, irq;
15
16 irq = LOONGSON_HT1_INT_VECTOR(0);
17 LOONGSON_HT1_INT_VECTOR(0) = irq; /* Acknowledge the IRQs */
18
19 for (i = 0; i < ARRAY_SIZE(ht_irq); i++) {
20 if (irq & (0x1 << ht_irq[i]))
21 do_IRQ(ht_irq[i]);
22 }
23}
24
25void mach_irq_dispatch(unsigned int pending)
26{
27 if (pending & CAUSEF_IP7)
28 do_IRQ(LOONGSON_TIMER_IRQ);
29#if defined(CONFIG_SMP)
30 else if (pending & CAUSEF_IP6)
31 loongson3_ipi_interrupt(NULL);
32#endif
33 else if (pending & CAUSEF_IP3)
34 ht_irqdispatch();
35 else if (pending & CAUSEF_IP2)
36 do_IRQ(LOONGSON_UART_IRQ);
37 else {
38 pr_err("%s : spurious interrupt\n", __func__);
39 spurious_interrupt();
40 }
41}
42
43static struct irqaction cascade_irqaction = {
44 .handler = no_action,
45 .name = "cascade",
46};
47
48static inline void mask_loongson_irq(struct irq_data *d)
49{
50 clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
51 irq_disable_hazard();
52
53 /* Workaround: UART IRQ may deliver to any core */
54 if (d->irq == LOONGSON_UART_IRQ) {
55 int cpu = smp_processor_id();
56
57 LOONGSON_INT_ROUTER_INTENCLR = 1 << 10;
58 LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu);
59 }
60}
61
62static inline void unmask_loongson_irq(struct irq_data *d)
63{
64 /* Workaround: UART IRQ may deliver to any core */
65 if (d->irq == LOONGSON_UART_IRQ) {
66 int cpu = smp_processor_id();
67
68 LOONGSON_INT_ROUTER_INTENSET = 1 << 10;
69 LOONGSON_INT_ROUTER_LPC = 0x10 + (1<<cpu);
70 }
71
72 set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
73 irq_enable_hazard();
74}
75
76/* For MIPS IRQs which are shared by all cores */
77static struct irq_chip loongson_irq_chip = {
78 .name = "Loongson",
79 .irq_ack = mask_loongson_irq,
80 .irq_mask = mask_loongson_irq,
81 .irq_mask_ack = mask_loongson_irq,
82 .irq_unmask = unmask_loongson_irq,
83 .irq_eoi = unmask_loongson_irq,
84};
85
86void irq_router_init(void)
87{
88 int i;
89
90 /* route LPC int to cpu core0 int 0 */
91 LOONGSON_INT_ROUTER_LPC = LOONGSON_INT_CORE0_INT0;
92 /* route HT1 int0 ~ int7 to cpu core0 INT1*/
93 for (i = 0; i < 8; i++)
94 LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1;
95 /* enable HT1 interrupt */
96 LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
97 /* enable router interrupt intenset */
98 LOONGSON_INT_ROUTER_INTENSET =
99 LOONGSON_INT_ROUTER_INTEN | (0xffff << 16) | 0x1 << 10;
100}
101
102void __init mach_init_irq(void)
103{
104 clear_c0_status(ST0_IM | ST0_BEV);
105
106 irq_router_init();
107 mips_cpu_irq_init();
108 init_i8259_irqs();
109 irq_set_chip_and_handler(LOONGSON_UART_IRQ,
110 &loongson_irq_chip, handle_level_irq);
111
112 /* setup HT1 irq */
113 setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
114
115 set_c0_status(STATUSF_IP2 | STATUSF_IP6);
116}
117
118#ifdef CONFIG_HOTPLUG_CPU
119
120void fixup_irqs(void)
121{
122 irq_cpu_offline();
123 clear_c0_status(ST0_IM);
124}
125
126#endif
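
A self-contained sketch of the dispatch pattern ht_irqdispatch() uses above: read the
pending-bit vector, acknowledge it, then fan the set bits out through a fixed
bit-to-IRQ map. The handler callback stands in for do_IRQ().

    #include <stddef.h>
    #include <stdio.h>

    static const unsigned int bit_to_irq[] = { 1, 3, 4, 5, 6, 7, 8, 12, 14, 15 };

    static void dispatch(unsigned int pending, void (*handler)(unsigned int))
    {
            size_t i;

            for (i = 0; i < sizeof(bit_to_irq) / sizeof(bit_to_irq[0]); i++)
                    if (pending & (1u << bit_to_irq[i]))
                            handler(bit_to_irq[i]);
    }

    static void print_irq(unsigned int irq)
    {
            printf("IRQ %u\n", irq);
    }

    int main(void)
    {
            dispatch((1u << 3) | (1u << 12), print_irq);    /* fires IRQs 3 and 12 */
            return 0;
    }
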
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
new file mode 100644
index 000000000000..c665fe16d4c9
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright (C) 2010, 2011, 2012, Lemote, Inc.
3 * Author: Chen Huacai, chenhc@lemote.com
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/init.h>
18#include <linux/cpu.h>
19#include <linux/sched.h>
20#include <linux/smp.h>
21#include <linux/cpufreq.h>
22#include <asm/processor.h>
23#include <asm/time.h>
24#include <asm/clock.h>
25#include <asm/tlbflush.h>
26#include <asm/cacheflush.h>
27#include <loongson.h>
28
29#include "smp.h"
30
31DEFINE_PER_CPU(int, cpu_state);
32DEFINE_PER_CPU(uint32_t, core0_c0count);
33
34/* read a 32bit value from ipi register */
35#define loongson3_ipi_read32(addr) readl(addr)
36/* read a 64bit value from ipi register */
37#define loongson3_ipi_read64(addr) readq(addr)
38/* write a 32bit value to ipi register */
39#define loongson3_ipi_write32(action, addr) \
40 do { \
41 writel(action, addr); \
42 __wbflush(); \
43 } while (0)
44/* write a 64bit value to ipi register */
45#define loongson3_ipi_write64(action, addr) \
46 do { \
47 writeq(action, addr); \
48 __wbflush(); \
49 } while (0)
50
51static void *ipi_set0_regs[] = {
52 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + SET0),
53 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + SET0),
54 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + SET0),
55 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + SET0),
56 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + SET0),
57 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0),
58 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + SET0),
59 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + SET0),
60 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + SET0),
61 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + SET0),
62 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + SET0),
63 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + SET0),
64 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + SET0),
65 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + SET0),
66 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + SET0),
67 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + SET0),
68};
69
70static void *ipi_clear0_regs[] = {
71 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + CLEAR0),
72 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + CLEAR0),
73 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + CLEAR0),
74 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + CLEAR0),
75 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + CLEAR0),
76 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + CLEAR0),
77 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + CLEAR0),
78 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + CLEAR0),
79 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + CLEAR0),
80 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + CLEAR0),
81 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + CLEAR0),
82 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + CLEAR0),
83 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + CLEAR0),
84 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + CLEAR0),
85 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + CLEAR0),
86 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + CLEAR0),
87};
88
89static void *ipi_status0_regs[] = {
90 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + STATUS0),
91 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + STATUS0),
92 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + STATUS0),
93 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + STATUS0),
94 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + STATUS0),
95 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + STATUS0),
96 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + STATUS0),
97 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + STATUS0),
98 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + STATUS0),
99 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + STATUS0),
100 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + STATUS0),
101 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + STATUS0),
102 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + STATUS0),
103 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + STATUS0),
104 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + STATUS0),
105 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + STATUS0),
106};
107
108static void *ipi_en0_regs[] = {
109 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + EN0),
110 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + EN0),
111 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + EN0),
112 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + EN0),
113 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + EN0),
114 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + EN0),
115 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + EN0),
116 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + EN0),
117 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + EN0),
118 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + EN0),
119 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + EN0),
120 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + EN0),
121 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + EN0),
122 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + EN0),
123 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + EN0),
124 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + EN0),
125};
126
127static void *ipi_mailbox_buf[] = {
128 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE0_OFFSET + BUF),
129 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE1_OFFSET + BUF),
130 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE2_OFFSET + BUF),
131 (void *)(SMP_CORE_GROUP0_BASE + SMP_CORE3_OFFSET + BUF),
132 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE0_OFFSET + BUF),
133 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + BUF),
134 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE2_OFFSET + BUF),
135 (void *)(SMP_CORE_GROUP1_BASE + SMP_CORE3_OFFSET + BUF),
136 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE0_OFFSET + BUF),
137 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE1_OFFSET + BUF),
138 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE2_OFFSET + BUF),
139 (void *)(SMP_CORE_GROUP2_BASE + SMP_CORE3_OFFSET + BUF),
140 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE0_OFFSET + BUF),
141 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE1_OFFSET + BUF),
142 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE2_OFFSET + BUF),
143 (void *)(SMP_CORE_GROUP3_BASE + SMP_CORE3_OFFSET + BUF),
144};
145
146/*
147 * Simple enough, just poke the appropriate ipi register
148 */
149static void loongson3_send_ipi_single(int cpu, unsigned int action)
150{
151 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]);
152}
153
154static void
155loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
156{
157 unsigned int i;
158
159 for_each_cpu(i, mask)
160 loongson3_ipi_write32((u32)action, ipi_set0_regs[i]);
161}
162
163void loongson3_ipi_interrupt(struct pt_regs *regs)
164{
165 int i, cpu = smp_processor_id();
166 unsigned int action, c0count;
167
168 /* Load the ipi register to figure out what we're supposed to do */
169 action = loongson3_ipi_read32(ipi_status0_regs[cpu]);
170
171 /* Clear the ipi register to clear the interrupt */
172 loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu]);
173
174 if (action & SMP_RESCHEDULE_YOURSELF)
175 scheduler_ipi();
176
177 if (action & SMP_CALL_FUNCTION)
178 smp_call_function_interrupt();
179
180 if (action & SMP_ASK_C0COUNT) {
181 BUG_ON(cpu != 0);
182 c0count = read_c0_count();
183 for (i = 1; i < loongson_sysconf.nr_cpus; i++)
184 per_cpu(core0_c0count, i) = c0count;
185 }
186}
187
188#define MAX_LOOPS 1111
189/*
190 * SMP init and finish on secondary CPUs
191 */
192static void loongson3_init_secondary(void)
193{
194 int i;
195 uint32_t initcount;
196 unsigned int cpu = smp_processor_id();
197 unsigned int imask = STATUSF_IP7 | STATUSF_IP6 |
198 STATUSF_IP3 | STATUSF_IP2;
199
200 /* Set interrupt mask, but don't enable */
201 change_c0_status(ST0_IM, imask);
202
203 for (i = 0; i < loongson_sysconf.nr_cpus; i++)
204 loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]);
205
206 per_cpu(cpu_state, cpu) = CPU_ONLINE;
207
208 i = 0;
209 __get_cpu_var(core0_c0count) = 0;
210 loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
211 while (!__get_cpu_var(core0_c0count)) {
212 i++;
213 cpu_relax();
214 }
215
216 if (i > MAX_LOOPS)
217 i = MAX_LOOPS;
218 initcount = __get_cpu_var(core0_c0count) + i;
219 write_c0_count(initcount);
220}
221
222static void loongson3_smp_finish(void)
223{
224 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
225 local_irq_enable();
226 loongson3_ipi_write64(0,
227 (void *)(ipi_mailbox_buf[smp_processor_id()]+0x0));
228 pr_info("CPU#%d finished, CP0_ST=%x\n",
229 smp_processor_id(), read_c0_status());
230}
231
232static void __init loongson3_smp_setup(void)
233{
234 int i, num;
235
236 init_cpu_possible(cpu_none_mask);
237 set_cpu_possible(0, true);
238
239 __cpu_number_map[0] = 0;
240 __cpu_logical_map[0] = 0;
241
 242 /* For a unified kernel, NR_CPUS is the maximum possible value;
 243 * loongson_sysconf.nr_cpus is the number of CPUs actually present */
244 for (i = 1, num = 0; i < loongson_sysconf.nr_cpus; i++) {
245 set_cpu_possible(i, true);
246 __cpu_number_map[i] = ++num;
247 __cpu_logical_map[num] = i;
248 }
249 pr_info("Detected %i available secondary CPU(s)\n", num);
250}
251
252static void __init loongson3_prepare_cpus(unsigned int max_cpus)
253{
254 init_cpu_present(cpu_possible_mask);
255 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
256}
257
258/*
 259 * Set up the PC, SP, and GP of a secondary processor and start it running!
260 */
261static void loongson3_boot_secondary(int cpu, struct task_struct *idle)
262{
263 unsigned long startargs[4];
264
265 pr_info("Booting CPU#%d...\n", cpu);
266
267 /* startargs[] are initial PC, SP and GP for secondary CPU */
268 startargs[0] = (unsigned long)&smp_bootstrap;
269 startargs[1] = (unsigned long)__KSTK_TOS(idle);
270 startargs[2] = (unsigned long)task_thread_info(idle);
271 startargs[3] = 0;
272
273 pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
274 cpu, startargs[0], startargs[1], startargs[2]);
275
276 loongson3_ipi_write64(startargs[3], (void *)(ipi_mailbox_buf[cpu]+0x18));
277 loongson3_ipi_write64(startargs[2], (void *)(ipi_mailbox_buf[cpu]+0x10));
278 loongson3_ipi_write64(startargs[1], (void *)(ipi_mailbox_buf[cpu]+0x8));
279 loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0));
280}
281
282/*
283 * Final cleanup after all secondaries booted
284 */
285static void __init loongson3_cpus_done(void)
286{
287}
288
289#ifdef CONFIG_HOTPLUG_CPU
290
291static int loongson3_cpu_disable(void)
292{
293 unsigned long flags;
294 unsigned int cpu = smp_processor_id();
295
296 if (cpu == 0)
297 return -EBUSY;
298
299 set_cpu_online(cpu, false);
300 cpu_clear(cpu, cpu_callin_map);
301 local_irq_save(flags);
302 fixup_irqs();
303 local_irq_restore(flags);
304 flush_cache_all();
305 local_flush_tlb_all();
306
307 return 0;
308}
309
310
311static void loongson3_cpu_die(unsigned int cpu)
312{
313 while (per_cpu(cpu_state, cpu) != CPU_DEAD)
314 cpu_relax();
315
316 mb();
317}
318
 319/* To shut down a core in Loongson 3, the target core should go to CKSEG1 and
 320 * flush all L1 cache entries first. Then another core (usually Core 0) can
 321 * safely disable the clock of the target core. loongson3_play_dead() is
 322 * called via CKSEG1 (uncached and unmapped). */
323static void loongson3_play_dead(int *state_addr)
324{
325 register int val;
326 register long cpuid, core, node, count;
327 register void *addr, *base, *initfunc;
328
329 __asm__ __volatile__(
330 " .set push \n"
331 " .set noreorder \n"
332 " li %[addr], 0x80000000 \n" /* KSEG0 */
333 "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
334 " cache 0, 1(%[addr]) \n"
335 " cache 0, 2(%[addr]) \n"
336 " cache 0, 3(%[addr]) \n"
337 " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
338 " cache 1, 1(%[addr]) \n"
339 " cache 1, 2(%[addr]) \n"
340 " cache 1, 3(%[addr]) \n"
341 " addiu %[sets], %[sets], -1 \n"
342 " bnez %[sets], 1b \n"
343 " addiu %[addr], %[addr], 0x20 \n"
344 " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
345 " sw %[val], (%[state_addr]) \n"
346 " sync \n"
347 " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
348 " .set pop \n"
349 : [addr] "=&r" (addr), [val] "=&r" (val)
350 : [state_addr] "r" (state_addr),
351 [sets] "r" (cpu_data[smp_processor_id()].dcache.sets));
352
353 __asm__ __volatile__(
354 " .set push \n"
355 " .set noreorder \n"
356 " .set mips64 \n"
357 " mfc0 %[cpuid], $15, 1 \n"
358 " andi %[cpuid], 0x3ff \n"
359 " dli %[base], 0x900000003ff01000 \n"
360 " andi %[core], %[cpuid], 0x3 \n"
361 " sll %[core], 8 \n" /* get core id */
362 " or %[base], %[base], %[core] \n"
363 " andi %[node], %[cpuid], 0xc \n"
364 " dsll %[node], 42 \n" /* get node id */
365 " or %[base], %[base], %[node] \n"
366 "1: li %[count], 0x100 \n" /* wait for init loop */
367 "2: bnez %[count], 2b \n" /* limit mailbox access */
368 " addiu %[count], -1 \n"
369 " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
370 " beqz %[initfunc], 1b \n"
371 " nop \n"
372 " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
373 " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
374 " ld $a1, 0x38(%[base]) \n"
375 " jr %[initfunc] \n" /* jump to initial PC */
376 " nop \n"
377 " .set pop \n"
378 : [core] "=&r" (core), [node] "=&r" (node),
379 [base] "=&r" (base), [cpuid] "=&r" (cpuid),
380 [count] "=&r" (count), [initfunc] "=&r" (initfunc)
381 : /* No Input */
382 : "a1");
383}
384
385void play_dead(void)
386{
387 int *state_addr;
388 unsigned int cpu = smp_processor_id();
389 void (*play_dead_at_ckseg1)(int *);
390
391 idle_task_exit();
392 play_dead_at_ckseg1 =
393 (void *)CKSEG1ADDR((unsigned long)loongson3_play_dead);
394 state_addr = &per_cpu(cpu_state, cpu);
395 mb();
396 play_dead_at_ckseg1(state_addr);
397}
398
399#define CPU_POST_DEAD_FROZEN (CPU_POST_DEAD | CPU_TASKS_FROZEN)
400static int loongson3_cpu_callback(struct notifier_block *nfb,
401 unsigned long action, void *hcpu)
402{
403 unsigned int cpu = (unsigned long)hcpu;
404
405 switch (action) {
406 case CPU_POST_DEAD:
407 case CPU_POST_DEAD_FROZEN:
408 pr_info("Disable clock for CPU#%d\n", cpu);
409 LOONGSON_CHIPCFG0 &= ~(1 << (12 + cpu));
410 break;
411 case CPU_UP_PREPARE:
412 case CPU_UP_PREPARE_FROZEN:
413 pr_info("Enable clock for CPU#%d\n", cpu);
414 LOONGSON_CHIPCFG0 |= 1 << (12 + cpu);
415 break;
416 }
417
418 return NOTIFY_OK;
419}
420
421static int register_loongson3_notifier(void)
422{
423 hotcpu_notifier(loongson3_cpu_callback, 0);
424 return 0;
425}
426early_initcall(register_loongson3_notifier);
427
428#endif
429
430struct plat_smp_ops loongson3_smp_ops = {
431 .send_ipi_single = loongson3_send_ipi_single,
432 .send_ipi_mask = loongson3_send_ipi_mask,
433 .init_secondary = loongson3_init_secondary,
434 .smp_finish = loongson3_smp_finish,
435 .cpus_done = loongson3_cpus_done,
436 .boot_secondary = loongson3_boot_secondary,
437 .smp_setup = loongson3_smp_setup,
438 .prepare_cpus = loongson3_prepare_cpus,
439#ifdef CONFIG_HOTPLUG_CPU
440 .cpu_disable = loongson3_cpu_disable,
441 .cpu_die = loongson3_cpu_die,
442#endif
443};
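
The five 16-entry address tables near the top of smp.c spell out every
group base + core offset + register offset combination by hand. A hedged sketch of
deriving the same addresses arithmetically; the constants mirror the smp.h added below,
and the node stride is inferred from the four group bases, so treat it as an assumption:

    #define GROUP0_BASE     0x900000003ff01000UL
    #define NODE_STRIDE     0x0000100000000000UL    /* distance between group bases */
    #define CORE_STRIDE     0x100UL                 /* SMP_COREx_OFFSET step */
    #define SET0_OFF        0x08UL

    static inline unsigned long ipi_reg(unsigned int cpu, unsigned long reg_off)
    {
            unsigned long node = cpu / 4, core = cpu % 4;

            return GROUP0_BASE + node * NODE_STRIDE + core * CORE_STRIDE + reg_off;
    }

    /* ipi_reg(5, SET0_OFF) == SMP_CORE_GROUP1_BASE + SMP_CORE1_OFFSET + SET0 */
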
diff --git a/arch/mips/loongson/loongson-3/smp.h b/arch/mips/loongson/loongson-3/smp.h
new file mode 100644
index 000000000000..3453e8c4f2f0
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/smp.h
@@ -0,0 +1,29 @@
1#ifndef __LOONGSON_SMP_H_
2#define __LOONGSON_SMP_H_
3
4/* for Loongson-3A smp support */
5
6/* 4 groups(nodes) in maximum in numa case */
7#define SMP_CORE_GROUP0_BASE 0x900000003ff01000
8#define SMP_CORE_GROUP1_BASE 0x900010003ff01000
9#define SMP_CORE_GROUP2_BASE 0x900020003ff01000
10#define SMP_CORE_GROUP3_BASE 0x900030003ff01000
11
12/* 4 cores in each group(node) */
13#define SMP_CORE0_OFFSET 0x000
14#define SMP_CORE1_OFFSET 0x100
15#define SMP_CORE2_OFFSET 0x200
16#define SMP_CORE3_OFFSET 0x300
17
18/* ipi registers offsets */
19#define STATUS0 0x00
20#define EN0 0x04
21#define SET0 0x08
22#define CLEAR0 0x0c
23#define STATUS1 0x10
24#define MASK1 0x14
25#define SET1 0x18
26#define CLEAR1 0x1c
27#define BUF 0x20
28
29#endif
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 0b4e2e38294b..7b3c9acae689 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -876,20 +876,43 @@ static inline int cop1_64bit(struct pt_regs *xcp)
876#endif 876#endif
877} 877}
878 878
879#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \ 879#define SIFROMREG(si, x) do { \
880 (int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32)) 880 if (cop1_64bit(xcp)) \
881 881 (si) = get_fpr32(&ctx->fpr[x], 0); \
882#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \ 882 else \
883 cop1_64bit(xcp) || !(x & 1) ? \ 883 (si) = get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
884 ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \ 884} while (0)
885 ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32) 885
886 886#define SITOREG(si, x) do { \
887#define SIFROMHREG(si, x) ((si) = (int)(ctx->fpr[x] >> 32)) 887 if (cop1_64bit(xcp)) { \
888#define SITOHREG(si, x) (ctx->fpr[x] = \ 888 unsigned i; \
889 ctx->fpr[x] << 32 >> 32 | (u64)(si) << 32) 889 set_fpr32(&ctx->fpr[x], 0, si); \
890 890 for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
891#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)]) 891 set_fpr32(&ctx->fpr[x], i, 0); \
892#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di)) 892 } else { \
893 set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si); \
894 } \
895} while (0)
896
897#define SIFROMHREG(si, x) ((si) = get_fpr32(&ctx->fpr[x], 1))
898
899#define SITOHREG(si, x) do { \
900 unsigned i; \
901 set_fpr32(&ctx->fpr[x], 1, si); \
902 for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
903 set_fpr32(&ctx->fpr[x], i, 0); \
904} while (0)
905
906#define DIFROMREG(di, x) \
907 ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0))
908
909#define DITOREG(di, x) do { \
910 unsigned fpr, i; \
911 fpr = (x) & ~(cop1_64bit(xcp) == 0); \
912 set_fpr64(&ctx->fpr[fpr], 0, di); \
913 for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
914 set_fpr64(&ctx->fpr[fpr], i, 0); \
915} while (0)
893 916
894#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x) 917#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
895#define SPTOREG(sp, x) SITOREG((sp).bits, x) 918#define SPTOREG(sp, x) SITOREG((sp).bits, x)
@@ -1960,15 +1983,18 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
1960 1983
1961#if defined(__mips64) 1984#if defined(__mips64)
1962 case l_fmt:{ 1985 case l_fmt:{
1986 u64 bits;
1987 DIFROMREG(bits, MIPSInst_FS(ir));
1988
1963 switch (MIPSInst_FUNC(ir)) { 1989 switch (MIPSInst_FUNC(ir)) {
1964 case fcvts_op: 1990 case fcvts_op:
1965 /* convert long to single precision real */ 1991 /* convert long to single precision real */
1966 rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]); 1992 rv.s = ieee754sp_flong(bits);
1967 rfmt = s_fmt; 1993 rfmt = s_fmt;
1968 goto copcsr; 1994 goto copcsr;
1969 case fcvtd_op: 1995 case fcvtd_op:
1970 /* convert long to double precision real */ 1996 /* convert long to double precision real */
1971 rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]); 1997 rv.d = ieee754dp_flong(bits);
1972 rfmt = d_fmt; 1998 rfmt = d_fmt;
1973 goto copcsr; 1999 goto copcsr;
1974 default: 2000 default:
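
The rewritten SIFROMREG/SITOREG family above now goes through get_fpr32()/set_fpr32()
and get_fpr64()/set_fpr64() accessors on a union-typed FPU register. A hedged sketch of
that accessor style; the real definitions live in the MIPS FPU emulator headers and
also handle endianness and wider MSA registers, which this stand-in ignores:

    #include <stdint.h>

    union fpureg {
            uint32_t val32[2];
            uint64_t val64[1];
    };

    static inline uint32_t get_fpr32(const union fpureg *fpr, unsigned int idx)
    {
            return fpr->val32[idx];
    }

    static inline void set_fpr32(union fpureg *fpr, unsigned int idx, uint32_t val)
    {
            fpr->val32[idx] = val;
    }

    static inline uint64_t get_fpr64(const union fpureg *fpr, unsigned int idx)
    {
            return fpr->val64[idx];
    }

    static inline void set_fpr64(union fpureg *fpr, unsigned int idx, uint64_t val)
    {
            fpr->val64[idx] = val;
    }
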
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
index 3aeae07ed5b8..eb58a85b3157 100644
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -40,78 +40,6 @@ void fpu_emulator_init_fpu(void)
40 } 40 }
41 41
42 current->thread.fpu.fcr31 = 0; 42 current->thread.fpu.fcr31 = 0;
43 for (i = 0; i < 32; i++) { 43 for (i = 0; i < 32; i++)
44 current->thread.fpu.fpr[i] = SIGNALLING_NAN; 44 set_fpr64(&current->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
45 }
46}
47
48
49/*
50 * Emulator context save/restore to/from a signal context
51 * presumed to be on the user stack, and therefore accessed
52 * with appropriate macros from uaccess.h
53 */
54
55int fpu_emulator_save_context(struct sigcontext __user *sc)
56{
57 int i;
58 int err = 0;
59
60 for (i = 0; i < 32; i++) {
61 err |=
62 __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
63 }
64 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
65
66 return err;
67}
68
69int fpu_emulator_restore_context(struct sigcontext __user *sc)
70{
71 int i;
72 int err = 0;
73
74 for (i = 0; i < 32; i++) {
75 err |=
76 __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
77 }
78 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
79
80 return err;
81}
82
83#ifdef CONFIG_64BIT
84/*
85 * This is the o32 version
86 */
87
88int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
89{
90 int i;
91 int err = 0;
92 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
93
94 for (i = 0; i < 32; i += inc) {
95 err |=
96 __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
97 }
98 err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
99
100 return err;
101}
102
103int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
104{
105 int i;
106 int err = 0;
107 int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
108
109 for (i = 0; i < 32; i += inc) {
110 err |=
111 __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
112 }
113 err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
114
115 return err;
116} 45}
117#endif
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index c14259edd53f..1c74a6ad072a 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -57,7 +57,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
57 preempt_enable(); 57 preempt_enable();
58} 58}
59 59
60#if defined(CONFIG_MIPS_CMP) 60#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
61#define cpu_has_safe_index_cacheops 0 61#define cpu_has_safe_index_cacheops 0
62#else 62#else
63#define cpu_has_safe_index_cacheops 1 63#define cpu_has_safe_index_cacheops 1
@@ -123,6 +123,28 @@ static void r4k_blast_dcache_page_setup(void)
123 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; 123 r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
124} 124}
125 125
126#ifndef CONFIG_EVA
127#define r4k_blast_dcache_user_page r4k_blast_dcache_page
128#else
129
130static void (*r4k_blast_dcache_user_page)(unsigned long addr);
131
132static void r4k_blast_dcache_user_page_setup(void)
133{
134 unsigned long dc_lsize = cpu_dcache_line_size();
135
136 if (dc_lsize == 0)
137 r4k_blast_dcache_user_page = (void *)cache_noop;
138 else if (dc_lsize == 16)
139 r4k_blast_dcache_user_page = blast_dcache16_user_page;
140 else if (dc_lsize == 32)
141 r4k_blast_dcache_user_page = blast_dcache32_user_page;
142 else if (dc_lsize == 64)
143 r4k_blast_dcache_user_page = blast_dcache64_user_page;
144}
145
146#endif
147
126static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); 148static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
127 149
128static void r4k_blast_dcache_page_indexed_setup(void) 150static void r4k_blast_dcache_page_indexed_setup(void)
@@ -245,6 +267,27 @@ static void r4k_blast_icache_page_setup(void)
245 r4k_blast_icache_page = blast_icache64_page; 267 r4k_blast_icache_page = blast_icache64_page;
246} 268}
247 269
270#ifndef CONFIG_EVA
271#define r4k_blast_icache_user_page r4k_blast_icache_page
272#else
273
274static void (*r4k_blast_icache_user_page)(unsigned long addr);
275
276static void __cpuinit r4k_blast_icache_user_page_setup(void)
277{
278 unsigned long ic_lsize = cpu_icache_line_size();
279
280 if (ic_lsize == 0)
281 r4k_blast_icache_user_page = (void *)cache_noop;
282 else if (ic_lsize == 16)
283 r4k_blast_icache_user_page = blast_icache16_user_page;
284 else if (ic_lsize == 32)
285 r4k_blast_icache_user_page = blast_icache32_user_page;
286 else if (ic_lsize == 64)
287 r4k_blast_icache_user_page = blast_icache64_user_page;
288}
289
290#endif
248 291
249static void (* r4k_blast_icache_page_indexed)(unsigned long addr); 292static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
250 293
@@ -355,6 +398,7 @@ static inline void local_r4k___flush_cache_all(void * args)
355{ 398{
356 switch (current_cpu_type()) { 399 switch (current_cpu_type()) {
357 case CPU_LOONGSON2: 400 case CPU_LOONGSON2:
401 case CPU_LOONGSON3:
358 case CPU_R4000SC: 402 case CPU_R4000SC:
359 case CPU_R4000MC: 403 case CPU_R4000MC:
360 case CPU_R4400SC: 404 case CPU_R4400SC:
@@ -519,7 +563,8 @@ static inline void local_r4k_flush_cache_page(void *args)
519 } 563 }
520 564
521 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { 565 if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
522 r4k_blast_dcache_page(addr); 566 vaddr ? r4k_blast_dcache_page(addr) :
567 r4k_blast_dcache_user_page(addr);
523 if (exec && !cpu_icache_snoops_remote_store) 568 if (exec && !cpu_icache_snoops_remote_store)
524 r4k_blast_scache_page(addr); 569 r4k_blast_scache_page(addr);
525 } 570 }
@@ -530,7 +575,8 @@ static inline void local_r4k_flush_cache_page(void *args)
530 if (cpu_context(cpu, mm) != 0) 575 if (cpu_context(cpu, mm) != 0)
531 drop_mmu_context(mm, cpu); 576 drop_mmu_context(mm, cpu);
532 } else 577 } else
533 r4k_blast_icache_page(addr); 578 vaddr ? r4k_blast_icache_page(addr) :
579 r4k_blast_icache_user_page(addr);
534 } 580 }
535 581
536 if (vaddr) { 582 if (vaddr) {
@@ -595,6 +641,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
595 break; 641 break;
596 } 642 }
597 } 643 }
644#ifdef CONFIG_EVA
645 /*
 646 * Due to all the possible segment mappings, there might be cache aliases
 647 * caused by the bootloader running in non-EVA mode and the CPU switching
 648 * to EVA during early kernel init. It's best to flush the scache
 649 * to avoid secondary cores fetching stale data, which can lead to
 650 * kernel crashes.
651 */
652 bc_wback_inv(start, (end - start));
653 __sync();
654#endif
598} 655}
599 656
600static inline void local_r4k_flush_icache_range_ipi(void *args) 657static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -617,7 +674,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
617 instruction_hazard(); 674 instruction_hazard();
618} 675}
619 676
620#ifdef CONFIG_DMA_NONCOHERENT 677#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
621 678
622static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) 679static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
623{ 680{
@@ -688,7 +745,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
688 bc_inv(addr, size); 745 bc_inv(addr, size);
689 __sync(); 746 __sync();
690} 747}
691#endif /* CONFIG_DMA_NONCOHERENT */ 748#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
692 749
693/* 750/*
694 * While we're protected against bad userland addresses we don't care 751 * While we're protected against bad userland addresses we don't care
@@ -1010,6 +1067,33 @@ static void probe_pcache(void)
1010 c->dcache.waybit = 0; 1067 c->dcache.waybit = 0;
1011 break; 1068 break;
1012 1069
1070 case CPU_LOONGSON3:
1071 config1 = read_c0_config1();
1072 lsize = (config1 >> 19) & 7;
1073 if (lsize)
1074 c->icache.linesz = 2 << lsize;
1075 else
1076 c->icache.linesz = 0;
1077 c->icache.sets = 64 << ((config1 >> 22) & 7);
1078 c->icache.ways = 1 + ((config1 >> 16) & 7);
1079 icache_size = c->icache.sets *
1080 c->icache.ways *
1081 c->icache.linesz;
1082 c->icache.waybit = 0;
1083
1084 lsize = (config1 >> 10) & 7;
1085 if (lsize)
1086 c->dcache.linesz = 2 << lsize;
1087 else
1088 c->dcache.linesz = 0;
1089 c->dcache.sets = 64 << ((config1 >> 13) & 7);
1090 c->dcache.ways = 1 + ((config1 >> 7) & 7);
1091 dcache_size = c->dcache.sets *
1092 c->dcache.ways *
1093 c->dcache.linesz;
1094 c->dcache.waybit = 0;
1095 break;
1096
1013 default: 1097 default:
1014 if (!(config & MIPS_CONF_M)) 1098 if (!(config & MIPS_CONF_M))
1015 panic("Don't know how to probe P-caches on this cpu."); 1099 panic("Don't know how to probe P-caches on this cpu.");
@@ -1113,13 +1197,21 @@ static void probe_pcache(void)
1113 case CPU_34K: 1197 case CPU_34K:
1114 case CPU_74K: 1198 case CPU_74K:
1115 case CPU_1004K: 1199 case CPU_1004K:
1200 case CPU_1074K:
1116 case CPU_INTERAPTIV: 1201 case CPU_INTERAPTIV:
1202 case CPU_P5600:
1117 case CPU_PROAPTIV: 1203 case CPU_PROAPTIV:
1118 if (current_cpu_type() == CPU_74K) 1204 case CPU_M5150:
1205 if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
1119 alias_74k_erratum(c); 1206 alias_74k_erratum(c);
1120 if ((read_c0_config7() & (1 << 16))) { 1207 if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
1121 /* effectively physically indexed dcache, 1208 (c->icache.waysize > PAGE_SIZE))
1122 thus no virtual aliases. */ 1209 c->icache.flags |= MIPS_CACHE_ALIASES;
1210 if (read_c0_config7() & MIPS_CONF7_AR) {
1211 /*
1212 * Effectively physically indexed dcache,
1213 * thus no virtual aliases.
1214 */
1123 c->dcache.flags |= MIPS_CACHE_PINDEX; 1215 c->dcache.flags |= MIPS_CACHE_PINDEX;
1124 break; 1216 break;
1125 } 1217 }
@@ -1239,6 +1331,33 @@ static void __init loongson2_sc_init(void)
1239 c->options |= MIPS_CPU_INCLUSIVE_CACHES; 1331 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1240} 1332}
1241 1333
1334static void __init loongson3_sc_init(void)
1335{
1336 struct cpuinfo_mips *c = &current_cpu_data;
1337 unsigned int config2, lsize;
1338
1339 config2 = read_c0_config2();
1340 lsize = (config2 >> 4) & 15;
1341 if (lsize)
1342 c->scache.linesz = 2 << lsize;
1343 else
1344 c->scache.linesz = 0;
1345 c->scache.sets = 64 << ((config2 >> 8) & 15);
1346 c->scache.ways = 1 + (config2 & 15);
1347
1348 scache_size = c->scache.sets *
1349 c->scache.ways *
1350 c->scache.linesz;
 1351 /* Loongson-3 has 4 cores, with a 1MB scache per core; the scaches are shared */
1352 scache_size *= 4;
1353 c->scache.waybit = 0;
1354 pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
1355 scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
1356 if (scache_size)
1357 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1358 return;
1359}
1360
1242extern int r5k_sc_init(void); 1361extern int r5k_sc_init(void);
1243extern int rm7k_sc_init(void); 1362extern int rm7k_sc_init(void);
1244extern int mips_sc_init(void); 1363extern int mips_sc_init(void);
@@ -1291,6 +1410,10 @@ static void setup_scache(void)
1291 loongson2_sc_init(); 1410 loongson2_sc_init();
1292 return; 1411 return;
1293 1412
1413 case CPU_LOONGSON3:
1414 loongson3_sc_init();
1415 return;
1416
1294 case CPU_XLP: 1417 case CPU_XLP:
1295 /* don't need to worry about L2, fully coherent */ 1418 /* don't need to worry about L2, fully coherent */
1296 return; 1419 return;
@@ -1461,6 +1584,10 @@ void r4k_cache_init(void)
1461 r4k_blast_scache_page_setup(); 1584 r4k_blast_scache_page_setup();
1462 r4k_blast_scache_page_indexed_setup(); 1585 r4k_blast_scache_page_indexed_setup();
1463 r4k_blast_scache_setup(); 1586 r4k_blast_scache_setup();
1587#ifdef CONFIG_EVA
1588 r4k_blast_dcache_user_page_setup();
1589 r4k_blast_icache_user_page_setup();
1590#endif
1464 1591
1465 /* 1592 /*
1466 * Some MIPS32 and MIPS64 processors have physically indexed caches. 1593 * Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1492,7 +1619,7 @@ void r4k_cache_init(void)
1492 flush_icache_range = r4k_flush_icache_range; 1619 flush_icache_range = r4k_flush_icache_range;
1493 local_flush_icache_range = local_r4k_flush_icache_range; 1620 local_flush_icache_range = local_r4k_flush_icache_range;
1494 1621
1495#if defined(CONFIG_DMA_NONCOHERENT) 1622#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
1496 if (coherentio) { 1623 if (coherentio) {
1497 _dma_cache_wback_inv = (void *)cache_noop; 1624 _dma_cache_wback_inv = (void *)cache_noop;
1498 _dma_cache_wback = (void *)cache_noop; 1625 _dma_cache_wback = (void *)cache_noop;
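
A small stand-alone sketch of the Config1 decode the new CPU_LOONGSON3 case in
probe_pcache() performs above; the field positions (IS/IL/IA for the I-cache, DS/DL/DA
for the D-cache) are taken from the hunk itself and the register value below is
synthetic:

    #include <stdio.h>

    static unsigned int cache_bytes(unsigned int cfg1, int lsh, int ssh, int ash)
    {
            unsigned int lsize = (cfg1 >> lsh) & 7;         /* 2 << lsize bytes/line */
            unsigned int sets  = 64u << ((cfg1 >> ssh) & 7);
            unsigned int ways  = 1 + ((cfg1 >> ash) & 7);

            return lsize ? sets * ways * (2u << lsize) : 0;
    }

    int main(void)
    {
            /* synthetic value: 512 sets, 32-byte lines, 4 ways for both caches */
            unsigned int config1 = (3u << 22) | (4u << 19) | (3u << 16) |
                                   (3u << 13) | (4u << 10) | (3u << 7);

            printf("icache %u bytes, dcache %u bytes\n",
                   cache_bytes(config1, 19, 22, 16),        /* IL, IS, IA */
                   cache_bytes(config1, 10, 13, 7));        /* DL, DS, DA */
            return 0;
    }
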
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index fde7e56d13fe..e422b38d3113 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
49EXPORT_SYMBOL(flush_data_cache_page); 49EXPORT_SYMBOL(flush_data_cache_page);
50EXPORT_SYMBOL(flush_icache_all); 50EXPORT_SYMBOL(flush_icache_all);
51 51
52#ifdef CONFIG_DMA_NONCOHERENT 52#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
53 53
54/* DMA cache operations. */ 54/* DMA cache operations. */
55void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size); 55void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -58,7 +58,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
58 58
59EXPORT_SYMBOL(_dma_cache_wback_inv); 59EXPORT_SYMBOL(_dma_cache_wback_inv);
60 60
61#endif /* CONFIG_DMA_NONCOHERENT */ 61#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
62 62
63/* 63/*
64 * We could optimize the case where the cache argument is not BCACHE but 64 * We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6b59617760c1..4fc74c78265a 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -422,10 +422,20 @@ void free_initrd_mem(unsigned long start, unsigned long end)
422} 422}
423#endif 423#endif
424 424
425void (*free_init_pages_eva)(void *begin, void *end) = NULL;
426
425void __init_refok free_initmem(void) 427void __init_refok free_initmem(void)
426{ 428{
427 prom_free_prom_memory(); 429 prom_free_prom_memory();
428 free_initmem_default(POISON_FREE_INITMEM); 430 /*
431 * Let the platform define a specific function to free the
432 * init section since EVA may have used any possible mapping
433 * between virtual and physical addresses.
434 */
435 if (free_init_pages_eva)
436 free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
437 else
438 free_initmem_default(POISON_FREE_INITMEM);
429} 439}
430 440
431#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 441#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
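
The free_initmem() change above introduces an optional platform hook with a default
fallback. A minimal, self-contained sketch of that pattern with hypothetical names:

    #include <stdio.h>

    static void (*platform_hook)(void);     /* NULL until a platform installs one */

    static void default_action(void)  { puts("default free path"); }
    static void platform_action(void) { puts("platform-specific free path"); }

    static void run(void)
    {
            if (platform_hook)
                    platform_hook();
            else
                    default_action();
    }

    int main(void)
    {
            run();                          /* takes the default path */
            platform_hook = platform_action;
            run();                          /* takes the platform override */
            return 0;
    }
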
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 7a56aee5fce7..99eb8fabab60 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -76,8 +76,10 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
76 case CPU_34K: 76 case CPU_34K:
77 case CPU_74K: 77 case CPU_74K:
78 case CPU_1004K: 78 case CPU_1004K:
79 case CPU_1074K:
79 case CPU_INTERAPTIV: 80 case CPU_INTERAPTIV:
80 case CPU_PROAPTIV: 81 case CPU_PROAPTIV:
82 case CPU_P5600:
81 case CPU_BMIPS5000: 83 case CPU_BMIPS5000:
82 if (config2 & (1 << 12)) 84 if (config2 & (1 << 12))
83 return 0; 85 return 0;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index ae4ca2450707..eeaf50f5df2b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -48,13 +48,14 @@ extern void build_tlb_refill_handler(void);
48#endif /* CONFIG_MIPS_MT_SMTC */ 48#endif /* CONFIG_MIPS_MT_SMTC */
49 49
50/* 50/*
51 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb, 51 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
52 * unfortrunately, itlb is not totally transparent to software. 52 * unfortunately, itlb is not totally transparent to software.
53 */ 53 */
54static inline void flush_itlb(void) 54static inline void flush_itlb(void)
55{ 55{
56 switch (current_cpu_type()) { 56 switch (current_cpu_type()) {
57 case CPU_LOONGSON2: 57 case CPU_LOONGSON2:
58 case CPU_LOONGSON3:
58 write_c0_diag(4); 59 write_c0_diag(4);
59 break; 60 break;
60 default: 61 default:
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b234b1b5ccad..ee88367ab3ad 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -509,7 +509,10 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
509 switch (current_cpu_type()) { 509 switch (current_cpu_type()) {
510 case CPU_M14KC: 510 case CPU_M14KC:
511 case CPU_74K: 511 case CPU_74K:
512 case CPU_1074K:
512 case CPU_PROAPTIV: 513 case CPU_PROAPTIV:
514 case CPU_P5600:
515 case CPU_M5150:
513 break; 516 break;
514 517
515 default: 518 default:
@@ -579,6 +582,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
579 case CPU_BMIPS4380: 582 case CPU_BMIPS4380:
580 case CPU_BMIPS5000: 583 case CPU_BMIPS5000:
581 case CPU_LOONGSON2: 584 case CPU_LOONGSON2:
585 case CPU_LOONGSON3:
582 case CPU_R5500: 586 case CPU_R5500:
583 if (m4kc_tlbp_war()) 587 if (m4kc_tlbp_war())
584 uasm_i_nop(p); 588 uasm_i_nop(p);
@@ -621,7 +625,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
621 625
622 default: 626 default:
623 panic("No TLB refill handler yet (CPU type: %d)", 627 panic("No TLB refill handler yet (CPU type: %d)",
624 current_cpu_data.cputype); 628 current_cpu_type());
625 break; 629 break;
626 } 630 }
627} 631}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index fcebfced26d0..4f9e44d358b7 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -20,7 +20,8 @@
20#include <asm/smp-ops.h> 20#include <asm/smp-ops.h>
21#include <asm/traps.h> 21#include <asm/traps.h>
22#include <asm/fw/fw.h> 22#include <asm/fw/fw.h>
23#include <asm/gcmpregs.h> 23#include <asm/mips-cm.h>
24#include <asm/mips-cpc.h>
24#include <asm/mips-boards/generic.h> 25#include <asm/mips-boards/generic.h>
25#include <asm/mips-boards/malta.h> 26#include <asm/mips-boards/malta.h>
26 27
@@ -110,6 +111,11 @@ static void __init mips_ejtag_setup(void)
110 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 111 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
111} 112}
112 113
114phys_t mips_cpc_default_phys_base(void)
115{
116 return CPC_BASE_ADDR;
117}
118
113extern struct plat_smp_ops msmtc_smp_ops; 119extern struct plat_smp_ops msmtc_smp_ops;
114 120
115void __init prom_init(void) 121void __init prom_init(void)
@@ -238,10 +244,23 @@ mips_pci_controller:
238 MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF | 244 MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
239 MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF); 245 MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
240#endif 246#endif
247#ifndef CONFIG_EVA
241 /* Fix up target memory mapping. */ 248 /* Fix up target memory mapping. */
242 MSC_READ(MSC01_PCI_BAR0, mask); 249 MSC_READ(MSC01_PCI_BAR0, mask);
243 MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK); 250 MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
251#else
252 /*
 253 * Set up the maximum (2GB) Malta memory for PCI DMA in the host bridge
 254 * in transparent addressing mode, starting from 0x80000000.
255 */
256 mask = PHYS_OFFSET | (1<<3);
257 MSC_WRITE(MSC01_PCI_BAR0, mask);
244 258
259 mask = PHYS_OFFSET;
260 MSC_WRITE(MSC01_PCI_HEAD4, mask);
261 MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
262 MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
263#endif
245 /* Don't handle target retries indefinitely. */ 264 /* Don't handle target retries indefinitely. */
246 if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) == 265 if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
247 MSC01_PCI_CFG_MAXRTRY_MSK) 266 MSC01_PCI_CFG_MAXRTRY_MSK)
@@ -276,10 +295,13 @@ mips_pci_controller:
276 console_config(); 295 console_config();
277#endif 296#endif
278 /* Early detection of CMP support */ 297 /* Early detection of CMP support */
279 if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) 298 mips_cm_probe();
280 if (!register_cmp_smp_ops()) 299 mips_cpc_probe();
281 return;
282 300
301 if (!register_cps_smp_ops())
302 return;
303 if (!register_cmp_smp_ops())
304 return;
283 if (!register_vsmp_smp_ops()) 305 if (!register_vsmp_smp_ops())
284 return; 306 return;
285 307
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 2242181a6284..b71ee809191a 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -26,6 +26,7 @@
26#include <asm/i8259.h> 26#include <asm/i8259.h>
27#include <asm/irq_cpu.h> 27#include <asm/irq_cpu.h>
28#include <asm/irq_regs.h> 28#include <asm/irq_regs.h>
29#include <asm/mips-cm.h>
29#include <asm/mips-boards/malta.h> 30#include <asm/mips-boards/malta.h>
30#include <asm/mips-boards/maltaint.h> 31#include <asm/mips-boards/maltaint.h>
31#include <asm/gt64120.h> 32#include <asm/gt64120.h>
@@ -33,13 +34,10 @@
33#include <asm/mips-boards/msc01_pci.h> 34#include <asm/mips-boards/msc01_pci.h>
34#include <asm/msc01_ic.h> 35#include <asm/msc01_ic.h>
35#include <asm/gic.h> 36#include <asm/gic.h>
36#include <asm/gcmpregs.h>
37#include <asm/setup.h> 37#include <asm/setup.h>
38#include <asm/rtlx.h> 38#include <asm/rtlx.h>
39 39
40int gcmp_present = -1;
41static unsigned long _msc01_biu_base; 40static unsigned long _msc01_biu_base;
42static unsigned long _gcmp_base;
43static unsigned int ipi_map[NR_CPUS]; 41static unsigned int ipi_map[NR_CPUS];
44 42
45static DEFINE_RAW_SPINLOCK(mips_irq_lock); 43static DEFINE_RAW_SPINLOCK(mips_irq_lock);
@@ -288,10 +286,6 @@ asmlinkage void plat_irq_dispatch(void)
288 286
289#ifdef CONFIG_MIPS_MT_SMP 287#ifdef CONFIG_MIPS_MT_SMP
290 288
291
292#define GIC_MIPS_CPU_IPI_RESCHED_IRQ 3
293#define GIC_MIPS_CPU_IPI_CALL_IRQ 4
294
295#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */ 289#define MIPS_CPU_IPI_RESCHED_IRQ 0 /* SW int 0 for resched */
296#define C_RESCHED C_SW0 290#define C_RESCHED C_SW0
297#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for resched */ 291#define MIPS_CPU_IPI_CALL_IRQ 1 /* SW int 1 for resched */
@@ -308,6 +302,13 @@ static void ipi_call_dispatch(void)
308 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ); 302 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
309} 303}
310 304
305#endif /* CONFIG_MIPS_MT_SMP */
306
307#ifdef CONFIG_MIPS_GIC_IPI
308
309#define GIC_MIPS_CPU_IPI_RESCHED_IRQ 3
310#define GIC_MIPS_CPU_IPI_CALL_IRQ 4
311
311static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 312static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
312{ 313{
313#ifdef CONFIG_MIPS_VPE_APSP_API_CMP 314#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
@@ -338,7 +339,7 @@ static struct irqaction irq_call = {
338 .flags = IRQF_PERCPU, 339 .flags = IRQF_PERCPU,
339 .name = "IPI_call" 340 .name = "IPI_call"
340}; 341};
341#endif /* CONFIG_MIPS_MT_SMP */ 342#endif /* CONFIG_MIPS_GIC_IPI */
342 343
343static int gic_resched_int_base; 344static int gic_resched_int_base;
344static int gic_call_int_base; 345static int gic_call_int_base;
@@ -418,49 +419,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
418}; 419};
419#undef X 420#undef X
420 421
421/* 422#ifdef CONFIG_MIPS_GIC_IPI
422 * GCMP needs to be detected before any SMP initialisation
423 */
424int __init gcmp_probe(unsigned long addr, unsigned long size)
425{
426 if ((mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) &&
427 (mips_revision_sconid != MIPS_REVISION_SCON_GT64120)) {
428 gcmp_present = 0;
429 pr_debug("GCMP NOT present\n");
430 return gcmp_present;
431 }
432
433 if (gcmp_present >= 0)
434 return gcmp_present;
435
436 _gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR,
437 GCMP_ADDRSPACE_SZ);
438 _msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE,
439 MSC01_BIU_ADDRSPACE_SZ);
440 gcmp_present = ((GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) ==
441 GCMP_BASE_ADDR);
442
443 if (gcmp_present)
444 pr_debug("GCMP present\n");
445 return gcmp_present;
446}
447
448/* Return the number of IOCU's present */
449int __init gcmp_niocu(void)
450{
451 return gcmp_present ? ((GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >>
452 GCMP_GCB_GC_NUMIOCU_SHF) : 0;
453}
454
455/* Set GCMP region attributes */
456void __init gcmp_setregion(int region, unsigned long base,
457 unsigned long mask, int type)
458{
459 GCMPGCBn(CMxBASE, region) = base;
460 GCMPGCBn(CMxMASK, region) = mask | type;
461}
462
463#if defined(CONFIG_MIPS_MT_SMP)
464static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin) 423static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
465{ 424{
466 int intr = baseintr + cpu; 425 int intr = baseintr + cpu;
@@ -496,8 +455,8 @@ void __init arch_init_irq(void)
496 if (!cpu_has_veic) 455 if (!cpu_has_veic)
497 mips_cpu_irq_init(); 456 mips_cpu_irq_init();
498 457
499 if (gcmp_present) { 458 if (mips_cm_present()) {
500 GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK; 459 write_gcr_gic_base(GIC_BASE_ADDR | CM_GCR_GIC_BASE_GICEN_MSK);
501 gic_present = 1; 460 gic_present = 1;
502 } else { 461 } else {
503 if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) { 462 if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
@@ -576,7 +535,7 @@ void __init arch_init_irq(void)
576 if (gic_present) { 535 if (gic_present) {
577 /* FIXME */ 536 /* FIXME */
578 int i; 537 int i;
579#if defined(CONFIG_MIPS_MT_SMP) 538#if defined(CONFIG_MIPS_GIC_IPI)
580 gic_call_int_base = GIC_NUM_INTRS - 539 gic_call_int_base = GIC_NUM_INTRS -
581 (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids; 540 (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
582 gic_resched_int_base = gic_call_int_base - nr_cpu_ids; 541 gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
@@ -584,14 +543,14 @@ void __init arch_init_irq(void)
584#endif 543#endif
585 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, 544 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
586 ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); 545 ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
587 if (!gcmp_present) { 546 if (!mips_cm_present()) {
588 /* Enable the GIC */ 547 /* Enable the GIC */
589 i = REG(_msc01_biu_base, MSC01_SC_CFG); 548 i = REG(_msc01_biu_base, MSC01_SC_CFG);
590 REG(_msc01_biu_base, MSC01_SC_CFG) = 549 REG(_msc01_biu_base, MSC01_SC_CFG) =
591 (i | (0x1 << MSC01_SC_CFG_GICENA_SHF)); 550 (i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
592 pr_debug("GIC Enabled\n"); 551 pr_debug("GIC Enabled\n");
593 } 552 }
594#if defined(CONFIG_MIPS_MT_SMP) 553#if defined(CONFIG_MIPS_GIC_IPI)
595 /* set up ipi interrupts */ 554 /* set up ipi interrupts */
596 if (cpu_has_vint) { 555 if (cpu_has_vint) {
597 set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch); 556 set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
@@ -708,16 +667,16 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
708 /* This duplicates the handling in do_be which seems wrong */ 667 /* This duplicates the handling in do_be which seems wrong */
709 int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL; 668 int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
710 669
711 if (gcmp_present) { 670 if (mips_cm_present()) {
712 unsigned long cm_error = GCMPGCB(GCMEC); 671 unsigned long cm_error = read_gcr_error_cause();
713 unsigned long cm_addr = GCMPGCB(GCMEA); 672 unsigned long cm_addr = read_gcr_error_addr();
714 unsigned long cm_other = GCMPGCB(GCMEO); 673 unsigned long cm_other = read_gcr_error_mult();
715 unsigned long cause, ocause; 674 unsigned long cause, ocause;
716 char buf[256]; 675 char buf[256];
717 676
718 cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK); 677 cause = cm_error & CM_GCR_ERROR_CAUSE_ERRTYPE_MSK;
719 if (cause != 0) { 678 if (cause != 0) {
720 cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF; 679 cause >>= CM_GCR_ERROR_CAUSE_ERRTYPE_SHF;
721 if (cause < 16) { 680 if (cause < 16) {
722 unsigned long cca_bits = (cm_error >> 15) & 7; 681 unsigned long cca_bits = (cm_error >> 15) & 7;
723 unsigned long tr_bits = (cm_error >> 12) & 7; 682 unsigned long tr_bits = (cm_error >> 12) & 7;
@@ -748,8 +707,8 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
748 mcmd[cmd_bits], sport_bits); 707 mcmd[cmd_bits], sport_bits);
749 } 708 }
750 709
751 ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >> 710 ocause = (cm_other & CM_GCR_ERROR_MULT_ERR2ND_MSK) >>
752 GCMP_GCB_GMEO_ERROR_2ND_SHF; 711 CM_GCR_ERROR_MULT_ERR2ND_SHF;
753 712
754 pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error, 713 pr_err("CM_ERROR=%08lx %s <%s>\n", cm_error,
755 causes[cause], buf); 714 causes[cause], buf);
@@ -757,7 +716,7 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
757 pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]); 716 pr_err("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
758 717
759 /* reprime cause register */ 718 /* reprime cause register */
760 GCMPGCB(GCMEC) = 0; 719 write_gcr_error_cause(0);
761 } 720 }
762 } 721 }
763 722
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 1f73d63e92a7..6d0f4ab3632d 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -24,22 +24,30 @@ static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
24/* determined physical memory size, not overridden by command line args */ 24/* determined physical memory size, not overridden by command line args */
25unsigned long physical_memsize = 0L; 25unsigned long physical_memsize = 0L;
26 26
27fw_memblock_t * __init fw_getmdesc(void) 27fw_memblock_t * __init fw_getmdesc(int eva)
28{ 28{
29 char *memsize_str, *ptr; 29 char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
30 unsigned int memsize; 30 unsigned long memsize, ememsize __maybe_unused = 0;
31 static char cmdline[COMMAND_LINE_SIZE] __initdata; 31 static char cmdline[COMMAND_LINE_SIZE] __initdata;
32 long val;
33 int tmp; 32 int tmp;
34 33
35 /* otherwise look in the environment */ 34 /* otherwise look in the environment */
35
36 memsize_str = fw_getenv("memsize"); 36 memsize_str = fw_getenv("memsize");
37 if (!memsize_str) { 37 if (memsize_str)
38 tmp = kstrtol(memsize_str, 0, &memsize);
39 if (eva) {
40 /* Look for ememsize for EVA */
41 ememsize_str = fw_getenv("ememsize");
42 if (ememsize_str)
43 tmp = kstrtol(ememsize_str, 0, &ememsize);
44 }
45 if (!memsize && !ememsize) {
38 pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); 46 pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
39 physical_memsize = 0x02000000; 47 physical_memsize = 0x02000000;
40 } else { 48 } else {
41 tmp = kstrtol(memsize_str, 0, &val); 49 /* If ememsize is set, then set physical_memsize to that */
42 physical_memsize = (unsigned long)val; 50 physical_memsize = ememsize ? : memsize;
43 } 51 }
44 52
45#ifdef CONFIG_CPU_BIG_ENDIAN 53#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -54,20 +62,30 @@ fw_memblock_t * __init fw_getmdesc(void)
54 ptr = strstr(cmdline, "memsize="); 62 ptr = strstr(cmdline, "memsize=");
55 if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' ')) 63 if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
56 ptr = strstr(ptr, " memsize="); 64 ptr = strstr(ptr, " memsize=");
65 /* And now look for ememsize */
66 if (eva) {
67 ptr = strstr(cmdline, "ememsize=");
68 if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
69 ptr = strstr(ptr, " ememsize=");
70 }
57 71
58 if (ptr) 72 if (ptr)
59 memsize = memparse(ptr + 8, &ptr); 73 memsize = memparse(ptr + 8 + (eva ? 1 : 0), &ptr);
60 else 74 else
61 memsize = physical_memsize; 75 memsize = physical_memsize;
62 76
 77 /* Reserve the last 64K for HIGHMEM arithmetic */
78 if (memsize > 0x7fff0000)
79 memsize = 0x7fff0000;
80
63 memset(mdesc, 0, sizeof(mdesc)); 81 memset(mdesc, 0, sizeof(mdesc));
64 82
65 mdesc[0].type = fw_dontuse; 83 mdesc[0].type = fw_dontuse;
66 mdesc[0].base = 0x00000000; 84 mdesc[0].base = PHYS_OFFSET;
67 mdesc[0].size = 0x00001000; 85 mdesc[0].size = 0x00001000;
68 86
69 mdesc[1].type = fw_code; 87 mdesc[1].type = fw_code;
70 mdesc[1].base = 0x00001000; 88 mdesc[1].base = mdesc[0].base + 0x00001000UL;
71 mdesc[1].size = 0x000ef000; 89 mdesc[1].size = 0x000ef000;
72 90
73 /* 91 /*
@@ -78,21 +96,27 @@ fw_memblock_t * __init fw_getmdesc(void)
78 * devices. 96 * devices.
79 */ 97 */
80 mdesc[2].type = fw_dontuse; 98 mdesc[2].type = fw_dontuse;
81 mdesc[2].base = 0x000f0000; 99 mdesc[2].base = mdesc[0].base + 0x000f0000UL;
82 mdesc[2].size = 0x00010000; 100 mdesc[2].size = 0x00010000;
83 101
84 mdesc[3].type = fw_dontuse; 102 mdesc[3].type = fw_dontuse;
85 mdesc[3].base = 0x00100000; 103 mdesc[3].base = mdesc[0].base + 0x00100000UL;
86 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - 104 mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
87 mdesc[3].base; 105 0x00100000UL;
88 106
89 mdesc[4].type = fw_free; 107 mdesc[4].type = fw_free;
90 mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); 108 mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
91 mdesc[4].size = memsize - mdesc[4].base; 109 mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base);
92 110
93 return &mdesc[0]; 111 return &mdesc[0];
94} 112}
95 113
114static void free_init_pages_eva_malta(void *begin, void *end)
115{
116 free_init_pages("unused kernel", __pa_symbol((unsigned long *)begin),
117 __pa_symbol((unsigned long *)end));
118}
119
96static int __init fw_memtype_classify(unsigned int type) 120static int __init fw_memtype_classify(unsigned int type)
97{ 121{
98 switch (type) { 122 switch (type) {
@@ -109,7 +133,9 @@ void __init fw_meminit(void)
109{ 133{
110 fw_memblock_t *p; 134 fw_memblock_t *p;
111 135
112 p = fw_getmdesc(); 136 p = fw_getmdesc(config_enabled(CONFIG_EVA));
137 free_init_pages_eva = (config_enabled(CONFIG_EVA) ?
138 free_init_pages_eva_malta : NULL);
113 139
114 while (p->size) { 140 while (p->size) {
115 long type; 141 long type;
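
The malta-memory.c hunks above make the YAMON memsize handling EVA-aware: ememsize, when set, takes precedence over memsize, and the 32MB default remains the fallback when neither is present. A minimal standalone sketch of that precedence logic, assuming plain YAMON-style environment strings (the helper name and the main() driver are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the precedence above: ememsize (EVA) wins when present,
 * memsize is the fallback, and 32MB is the default when neither is set. */
static unsigned long pick_physical_memsize(const char *memsize_str,
                                           const char *ememsize_str)
{
	unsigned long memsize = 0, ememsize = 0;

	if (memsize_str)
		memsize = strtoul(memsize_str, NULL, 0);
	if (ememsize_str)               /* only consulted when EVA is enabled */
		ememsize = strtoul(ememsize_str, NULL, 0);

	if (!memsize && !ememsize)
		return 0x02000000;      /* default: 32MB */

	return ememsize ? ememsize : memsize;
}

int main(void)
{
	printf("0x%lx\n", pick_physical_memsize("0x08000000", NULL)); /* 0x8000000 */
	printf("0x%lx\n", pick_physical_memsize(NULL, "0x10000000")); /* 0x10000000 */
	printf("0x%lx\n", pick_physical_memsize(NULL, NULL));         /* 0x2000000 */
	return 0;
}
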
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index c72a06936781..bf621516afff 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -26,12 +26,12 @@
26#include <linux/time.h> 26#include <linux/time.h>
27 27
28#include <asm/fw/fw.h> 28#include <asm/fw/fw.h>
29#include <asm/mips-cm.h>
29#include <asm/mips-boards/generic.h> 30#include <asm/mips-boards/generic.h>
30#include <asm/mips-boards/malta.h> 31#include <asm/mips-boards/malta.h>
31#include <asm/mips-boards/maltaint.h> 32#include <asm/mips-boards/maltaint.h>
32#include <asm/dma.h> 33#include <asm/dma.h>
33#include <asm/traps.h> 34#include <asm/traps.h>
34#include <asm/gcmpregs.h>
35#ifdef CONFIG_VT 35#ifdef CONFIG_VT
36#include <linux/console.h> 36#include <linux/console.h>
37#endif 37#endif
@@ -127,7 +127,7 @@ static int __init plat_enable_iocoherency(void)
127 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); 127 BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
128 pr_info("Enabled Bonito IOBC coherency\n"); 128 pr_info("Enabled Bonito IOBC coherency\n");
129 } 129 }
130 } else if (gcmp_niocu() != 0) { 130 } else if (mips_cm_numiocu() != 0) {
131 /* Nothing special needs to be done to enable coherency */ 131 /* Nothing special needs to be done to enable coherency */
132 pr_info("CMP IOCU detected\n"); 132 pr_info("CMP IOCU detected\n");
133 if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { 133 if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
@@ -165,7 +165,6 @@ static void __init plat_setup_iocoherency(void)
165#endif 165#endif
166} 166}
167 167
168#ifdef CONFIG_BLK_DEV_IDE
169static void __init pci_clock_check(void) 168static void __init pci_clock_check(void)
170{ 169{
171 unsigned int __iomem *jmpr_p = 170 unsigned int __iomem *jmpr_p =
@@ -175,18 +174,25 @@ static void __init pci_clock_check(void)
175 33, 20, 25, 30, 12, 16, 37, 10 174 33, 20, 25, 30, 12, 16, 37, 10
176 }; 175 };
177 int pciclock = pciclocks[jmpr]; 176 int pciclock = pciclocks[jmpr];
178 char *argptr = fw_getcmdline(); 177 char *optptr, *argptr = fw_getcmdline();
179 178
180 if (pciclock != 33 && !strstr(argptr, "idebus=")) { 179 /*
181 pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n", 180 * If user passed a pci_clock= option, don't tack on another one
181 */
182 optptr = strstr(argptr, "pci_clock=");
183 if (optptr && (optptr == argptr || optptr[-1] == ' '))
184 return;
185
186 if (pciclock != 33) {
187 pr_warn("WARNING: PCI clock is %dMHz, setting pci_clock\n",
182 pciclock); 188 pciclock);
183 argptr += strlen(argptr); 189 argptr += strlen(argptr);
184 sprintf(argptr, " idebus=%d", pciclock); 190 sprintf(argptr, " pci_clock=%d", pciclock);
185 if (pciclock < 20 || pciclock > 66) 191 if (pciclock < 20 || pciclock > 66)
186 pr_warn("WARNING: IDE timing calculations will be incorrect\n"); 192 pr_warn("WARNING: IDE timing calculations will be "
193 "incorrect\n");
187 } 194 }
188} 195}
189#endif
190 196
191#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE) 197#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
192static void __init screen_info_setup(void) 198static void __init screen_info_setup(void)
@@ -247,6 +253,10 @@ void __init plat_mem_setup(void)
247{ 253{
248 unsigned int i; 254 unsigned int i;
249 255
256 if (config_enabled(CONFIG_EVA))
257 /* EVA has already been configured in mach-malta/kernel-init.h */
258 pr_info("Enhanced Virtual Addressing (EVA) activated\n");
259
250 mips_pcibios_init(); 260 mips_pcibios_init();
251 261
252 /* Request I/O space for devices used on the Malta board. */ 262 /* Request I/O space for devices used on the Malta board. */
@@ -268,9 +278,7 @@ void __init plat_mem_setup(void)
268 278
269 plat_setup_iocoherency(); 279 plat_setup_iocoherency();
270 280
271#ifdef CONFIG_BLK_DEV_IDE
272 pci_clock_check(); 281 pci_clock_check();
273#endif
274 282
275#ifdef CONFIG_BLK_DEV_FD 283#ifdef CONFIG_BLK_DEV_FD
276 fd_activate(); 284 fd_activate();
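
The pci_clock_check() rework above drops the CONFIG_BLK_DEV_IDE guard, renames the appended option from idebus= to pci_clock=, and bails out when the user already passed one. A standalone sketch of that append-once check, assuming a simple fixed-size command-line buffer (helper name hypothetical):

#include <stdio.h>
#include <string.h>

/* Only append " pci_clock=<n>" when the user has not already supplied a
 * pci_clock= option at the start of the line or after a space. */
static void maybe_append_pci_clock(char *cmdline, size_t len, int pciclock)
{
	char *opt = strstr(cmdline, "pci_clock=");

	if (opt && (opt == cmdline || opt[-1] == ' '))
		return;                         /* user already set it */

	if (pciclock != 33)
		snprintf(cmdline + strlen(cmdline), len - strlen(cmdline),
		         " pci_clock=%d", pciclock);
}

int main(void)
{
	char cmdline[128] = "console=ttyS0 pci_clock=20";

	maybe_append_pci_clock(cmdline, sizeof(cmdline), 25);
	printf("%s\n", cmdline);   /* unchanged: option was already present */
	return 0;
}
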
diff --git a/arch/mips/mti-sead3/sead3-mtd.c b/arch/mips/mti-sead3/sead3-mtd.c
index ffa35f509789..f9c890d72677 100644
--- a/arch/mips/mti-sead3/sead3-mtd.c
+++ b/arch/mips/mti-sead3/sead3-mtd.c
@@ -50,5 +50,4 @@ static int __init sead3_mtd_init(void)
50 50
51 return 0; 51 return 0;
52} 52}
53 53device_initcall(sead3_mtd_init);
54module_init(sead3_mtd_init)
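
The sead3-mtd.c change replaces module_init() with device_initcall(), the usual idiom for code that is only ever built in. A kernel-context sketch of the resulting shape (not standalone; the init function here is a placeholder, not the SEAD-3 one):

#include <linux/init.h>

static int __init example_builtin_init(void)
{
	/* register platform devices, MTD partitions, etc. */
	return 0;
}
device_initcall(example_builtin_init);
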
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 2a86e38872a7..e74732449478 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -86,8 +86,11 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
86 case CPU_34K: 86 case CPU_34K:
87 case CPU_1004K: 87 case CPU_1004K:
88 case CPU_74K: 88 case CPU_74K:
89 case CPU_1074K:
89 case CPU_INTERAPTIV: 90 case CPU_INTERAPTIV:
90 case CPU_PROAPTIV: 91 case CPU_PROAPTIV:
92 case CPU_P5600:
93 case CPU_M5150:
91 case CPU_LOONGSON1: 94 case CPU_LOONGSON1:
92 case CPU_SB1: 95 case CPU_SB1:
93 case CPU_SB1A: 96 case CPU_SB1A:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 4d94d75ec6f9..42821ae2d77e 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -372,6 +372,7 @@ static int __init mipsxx_init(void)
372 op_model_mipsxx_ops.cpu_type = "mips/34K"; 372 op_model_mipsxx_ops.cpu_type = "mips/34K";
373 break; 373 break;
374 374
375 case CPU_1074K:
375 case CPU_74K: 376 case CPU_74K:
376 op_model_mipsxx_ops.cpu_type = "mips/74K"; 377 op_model_mipsxx_ops.cpu_type = "mips/74K";
377 break; 378 break;
@@ -384,6 +385,14 @@ static int __init mipsxx_init(void)
384 op_model_mipsxx_ops.cpu_type = "mips/proAptiv"; 385 op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
385 break; 386 break;
386 387
388 case CPU_P5600:
389 op_model_mipsxx_ops.cpu_type = "mips/P5600";
390 break;
391
392 case CPU_M5150:
393 op_model_mipsxx_ops.cpu_type = "mips/M5150";
394 break;
395
387 case CPU_5KC: 396 case CPU_5KC:
388 op_model_mipsxx_ops.cpu_type = "mips/5K"; 397 op_model_mipsxx_ops.cpu_type = "mips/5K";
389 break; 398 break;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 137f2a6feb25..d61138a177cc 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_LASAT) += pci-lasat.o
29obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o 29obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
30obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o 30obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
31obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o 31obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
32obj-$(CONFIG_LEMOTE_MACH3A) += fixup-loongson3.o ops-loongson3.o
32obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o 33obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o pci-malta.o
33obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o 34obj-$(CONFIG_PMC_MSP7120_GW) += fixup-pmcmsp.o ops-pmcmsp.o
34obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o 35obj-$(CONFIG_PMC_MSP7120_EVAL) += fixup-pmcmsp.o ops-pmcmsp.o
diff --git a/arch/mips/pci/fixup-loongson3.c b/arch/mips/pci/fixup-loongson3.c
new file mode 100644
index 000000000000..d708ae46d325
--- /dev/null
+++ b/arch/mips/pci/fixup-loongson3.c
@@ -0,0 +1,66 @@
1/*
2 * fixup-loongson3.c
3 *
4 * Copyright (C) 2012 Lemote, Inc.
5 * Author: Xiang Yu, xiangy@lemote.com
6 * Chen Huacai, chenhc@lemote.com
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 */
25
26#include <linux/pci.h>
27#include <boot_param.h>
28
29static void print_fixup_info(const struct pci_dev *pdev)
30{
31 dev_info(&pdev->dev, "Device %x:%x, irq %d\n",
32 pdev->vendor, pdev->device, pdev->irq);
33}
34
35int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
36{
37 print_fixup_info(dev);
38 return dev->irq;
39}
40
41static void pci_fixup_radeon(struct pci_dev *pdev)
42{
43 if (pdev->resource[PCI_ROM_RESOURCE].start)
44 return;
45
46 if (!loongson_sysconf.vgabios_addr)
47 return;
48
49 pdev->resource[PCI_ROM_RESOURCE].start =
50 loongson_sysconf.vgabios_addr;
51 pdev->resource[PCI_ROM_RESOURCE].end =
52 loongson_sysconf.vgabios_addr + 256*1024 - 1;
53 pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_COPY;
54
55 dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n",
56 PCI_ROM_RESOURCE, &pdev->resource[PCI_ROM_RESOURCE]);
57}
58
59DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
60 PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon);
61
62/* Do platform specific device initialization at pci_enable_device() time */
63int pcibios_plat_dev_init(struct pci_dev *dev)
64{
65 return 0;
66}
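
The new fixup-loongson3.c registers its Radeon ROM fixup through DECLARE_PCI_FIXUP_CLASS_FINAL with a class shift of 8; as far as the generic fixup matching goes, that compares dev->class shifted right by 8 bits, so the programming-interface byte is ignored. A small standalone illustration of that comparison (the values are illustrative, not read from real hardware):

#include <stdio.h>

int main(void)
{
	unsigned int pci_class_display_vga = 0x0300; /* base class + subclass */
	unsigned int dev_class = 0x030000;           /* (class << 8) | prog-if */

	/* With a class shift of 8 the prog-if byte drops out of the compare. */
	if ((dev_class >> 8) == pci_class_display_vga)
		printf("fixup would match this VGA device\n");
	return 0;
}
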
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 7a0eda782e35..2f9e52a1a750 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -51,6 +51,19 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
51 return 0; 51 return 0;
52} 52}
53 53
54static void malta_piix_func3_base_fixup(struct pci_dev *dev)
55{
56 /* Set a sane PM I/O base address */
57 pci_write_config_word(dev, PIIX4_FUNC3_PMBA, 0x1000);
58
59 /* Enable access to the PM I/O region */
60 pci_write_config_byte(dev, PIIX4_FUNC3_PMREGMISC,
61 PIIX4_FUNC3_PMREGMISC_EN);
62}
63
64DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3,
65 malta_piix_func3_base_fixup);
66
54static void malta_piix_func0_fixup(struct pci_dev *pdev) 67static void malta_piix_func0_fixup(struct pci_dev *pdev)
55{ 68{
56 unsigned char reg_val; 69 unsigned char reg_val;
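
The new early fixup above writes a fixed PM I/O base into PIIX4 function 3 and enables access to that region. A kernel-context sketch (not standalone; the helper name is hypothetical) of reading the base back for a quick sanity check, reusing the same register define:

#include <linux/pci.h>

static void malta_piix_func3_check(struct pci_dev *dev)
{
	u16 pmba;

	/* Read back what malta_piix_func3_base_fixup() programmed. */
	pci_read_config_word(dev, PIIX4_FUNC3_PMBA, &pmba);
	dev_info(&dev->dev, "PIIX4 PM I/O base reads back as 0x%04x\n", pmba);
}
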
diff --git a/arch/mips/pci/ops-loongson3.c b/arch/mips/pci/ops-loongson3.c
new file mode 100644
index 000000000000..46ed541a3ec7
--- /dev/null
+++ b/arch/mips/pci/ops-loongson3.c
@@ -0,0 +1,101 @@
1#include <linux/types.h>
2#include <linux/pci.h>
3#include <linux/kernel.h>
4
5#include <asm/mips-boards/bonito64.h>
6
7#include <loongson.h>
8
9#define PCI_ACCESS_READ 0
10#define PCI_ACCESS_WRITE 1
11
12#define HT1LO_PCICFG_BASE 0x1a000000
13#define HT1LO_PCICFG_BASE_TP1 0x1b000000
14
15static int loongson3_pci_config_access(unsigned char access_type,
16 struct pci_bus *bus, unsigned int devfn,
17 int where, u32 *data)
18{
19 unsigned char busnum = bus->number;
20 u_int64_t addr, type;
21 void *addrp;
22 int device = PCI_SLOT(devfn);
23 int function = PCI_FUNC(devfn);
24 int reg = where & ~3;
25
26 addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
27 if (busnum == 0) {
28 if (device > 31)
29 return PCIBIOS_DEVICE_NOT_FOUND;
30 addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE) | (addr & 0xffff));
31 type = 0;
32
33 } else {
34 addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE_TP1) | (addr));
35 type = 0x10000;
36 }
37
38 if (access_type == PCI_ACCESS_WRITE)
39 writel(*data, addrp);
40 else {
41 *data = readl(addrp);
42 if (*data == 0xffffffff) {
43 *data = -1;
44 return PCIBIOS_DEVICE_NOT_FOUND;
45 }
46 }
47 return PCIBIOS_SUCCESSFUL;
48}
49
50static int loongson3_pci_pcibios_read(struct pci_bus *bus, unsigned int devfn,
51 int where, int size, u32 *val)
52{
53 u32 data = 0;
54 int ret = loongson3_pci_config_access(PCI_ACCESS_READ,
55 bus, devfn, where, &data);
56
57 if (ret != PCIBIOS_SUCCESSFUL)
58 return ret;
59
60 if (size == 1)
61 *val = (data >> ((where & 3) << 3)) & 0xff;
62 else if (size == 2)
63 *val = (data >> ((where & 3) << 3)) & 0xffff;
64 else
65 *val = data;
66
67 return PCIBIOS_SUCCESSFUL;
68}
69
70static int loongson3_pci_pcibios_write(struct pci_bus *bus, unsigned int devfn,
71 int where, int size, u32 val)
72{
73 u32 data = 0;
74 int ret;
75
76 if (size == 4)
77 data = val;
78 else {
79 ret = loongson3_pci_config_access(PCI_ACCESS_READ,
80 bus, devfn, where, &data);
81 if (ret != PCIBIOS_SUCCESSFUL)
82 return ret;
83
84 if (size == 1)
85 data = (data & ~(0xff << ((where & 3) << 3))) |
86 (val << ((where & 3) << 3));
87 else if (size == 2)
88 data = (data & ~(0xffff << ((where & 3) << 3))) |
89 (val << ((where & 3) << 3));
90 }
91
92 ret = loongson3_pci_config_access(PCI_ACCESS_WRITE,
93 bus, devfn, where, &data);
94
95 return ret;
96}
97
98struct pci_ops loongson_pci_ops = {
99 .read = loongson3_pci_pcibios_read,
100 .write = loongson3_pci_pcibios_write
101};
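
loongson3_pci_pcibios_read()/write() above only ever issue aligned 32-bit config cycles and shift or merge the byte lanes for 1- and 2-byte accesses. A standalone sketch of that byte-lane arithmetic, separated from the hardware access:

#include <stdio.h>

/* Reads extract from, and writes merge into, the aligned 32-bit dword
 * at (where & ~3), using the low two bits of 'where' as the byte lane. */
static unsigned int cfg_extract(unsigned int dword, int where, int size)
{
	int shift = (where & 3) << 3;

	if (size == 1)
		return (dword >> shift) & 0xff;
	if (size == 2)
		return (dword >> shift) & 0xffff;
	return dword;
}

static unsigned int cfg_merge(unsigned int dword, int where, int size,
                              unsigned int val)
{
	int shift = (where & 3) << 3;

	if (size == 1)
		return (dword & ~(0xffu << shift)) | ((val & 0xff) << shift);
	if (size == 2)
		return (dword & ~(0xffffu << shift)) | ((val & 0xffff) << shift);
	return val;
}

int main(void)
{
	unsigned int dword = 0x11223344;

	printf("0x%02x\n", cfg_extract(dword, 2, 1));      /* 0x22 */
	printf("0x%08x\n", cfg_merge(dword, 2, 1, 0xaa));  /* 0x11aa3344 */
	return 0;
}
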
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index d1faece21b6a..563d1f61d6ee 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -16,6 +16,7 @@
16#include <linux/syscore_ops.h> 16#include <linux/syscore_ops.h>
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18 18
19#include <asm/dma-coherence.h>
19#include <asm/mach-au1x00/au1000.h> 20#include <asm/mach-au1x00/au1000.h>
20#include <asm/tlbmisc.h> 21#include <asm/tlbmisc.h>
21 22
@@ -411,17 +412,15 @@ static int alchemy_pci_probe(struct platform_device *pdev)
411 } 412 }
412 ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io; 413 ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io;
413 414
414#ifdef CONFIG_DMA_NONCOHERENT
415 /* Au1500 revisions older than AD have borked coherent PCI */ 415 /* Au1500 revisions older than AD have borked coherent PCI */
416 if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) && 416 if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) &&
417 (read_c0_prid() < 0x01030202)) { 417 (read_c0_prid() < 0x01030202) && !coherentio) {
418 val = __raw_readl(ctx->regs + PCI_REG_CONFIG); 418 val = __raw_readl(ctx->regs + PCI_REG_CONFIG);
419 val |= PCI_CONFIG_NC; 419 val |= PCI_CONFIG_NC;
420 __raw_writel(val, ctx->regs + PCI_REG_CONFIG); 420 __raw_writel(val, ctx->regs + PCI_REG_CONFIG);
421 wmb(); 421 wmb();
422 dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n"); 422 dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n");
423 } 423 }
424#endif
425 424
426 if (pd->board_map_irq) 425 if (pd->board_map_irq)
427 ctx->board_map_irq = pd->board_map_irq; 426 ctx->board_map_irq = pd->board_map_irq;
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
index f1a73890dd4f..cfbbc3e3e914 100644
--- a/arch/mips/pci/pci-malta.c
+++ b/arch/mips/pci/pci-malta.c
@@ -27,7 +27,7 @@
27#include <linux/init.h> 27#include <linux/init.h>
28 28
29#include <asm/gt64120.h> 29#include <asm/gt64120.h>
30#include <asm/gcmpregs.h> 30#include <asm/mips-cm.h>
31#include <asm/mips-boards/generic.h> 31#include <asm/mips-boards/generic.h>
32#include <asm/mips-boards/bonito64.h> 32#include <asm/mips-boards/bonito64.h>
33#include <asm/mips-boards/msc01_pci.h> 33#include <asm/mips-boards/msc01_pci.h>
@@ -201,11 +201,11 @@ void __init mips_pcibios_init(void)
201 msc_mem_resource.start = start & mask; 201 msc_mem_resource.start = start & mask;
202 msc_mem_resource.end = (start & mask) | ~mask; 202 msc_mem_resource.end = (start & mask) | ~mask;
203 msc_controller.mem_offset = (start & mask) - (map & mask); 203 msc_controller.mem_offset = (start & mask) - (map & mask);
204#ifdef CONFIG_MIPS_CMP 204 if (mips_cm_numiocu()) {
205 if (gcmp_niocu()) 205 write_gcr_reg0_base(start);
206 gcmp_setregion(0, start, mask, 206 write_gcr_reg0_mask(mask |
207 GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); 207 CM_GCR_REGn_MASK_CMTGT_IOCU0);
208#endif 208 }
209 MSC_READ(MSC01_PCI_SC2PIOBASL, start); 209 MSC_READ(MSC01_PCI_SC2PIOBASL, start);
210 MSC_READ(MSC01_PCI_SC2PIOMSKL, mask); 210 MSC_READ(MSC01_PCI_SC2PIOMSKL, mask);
211 MSC_READ(MSC01_PCI_SC2PIOMAPL, map); 211 MSC_READ(MSC01_PCI_SC2PIOMAPL, map);
@@ -213,11 +213,11 @@ void __init mips_pcibios_init(void)
213 msc_io_resource.end = (map & mask) | ~mask; 213 msc_io_resource.end = (map & mask) | ~mask;
214 msc_controller.io_offset = 0; 214 msc_controller.io_offset = 0;
215 ioport_resource.end = ~mask; 215 ioport_resource.end = ~mask;
216#ifdef CONFIG_MIPS_CMP 216 if (mips_cm_numiocu()) {
217 if (gcmp_niocu()) 217 write_gcr_reg1_base(start);
218 gcmp_setregion(1, start, mask, 218 write_gcr_reg1_mask(mask |
219 GCMP_GCB_GCMPB_CMDEFTGT_IOCU1); 219 CM_GCR_REGn_MASK_CMTGT_IOCU0);
220#endif 220 }
221 /* If ranges overlap I/O takes precedence. */ 221 /* If ranges overlap I/O takes precedence. */
222 start = start & mask; 222 start = start & mask;
223 end = start | ~mask; 223 end = start | ~mask;
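
The pci-malta.c hunks above switch from the old GCMP helpers to the mips-cm accessors and program the CM region base/mask pairs so both windows target IOCU0. A standalone sketch of the base/mask arithmetic the surrounding code uses for the resource start and end, assuming a contiguous high-bits mask (window values are made up):

#include <stdio.h>

/* An address falls inside a base/mask window when its masked bits match
 * the masked base; the window spans (base & mask) .. ((base & mask) | ~mask). */
static int in_region(unsigned int addr, unsigned int base, unsigned int mask)
{
	return (addr & mask) == (base & mask);
}

int main(void)
{
	unsigned int base = 0x10000000, mask = 0xf8000000; /* 128MB window */

	printf("start 0x%08x end 0x%08x\n", base & mask, (base & mask) | ~mask);
	printf("0x10001000 in region: %d\n", in_region(0x10001000, base, mask));
	printf("0x18000000 in region: %d\n", in_region(0x18000000, base, mask));
	return 0;
}
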
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 396b2967ad85..7e980767679c 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -49,7 +49,7 @@ void msp7120_reset(void)
49 /* Cache the reset code of this function */ 49 /* Cache the reset code of this function */
50 __asm__ __volatile__ ( 50 __asm__ __volatile__ (
51 " .set push \n" 51 " .set push \n"
52 " .set mips3 \n" 52 " .set arch=r4000 \n"
53 " la %0,startpoint \n" 53 " la %0,startpoint \n"
54 " la %1,endpoint \n" 54 " la %1,endpoint \n"
55 " .set pop \n" 55 " .set pop \n"
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 7e0277a1048f..32a7c828f073 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -43,6 +43,7 @@ LEAF(swsusp_arch_resume)
43 bne t1, t3, 1b 43 bne t1, t3, 1b
44 PTR_L t0, PBE_NEXT(t0) 44 PTR_L t0, PBE_NEXT(t0)
45 bnez t0, 0b 45 bnez t0, 0b
46 jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
46 PTR_LA t0, saved_regs 47 PTR_LA t0, saved_regs
47 PTR_L ra, PT_R31(t0) 48 PTR_L ra, PT_R31(t0)
48 PTR_L sp, PT_R29(t0) 49 PTR_L sp, PT_R29(t0)
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 1bfd1c17b3c2..4a296655f446 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -20,19 +20,13 @@ choice
20 config SOC_RT305X 20 config SOC_RT305X
21 bool "RT305x" 21 bool "RT305x"
22 select USB_ARCH_HAS_HCD 22 select USB_ARCH_HAS_HCD
23 select USB_ARCH_HAS_OHCI
24 select USB_ARCH_HAS_EHCI
25 23
26 config SOC_RT3883 24 config SOC_RT3883
27 bool "RT3883" 25 bool "RT3883"
28 select USB_ARCH_HAS_OHCI
29 select USB_ARCH_HAS_EHCI
30 select HW_HAS_PCI 26 select HW_HAS_PCI
31 27
32 config SOC_MT7620 28 config SOC_MT7620
33 bool "MT7620" 29 bool "MT7620"
34 select USB_ARCH_HAS_OHCI
35 select USB_ARCH_HAS_EHCI
36 30
37endchoice 31endchoice
38 32